diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index db634b55a..91bfb31c1 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -63,6 +63,80 @@ jobs:
kubectl -n helm-system wait helmreleases/podinfo-git --for=condition=ready --timeout=4m
kubectl -n helm-system wait helmreleases/podinfo-oci --for=condition=ready --timeout=4m
kubectl -n helm-system delete -f config/testdata/podinfo
+ - name: Run server-side apply test
+ run: |
+ test_name=server-side-apply
+ kubectl -n helm-system apply -f config/testdata/$test_name/install.yaml
+ kubectl -n helm-system wait helmreleases/$test_name --for=condition=ready --timeout=4m
+
+ # Verify the release is deployed with SSA.
+ APPLY_METHOD=$(kubectl -n helm-system get secret sh.helm.release.v1.$test_name.v1 -o jsonpath='{.data.release}' | base64 -d | base64 -d | gunzip | jq -r '.apply_method')
+ if [ "$APPLY_METHOD" != "ssa" ]; then
+ echo -e "Unexpected apply method: $APPLY_METHOD (expected: ssa)"
+ exit 1
+ fi
+
+ # Upgrade with SSA.
+ kubectl -n helm-system apply -f config/testdata/$test_name/upgrade.yaml
+ kubectl -n helm-system wait helmreleases/$test_name --for=condition=ready --timeout=4m
+
+ # Validate release was upgraded.
+ REVISION_COUNT=$(helm -n helm-system history -o json $test_name | jq 'length')
+ if [ "$REVISION_COUNT" != 2 ]; then
+ echo -e "Unexpected revision count: $REVISION_COUNT"
+ exit 1
+ fi
+
+ kubectl -n helm-system delete -f config/testdata/$test_name/install.yaml
+ - name: Run server-side apply rollback test
+ run: |
+ test_name=server-side-apply-rollback
+ kubectl -n helm-system apply -f config/testdata/server-side-apply/rollback-install.yaml
+ kubectl -n helm-system wait helmreleases/$test_name --for=condition=ready --timeout=4m
+
+ # Verify the release is deployed with SSA.
+ APPLY_METHOD=$(kubectl -n helm-system get secret sh.helm.release.v1.$test_name.v1 -o jsonpath='{.data.release}' | base64 -d | base64 -d | gunzip | jq -r '.apply_method')
+ if [ "$APPLY_METHOD" != "ssa" ]; then
+ echo -e "Unexpected apply method: $APPLY_METHOD (expected: ssa)"
+ exit 1
+ fi
+
+ # Upgrade with failing config to trigger rollback.
+ kubectl -n helm-system apply -f config/testdata/server-side-apply/rollback-upgrade.yaml
+ echo -n ">>> Waiting for rollback"
+ count=0
+ until [ 'true' == "$( kubectl -n helm-system get helmrelease/$test_name -o json | jq '.status.conditions | map( { (.type): .status } ) | add | .Released=="False" and .Ready=="False" and .Remediated=="True"' )" ]; do
+ echo -n '.'
+ sleep 5
+ count=$((count + 1))
+ if [[ ${count} -eq 24 ]]; then
+ echo ' No more retries left!'
+ exit 1
+ fi
+ done
+ echo ' done'
+
+ # Validate rollback happened with SSA (revision 3 = rollback to 1).
+ HISTORY=$(helm -n helm-system history -o json $test_name)
+ REVISION_COUNT=$(echo "$HISTORY" | jq 'length')
+ if [ "$REVISION_COUNT" != 3 ]; then
+ echo -e "Unexpected revision count: $REVISION_COUNT"
+ exit 1
+ fi
+ LAST_REVISION_DESCRIPTION=$(echo "$HISTORY" | jq -r 'last | .description')
+ if [ "$LAST_REVISION_DESCRIPTION" != "Rollback to 1" ]; then
+ echo -e "Unexpected last revision description: $LAST_REVISION_DESCRIPTION"
+ exit 1
+ fi
+
+ # Verify the rollback release used SSA.
+ APPLY_METHOD=$(kubectl -n helm-system get secret sh.helm.release.v1.$test_name.v3 -o jsonpath='{.data.release}' | base64 -d | base64 -d | gunzip | jq -r '.apply_method')
+ if [ "$APPLY_METHOD" != "ssa" ]; then
+ echo -e "Unexpected apply method after rollback: $APPLY_METHOD (expected: ssa)"
+ exit 1
+ fi
+
+ kubectl -n helm-system delete -f config/testdata/server-side-apply/rollback-install.yaml
- name: Run dependency tests
run: |
kubectl -n helm-system apply -f config/testdata/dependencies
diff --git a/Dockerfile b/Dockerfile
index 71f176e28..9028805b3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -19,6 +19,7 @@ COPY api/ api/
# copy modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
+COPY helm/ helm/
# cache modules
RUN go mod download
diff --git a/api/v2/helmrelease_types.go b/api/v2/helmrelease_types.go
index dfc3c3df2..01e43e97e 100644
--- a/api/v2/helmrelease_types.go
+++ b/api/v2/helmrelease_types.go
@@ -541,6 +541,11 @@ type Install struct {
// On uninstall, the namespace will not be garbage collected.
// +optional
CreateNamespace bool `json:"createNamespace,omitempty"`
+
+ // ServerSideApply enables server-side apply for resources during install.
+ // Defaults to true (or false when UseHelm3Defaults feature gate is enabled).
+ // +optional
+ ServerSideApply *bool `json:"serverSideApply,omitempty"`
}
// GetTimeout returns the configured timeout for the Helm install action,
@@ -569,6 +574,11 @@ func (in Install) GetRetry() Retry {
return in.Strategy
}
+// GetDisableWait returns whether waiting is disabled for the Helm install action.
+func (in Install) GetDisableWait() bool {
+ return in.DisableWait
+}
+
// InstallStrategy holds the configuration for Helm install strategy.
// +kubebuilder:validation:XValidation:rule="!has(self.retryInterval) || self.name != 'RemediateOnFailure'", message=".retryInterval cannot be set when .name is 'RemediateOnFailure'"
type InstallStrategy struct {
@@ -674,6 +684,21 @@ const (
CreateReplace CRDsPolicy = "CreateReplace"
)
+// ServerSideApplyMode defines the server-side apply mode for Helm upgrade and
+// rollback actions.
+type ServerSideApplyMode string
+
+var (
+ // ServerSideApplyEnabled enables server-side apply for resources.
+ ServerSideApplyEnabled ServerSideApplyMode = "enabled"
+
+ // ServerSideApplyDisabled disables server-side apply for resources.
+ ServerSideApplyDisabled ServerSideApplyMode = "disabled"
+
+ // ServerSideApplyAuto uses the release's previous apply method.
+ ServerSideApplyAuto ServerSideApplyMode = "auto"
+)
+
// Upgrade holds the configuration for Helm upgrade actions for this
// HelmRelease.
type Upgrade struct {
@@ -758,6 +783,14 @@ type Upgrade struct {
// +kubebuilder:validation:Enum=Skip;Create;CreateReplace
// +optional
CRDs CRDsPolicy `json:"crds,omitempty"`
+
+ // ServerSideApply enables server-side apply for resources during upgrade.
+ // Can be "enabled", "disabled", or "auto".
+ // When "auto", server-side apply usage will be based on the release's previous usage.
+ // Defaults to "auto".
+ // +kubebuilder:validation:Enum=enabled;disabled;auto
+ // +optional
+ ServerSideApply ServerSideApplyMode `json:"serverSideApply,omitempty"`
}
// GetTimeout returns the configured timeout for the Helm upgrade action, or the
@@ -787,6 +820,11 @@ func (in Upgrade) GetRetry() Retry {
return in.Strategy
}
+// GetDisableWait returns whether waiting is disabled for the Helm upgrade action.
+func (in Upgrade) GetDisableWait() bool {
+ return in.DisableWait
+}
+
// UpgradeStrategy holds the configuration for Helm upgrade strategy.
// +kubebuilder:validation:XValidation:rule="!has(self.retryInterval) || self.name == 'RetryOnFailure'", message=".retryInterval can only be set when .name is 'RetryOnFailure'"
type UpgradeStrategy struct {
@@ -991,7 +1029,15 @@ type Rollback struct {
// +optional
DisableHooks bool `json:"disableHooks,omitempty"`
- // Recreate performs pod restarts for the resource if applicable.
+ // Recreate performs pod restarts for any managed workloads.
+ //
+ // Deprecated: This behavior was deprecated in Helm 3:
+ // - Deprecation: https://github.com/helm/helm/pull/6463
+ // - Removal: https://github.com/helm/helm/pull/31023
+ // After helm-controller was upgraded to the Helm 4 SDK,
+ // this field is no longer functional and will print a
+ // warning if set to true. It will also be removed in a
+ // future release.
// +optional
Recreate bool `json:"recreate,omitempty"`
@@ -1003,6 +1049,14 @@ type Rollback struct {
// rollback action when it fails.
// +optional
CleanupOnFail bool `json:"cleanupOnFail,omitempty"`
+
+ // ServerSideApply enables server-side apply for resources during rollback.
+ // Can be "enabled", "disabled", or "auto".
+ // When "auto", server-side apply usage will be based on the release's previous usage.
+ // Defaults to "auto".
+ // +kubebuilder:validation:Enum=enabled;disabled;auto
+ // +optional
+ ServerSideApply ServerSideApplyMode `json:"serverSideApply,omitempty"`
}
// GetTimeout returns the configured timeout for the Helm rollback action, or
@@ -1014,6 +1068,11 @@ func (in Rollback) GetTimeout(defaultTimeout metav1.Duration) metav1.Duration {
return *in.Timeout
}
+// GetDisableWait returns whether waiting is disabled for the Helm rollback action.
+func (in Rollback) GetDisableWait() bool {
+ return in.DisableWait
+}
+
// Uninstall holds the configuration for Helm uninstall actions for this
// HelmRelease.
type Uninstall struct {
@@ -1065,6 +1124,11 @@ func (in Uninstall) GetDeletionPropagation() string {
return *in.DeletionPropagation
}
+// GetDisableWait returns whether waiting is disabled for the Helm uninstall action.
+func (in Uninstall) GetDisableWait() bool {
+ return in.DisableWait
+}
+
// ReleaseAction is the action to perform a Helm release.
type ReleaseAction string
@@ -1322,10 +1386,10 @@ func (in HelmRelease) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
-// GetValues unmarshals the raw values to a map[string]interface{} and returns
+// GetValues unmarshals the raw values to a map[string]any and returns
// the result.
-func (in HelmRelease) GetValues() map[string]interface{} {
- var values map[string]interface{}
+func (in HelmRelease) GetValues() map[string]any {
+ var values map[string]any
if in.Spec.Values != nil {
_ = yaml.Unmarshal(in.Spec.Values.Raw, &values)
}
diff --git a/api/v2/zz_generated.deepcopy.go b/api/v2/zz_generated.deepcopy.go
index 57d818d3a..108e0195f 100644
--- a/api/v2/zz_generated.deepcopy.go
+++ b/api/v2/zz_generated.deepcopy.go
@@ -485,6 +485,11 @@ func (in *Install) DeepCopyInto(out *Install) {
*out = new(InstallRemediation)
(*in).DeepCopyInto(*out)
}
+ if in.ServerSideApply != nil {
+ in, out := &in.ServerSideApply, &out.ServerSideApply
+ *out = new(bool)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Install.
diff --git a/api/v2beta1/helmrelease_types.go b/api/v2beta1/helmrelease_types.go
index c9ff4ac7a..785512ba6 100644
--- a/api/v2beta1/helmrelease_types.go
+++ b/api/v2beta1/helmrelease_types.go
@@ -1115,10 +1115,10 @@ func (in HelmRelease) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
-// GetValues unmarshals the raw values to a map[string]interface{} and returns
+// GetValues unmarshals the raw values to a map[string]any and returns
// the result.
-func (in HelmRelease) GetValues() map[string]interface{} {
- var values map[string]interface{}
+func (in HelmRelease) GetValues() map[string]any {
+ var values map[string]any
if in.Spec.Values != nil {
_ = json.Unmarshal(in.Spec.Values.Raw, &values)
}
diff --git a/api/v2beta2/helmrelease_types.go b/api/v2beta2/helmrelease_types.go
index 57cbf87a9..9e97229a5 100644
--- a/api/v2beta2/helmrelease_types.go
+++ b/api/v2beta2/helmrelease_types.go
@@ -1185,10 +1185,10 @@ func (in HelmRelease) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
-// GetValues unmarshals the raw values to a map[string]interface{} and returns
+// GetValues unmarshals the raw values to a map[string]any and returns
// the result.
-func (in HelmRelease) GetValues() map[string]interface{} {
- var values map[string]interface{}
+func (in HelmRelease) GetValues() map[string]any {
+ var values map[string]any
if in.Spec.Values != nil {
_ = yaml.Unmarshal(in.Spec.Values.Raw, &values)
}
diff --git a/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml b/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml
index 47c137a33..1b6500cd9 100644
--- a/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml
+++ b/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml
@@ -439,6 +439,11 @@ spec:
Replace tells the Helm install action to re-use the 'ReleaseName', but only
if that name is a deleted release which remains in the history.
type: boolean
+ serverSideApply:
+ description: |-
+ ServerSideApply enables server-side apply for resources during install.
+ Defaults to true (or false when UseHelm3Defaults feature gate is enabled).
+ type: boolean
skipCRDs:
description: |-
SkipCRDs tells the Helm install action to not install any CRDs. By default,
@@ -716,9 +721,28 @@ spec:
strategy.
type: boolean
recreate:
- description: Recreate performs pod restarts for the resource if
- applicable.
+ description: |-
+ Recreate performs pod restarts for any managed workloads.
+
+ Deprecated: This behavior was deprecated in Helm 3:
+ - Deprecation: https://github.com/helm/helm/pull/6463
+ - Removal: https://github.com/helm/helm/pull/31023
+ After helm-controller was upgraded to the Helm 4 SDK,
+ this field is no longer functional and will print a
+ warning if set to true. It will also be removed in a
+ future release.
type: boolean
+ serverSideApply:
+ description: |-
+ ServerSideApply enables server-side apply for resources during rollback.
+ Can be "enabled", "disabled", or "auto".
+ When "auto", server-side apply usage will be based on the release's previous usage.
+ Defaults to "auto".
+ enum:
+ - enabled
+ - disabled
+ - auto
+ type: string
timeout:
description: |-
Timeout is the time to wait for any individual Kubernetes operation (like
@@ -937,6 +961,17 @@ spec:
- uninstall
type: string
type: object
+ serverSideApply:
+ description: |-
+ ServerSideApply enables server-side apply for resources during upgrade.
+ Can be "enabled", "disabled", or "auto".
+ When "auto", server-side apply usage will be based on the release's previous usage.
+ Defaults to "auto".
+ enum:
+ - enabled
+ - disabled
+ - auto
+ type: string
strategy:
description: |-
Strategy defines the upgrade strategy to use for this HelmRelease.
diff --git a/config/testdata/server-side-apply/install.yaml b/config/testdata/server-side-apply/install.yaml
new file mode 100644
index 000000000..e2409af37
--- /dev/null
+++ b/config/testdata/server-side-apply/install.yaml
@@ -0,0 +1,21 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: server-side-apply
+spec:
+ interval: 5m
+ install:
+ serverSideApply: true
+ chart:
+ spec:
+ chart: podinfo
+ version: '>=6.0.0 <7.0.0'
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ interval: 1m
+ values:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 64Mi
diff --git a/config/testdata/server-side-apply/rollback-install.yaml b/config/testdata/server-side-apply/rollback-install.yaml
new file mode 100644
index 000000000..7194ab634
--- /dev/null
+++ b/config/testdata/server-side-apply/rollback-install.yaml
@@ -0,0 +1,21 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: server-side-apply-rollback
+spec:
+ interval: 30s
+ install:
+ serverSideApply: true
+ chart:
+ spec:
+ chart: podinfo
+ version: '>=6.0.0 <7.0.0'
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ interval: 10m
+ values:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 64Mi
diff --git a/config/testdata/server-side-apply/rollback-upgrade.yaml b/config/testdata/server-side-apply/rollback-upgrade.yaml
new file mode 100644
index 000000000..03c9633e2
--- /dev/null
+++ b/config/testdata/server-side-apply/rollback-upgrade.yaml
@@ -0,0 +1,32 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: server-side-apply-rollback
+spec:
+ interval: 30s
+ install:
+ serverSideApply: true
+ upgrade:
+ serverSideApply: enabled
+ remediation:
+ remediateLastFailure: true
+ rollback:
+ serverSideApply: enabled
+ chart:
+ spec:
+ chart: podinfo
+ version: '>=6.0.0 <7.0.0'
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ interval: 10m
+ values:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 64Mi
+ # Make wait fail to trigger rollback
+ replicaCount: 2
+ faults:
+ unready: true
+ timeout: 3s
diff --git a/config/testdata/server-side-apply/upgrade.yaml b/config/testdata/server-side-apply/upgrade.yaml
new file mode 100644
index 000000000..f25cb2a4a
--- /dev/null
+++ b/config/testdata/server-side-apply/upgrade.yaml
@@ -0,0 +1,24 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: server-side-apply
+spec:
+ interval: 5m
+ install:
+ serverSideApply: true
+ upgrade:
+ serverSideApply: enabled
+ chart:
+ spec:
+ chart: podinfo
+ version: '>=6.0.0 <7.0.0'
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ interval: 1m
+ values:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 64Mi
+ replicaCount: 2
diff --git a/docs/api/v2/helm.md b/docs/api/v2/helm.md
index 1c2abf3c8..c5d1952b8 100644
--- a/docs/api/v2/helm.md
+++ b/docs/api/v2/helm.md
@@ -2109,6 +2109,19 @@ HelmReleaseSpec.TargetNamespace if it does not exist yet.
On uninstall, the namespace will not be garbage collected.
+
+
+serverSideApply
+
+bool
+
+
+
+(Optional)
+
ServerSideApply enables server-side apply for resources during install.
+Defaults to true (or false when UseHelm3Defaults feature gate is enabled).
+
+
@@ -2414,7 +2427,14 @@ bool
(Optional)
-
Recreate performs pod restarts for the resource if applicable.
+
Recreate performs pod restarts for any managed workloads.
+
Deprecated: This behavior was deprecated in Helm 3:
+- Deprecation: https://github.com/helm/helm/pull/6463
+- Removal: https://github.com/helm/helm/pull/31023
+After helm-controller was upgraded to the Helm 4 SDK,
+this field is no longer functional and will print a
+warning if set to true. It will also be removed in a
+future release.
@@ -2442,10 +2462,36 @@ bool
rollback action when it fails.
ServerSideApply enables server-side apply for resources during rollback.
+Can be “enabled”, “disabled”, or “auto”.
+When “auto”, server-side apply usage will be based on the release’s previous usage.
+Defaults to “auto”.
ServerSideApply enables server-side apply for resources during upgrade.
+Can be “enabled”, “disabled”, or “auto”.
+When “auto”, server-side apply usage will be based on the release’s previous usage.
+Defaults to “auto”.
+
+
diff --git a/docs/spec/v2/helmreleases.md b/docs/spec/v2/helmreleases.md
index e63f494d6..b15fd71b1 100644
--- a/docs/spec/v2/helmreleases.md
+++ b/docs/spec/v2/helmreleases.md
@@ -745,7 +745,11 @@ The field offers the following subfields:
- `.force` (Optional): Forces resource updates through a replacement strategy.
Defaults to `false`.
- `.recreate` (Optional): Performs Pod restarts if applicable. Defaults to
- `false`.
+ `false`. **Warning**: As of Flux v2.8, this option is deprecated and no
+ longer has any effect. It will be removed in a future release. The
+ helm-controller will print a warning if this option is used. Please
+ see the [Helm 4 issue](https://github.com/fluxcd/helm-controller/issues/1300#issuecomment-3740272924)
+ for more details.
### Uninstall configuration
diff --git a/go.mod b/go.mod
index 101a66a4f..ab700728f 100644
--- a/go.mod
+++ b/go.mod
@@ -14,18 +14,21 @@ replace (
sigs.k8s.io/kustomize/kyaml => sigs.k8s.io/kustomize/kyaml v0.20.1
)
+// Just checking if CI passes with my bugfix: https://github.com/helm/helm/pull/31730
+replace helm.sh/helm/v4 => ./helm
+
require (
github.com/Masterminds/semver/v3 v3.4.0
- github.com/fluxcd/cli-utils v0.36.0-flux.15
+ github.com/fluxcd/cli-utils v0.37.0-flux.1
github.com/fluxcd/helm-controller/api v1.4.0
github.com/fluxcd/pkg/apis/acl v0.9.0
github.com/fluxcd/pkg/apis/event v0.21.0
github.com/fluxcd/pkg/apis/kustomize v1.14.0
- github.com/fluxcd/pkg/apis/meta v1.23.0
+ github.com/fluxcd/pkg/apis/meta v1.24.0
github.com/fluxcd/pkg/auth v0.33.0
github.com/fluxcd/pkg/cache v0.12.0
- github.com/fluxcd/pkg/chartutil v1.17.0
- github.com/fluxcd/pkg/runtime v0.91.0
+ github.com/fluxcd/pkg/chartutil v1.19.1-0.20260112215923-5e9e934fe7e3
+ github.com/fluxcd/pkg/runtime v0.94.0
github.com/fluxcd/pkg/ssa v0.61.0
github.com/fluxcd/pkg/testserver v0.13.0
github.com/fluxcd/source-controller/api v1.7.2
@@ -40,14 +43,14 @@ require (
github.com/spf13/pflag v1.0.10
github.com/wI2L/jsondiff v0.7.0
go.uber.org/zap v1.27.0
- golang.org/x/text v0.31.0
- helm.sh/helm/v3 v3.19.2
- k8s.io/api v0.34.2
- k8s.io/apiextensions-apiserver v0.34.2
- k8s.io/apimachinery v0.34.2
- k8s.io/cli-runtime v0.34.2
- k8s.io/client-go v0.34.2
- k8s.io/kubectl v0.34.2
+ golang.org/x/text v0.33.0
+ helm.sh/helm/v4 v4.1.0-rc.1
+ k8s.io/api v0.35.0
+ k8s.io/apiextensions-apiserver v0.35.0
+ k8s.io/apimachinery v0.35.0
+ k8s.io/cli-runtime v0.35.0
+ k8s.io/client-go v0.35.0
+ k8s.io/kubectl v0.35.0
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/kustomize/api v0.21.0
@@ -68,11 +71,12 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
- github.com/BurntSushi/toml v1.5.0 // indirect
+ github.com/BurntSushi/toml v1.6.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
+ github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2 v1.39.6 // indirect
@@ -95,19 +99,18 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
- github.com/containerd/containerd v1.7.29 // indirect
- github.com/containerd/errdefs v1.0.0 // indirect
- github.com/containerd/log v0.1.0 // indirect
- github.com/containerd/platforms v0.2.1 // indirect
- github.com/cyphar/filepath-securejoin v0.6.0 // indirect
+ github.com/cloudflare/circl v1.6.1 // indirect
+ github.com/cyphar/filepath-securejoin v0.6.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/cli v28.3.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
+ github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
- github.com/fatih/color v1.16.0 // indirect
+ github.com/extism/go-sdk v1.7.1 // indirect
+ github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
@@ -130,7 +133,6 @@ require (
github.com/go-openapi/swag/typeutils v0.24.0 // indirect
github.com/go-openapi/swag/yamlutils v0.24.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
@@ -139,18 +141,15 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
- github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
- github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
- github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
+ github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
@@ -164,13 +163,11 @@ require (
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
- github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -180,15 +177,17 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
- github.com/rubenv/sql-migrate v1.8.0 // indirect
+ github.com/rubenv/sql-migrate v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.7.0 // indirect
- github.com/spf13/cobra v1.10.1 // indirect
+ github.com/spf13/cobra v1.10.2 // indirect
github.com/stoewer/go-strcase v1.3.1 // indirect
github.com/stretchr/testify v1.11.1 // indirect
+ github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 // indirect
+ github.com/tetratelabs/wazero v1.11.0 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
@@ -201,16 +200,17 @@ require (
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.44.0 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
- golang.org/x/net v0.47.0 // indirect
+ golang.org/x/net v0.48.0 // indirect
golang.org/x/oauth2 v0.33.0 // indirect
- golang.org/x/sync v0.18.0 // indirect
- golang.org/x/sys v0.38.0 // indirect
- golang.org/x/term v0.37.0 // indirect
+ golang.org/x/sync v0.19.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/term v0.39.0 // indirect
golang.org/x/time v0.14.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/api v0.256.0 // indirect
@@ -222,10 +222,10 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.4.0 // indirect
- k8s.io/apiserver v0.34.2 // indirect
- k8s.io/component-base v0.34.2 // indirect
+ k8s.io/apiserver v0.35.0 // indirect
+ k8s.io/component-base v0.35.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
oras.land/oras-go/v2 v2.6.0 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index a3b878bbe..a59ae7335 100644
--- a/go.sum
+++ b/go.sum
@@ -30,8 +30,8 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
-github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
-github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
+github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
@@ -44,10 +44,10 @@ github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
@@ -94,14 +94,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
-github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE=
-github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
-github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
-github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
-github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
-github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
-github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0=
github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
@@ -110,8 +104,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is=
-github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
+github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
+github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -132,6 +126,8 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a h1:UwSIFv5g5lIvbGgtf3tVwC7Ky9rmMFBp0RMs+6f6YqE=
+github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a/go.mod h1:C8DzXehI4zAbrdlbtOByKX6pfivJTBiV9Jjqv56Yd9Q=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
@@ -140,36 +136,38 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/extism/go-sdk v1.7.1 h1:lWJos6uY+tRFdlIHR+SJjwFDApY7OypS/2nMhiVQ9Sw=
+github.com/extism/go-sdk v1.7.1/go.mod h1:IT+Xdg5AZM9hVtpFUA+uZCJMge/hbvshl8bwzLtFyKA=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fluxcd/cli-utils v0.36.0-flux.15 h1:Et5QLnIpRjj+oZtM9gEybkAaoNsjysHq0y1253Ai94Y=
-github.com/fluxcd/cli-utils v0.36.0-flux.15/go.mod h1:AqRUmWIfNE7cdL6NWSGF0bAlypGs+9x5UQ2qOtlEzv4=
+github.com/fluxcd/cli-utils v0.37.0-flux.1 h1:k/VvPNT3tGa/l2N+qzHduaQr3GVbgoWS6nw7tGZz16w=
+github.com/fluxcd/cli-utils v0.37.0-flux.1/go.mod h1:aND5wX3LuTFtB7eUT7vsWr8mmxRVSPR2Wkvbn0SqPfw=
github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA=
github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4=
github.com/fluxcd/pkg/apis/event v0.21.0 h1:VVl0WmgDXJwDS3Pivkk+31h3fWHbq+BpbNLUF5d61ec=
github.com/fluxcd/pkg/apis/event v0.21.0/go.mod h1:jacQdE6DdxoBsUOLMzEZNtpd4TqtYaiH1DWoyHMSUSo=
github.com/fluxcd/pkg/apis/kustomize v1.14.0 h1:PmWqMpRX0v7/aCAUNWfohe4o1qa9G3Cg/vVr5PCedI4=
github.com/fluxcd/pkg/apis/kustomize v1.14.0/go.mod h1:CGRpU9Od4ht5+MHL6QlMfWaW87U9UTfGVM5CM4PZ28I=
-github.com/fluxcd/pkg/apis/meta v1.23.0 h1:fLis5YcHnOsyKYptzBtituBm5EWNx13I0bXQsy0FG4s=
-github.com/fluxcd/pkg/apis/meta v1.23.0/go.mod h1:UWsIbBPCxYvoVklr2mV2uLFBf/n17dNAmKFjRfApdDo=
+github.com/fluxcd/pkg/apis/meta v1.24.0 h1:+e33T4OL9oqMWZSltsgImvi+/Punx42X9NqFlPesH6o=
+github.com/fluxcd/pkg/apis/meta v1.24.0/go.mod h1:UWsIbBPCxYvoVklr2mV2uLFBf/n17dNAmKFjRfApdDo=
github.com/fluxcd/pkg/auth v0.33.0 h1:3ccwqpBr8uWEQgl15b7S0PwJ9EgtcKObg4J1jnaof2w=
github.com/fluxcd/pkg/auth v0.33.0/go.mod h1:ZAFC8pNZxhe+7RV2cQO1K9X62HM8BbRBnCE118oY/0A=
github.com/fluxcd/pkg/cache v0.12.0 h1:mabABT3jIfuo84VbIW+qvfqMZ7PbM5tXQgQvA2uo2rc=
github.com/fluxcd/pkg/cache v0.12.0/go.mod h1:HL/9cgBmwCdKIr3JH57rxrGdb7rOgX5Z1eJlHsaV1vE=
-github.com/fluxcd/pkg/chartutil v1.17.0 h1:UiSBRujE2/Qo8qrv8F3XGEMI5YANS0PpbG/r+CxKUW0=
-github.com/fluxcd/pkg/chartutil v1.17.0/go.mod h1:Zt8EolwLyYj0689Ivk9cL4mYZlR3BBi/XVGyeGmPVlE=
-github.com/fluxcd/pkg/runtime v0.91.0 h1:Z92sOLsJXa+0RIi/vNl87zF5qnsBUdOb60d2a0b4Ulo=
-github.com/fluxcd/pkg/runtime v0.91.0/go.mod h1:D/gUsaSpyw6Od2QEL7MELi5m+oUmwokuxUVZ+vKQxdo=
+github.com/fluxcd/pkg/chartutil v1.19.1-0.20260112215923-5e9e934fe7e3 h1:+wt81/9tbSpI0HJfXvC8Wmqp2Lz4Wz49QMKrdMr58og=
+github.com/fluxcd/pkg/chartutil v1.19.1-0.20260112215923-5e9e934fe7e3/go.mod h1:9H9EGMP98YqwQmYuKTv9M7b1giJ6U0Z/0QFMraZxTmA=
+github.com/fluxcd/pkg/runtime v0.94.0 h1:z33lG+albHTmmcpZgV7DY5VVUZXFFAErnBBATDI2B5I=
+github.com/fluxcd/pkg/runtime v0.94.0/go.mod h1:/E4dT1pdSkidyRTR5ghSzoyHEUcEJw3ipvJt597ArOA=
github.com/fluxcd/pkg/ssa v0.61.0 h1:GeueQfZVrjPLEzmEkq6gpFTBr1MDcqUihCQDf6AaIo8=
github.com/fluxcd/pkg/ssa v0.61.0/go.mod h1:PNRlgihYbmlQU5gzsB14nrsNMbtACNanBnKhLCWmeX8=
github.com/fluxcd/pkg/testserver v0.13.0 h1:xEpBcEYtD7bwvZ+i0ZmChxKkDo/wfQEV3xmnzVybSSg=
github.com/fluxcd/pkg/testserver v0.13.0/go.mod h1:akRYv3FLQUsme15na9ihECRG6hBuqni4XEY9W8kzs8E=
github.com/fluxcd/source-controller/api v1.7.2 h1:/lg/xoyRjxwdhHKqjTxQS2o1cp+DMKJ8W4rpm+ZLemQ=
github.com/fluxcd/source-controller/api v1.7.2/go.mod h1:2JtCeUVpl0aqKImS19jUz9EEnMdzgqNWHkllrIhV004=
-github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
-github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
+github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0=
+github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -223,8 +221,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -257,23 +253,16 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
@@ -282,6 +271,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvH
github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca h1:T54Ema1DU8ngI+aef9ZhAhNGQhcRTrWxVeG07F+c/Rw=
+github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
@@ -292,8 +283,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -334,8 +323,6 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
-github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -348,10 +335,8 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw=
-github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 h1:H55sU3giNgBkIvmAo0vI/AAFwVTwfWsf6MN3+9H6U8o=
@@ -362,8 +347,6 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
-github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -387,10 +370,10 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
-github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/rubenv/sql-migrate v1.8.1 h1:EPNwCvjAowHI3TnZ+4fQu3a915OpnQoPAjTXCGOy2U0=
+github.com/rubenv/sql-migrate v1.8.1/go.mod h1:BTIKBORjzyxZDS6dzoiw6eAFYJ1iNlGAtjn4LGeVjS8=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
@@ -403,8 +386,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
-github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -424,6 +407,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q=
+github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk=
+github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA=
+github.com/tetratelabs/wazero v1.11.0/go.mod h1:eV28rsN8Q+xwjogd7f4/Pp4xFxO7uOGbLcD/LzB1wiU=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -440,7 +427,6 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
@@ -494,8 +480,6 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
-go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
-go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -509,27 +493,24 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
-golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
-golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -540,23 +521,21 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
-golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
+golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -591,28 +570,26 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
-helm.sh/helm/v3 v3.19.2 h1:psQjaM8aIWrSVEly6PgYtLu/y6MRSmok4ERiGhZmtUY=
-helm.sh/helm/v3 v3.19.2/go.mod h1:gX10tB5ErM+8fr7bglUUS/UfTOO8UUTYWIBH1IYNnpE=
-k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
-k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
-k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo=
-k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE=
-k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
-k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE=
-k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI=
-k8s.io/cli-runtime v0.34.2 h1:cct1GEuWc3IyVT8MSCoIWzRGw9HJ/C5rgP32H60H6aE=
-k8s.io/cli-runtime v0.34.2/go.mod h1:X13tsrYexYUCIq8MarCBy8lrm0k0weFPTpcaNo7lms4=
-k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
-k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
-k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ=
-k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM=
+k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
+k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
+k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4=
+k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4=
+k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds=
+k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE=
+k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
+k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94=
+k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3 h1:liMHz39T5dJO1aOKHLvwaCjDbf07wVh6yaUlTpunnkE=
-k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/kubectl v0.34.2 h1:+fWGrVlDONMUmmQLDaGkQ9i91oszjjRAa94cr37hzqA=
-k8s.io/kubectl v0.34.2/go.mod h1:X2KTOdtZZNrTWmUD4oHApJ836pevSl+zvC5sI6oO2YQ=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc=
+k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
diff --git a/helm/.github/ISSUE_TEMPLATE/bug-report.yaml b/helm/.github/ISSUE_TEMPLATE/bug-report.yaml
new file mode 100644
index 000000000..1637d26a5
--- /dev/null
+++ b/helm/.github/ISSUE_TEMPLATE/bug-report.yaml
@@ -0,0 +1,70 @@
+name: Bug Report
+description: Report a bug encountered in Helm
+labels: kind/bug
+body:
+ - type: textarea
+ id: problem
+ attributes:
+ label: What happened?
+ description: |
+ Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
+ validations:
+ required: true
+
+ - type: textarea
+ id: expected
+ attributes:
+ label: What did you expect to happen?
+ validations:
+ required: true
+
+ - type: textarea
+ id: repro
+ attributes:
+ label: How can we reproduce it (as minimally and precisely as possible)?
+ description: |
+ Please list steps someone can follow to trigger the issue.
+
+ For example:
+ 1. Run `helm install mychart ./path-to-chart -f values.yaml --debug`
+ 2. Observe the following error: ...
+
+ You can include:
+ - a sample `values.yaml` block
+ - a link to a chart
+ - specific `helm` commands used
+
+ This helps others reproduce and debug your issue more effectively.
+ validations:
+ required: true
+
+ - type: textarea
+ id: helmVersion
+ attributes:
+ label: Helm version
+ value: |
+
+
+ ```console
+ $ helm version
+ # paste output here
+ ```
+
+ validations:
+ required: true
+
+ - type: textarea
+ id: kubeVersion
+ attributes:
+ label: Kubernetes version
+ value: |
+
+
+ ```console
+ $ kubectl version
+ # paste output here
+ ```
+
+
+ validations:
+ required: true
diff --git a/helm/.github/ISSUE_TEMPLATE/documentation.yaml b/helm/.github/ISSUE_TEMPLATE/documentation.yaml
new file mode 100644
index 000000000..bb1b7537c
--- /dev/null
+++ b/helm/.github/ISSUE_TEMPLATE/documentation.yaml
@@ -0,0 +1,27 @@
+name: Documentation
+description: Report any mistakes or missing information from the documentation or the examples
+labels: kind/documentation
+body:
+ - type: markdown
+ attributes:
+ value: |
+ ⚠️ **Note**: Most documentation lives in [helm/helm-www](https://github.com/helm/helm-www).
+ If your issue is about Helm website documentation or examples, please [open an issue there](https://github.com/helm/helm-www/issues/new/choose).
+
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would you like to be added?
+ description: |
+ Link to the issue (please include a link to the specific documentation or example).
+ Link to the issue raised in [Helm Documentation Improvement Proposal](https://github.com/helm/helm-www)
+ validations:
+ required: true
+
+ - type: textarea
+ id: rationale
+ attributes:
+ label: Why is this needed?
+ validations:
+ required: true
+
diff --git a/helm/.github/ISSUE_TEMPLATE/feature.yaml b/helm/.github/ISSUE_TEMPLATE/feature.yaml
new file mode 100644
index 000000000..45b9c3f94
--- /dev/null
+++ b/helm/.github/ISSUE_TEMPLATE/feature.yaml
@@ -0,0 +1,21 @@
+name: Enhancement/feature
+description: Provide supporting details for a feature in development
+labels: kind/feature
+body:
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would you like to be added?
+ description: |
+ Feature requests are unlikely to make progress as issues.
+ Initial discussion and ideas can happen on an issue.
+ But significant changes or features must be proposed as a [Helm Improvement Proposal](https://github.com/helm/community/blob/main/hips/hip-0001.md) (HIP)
+ validations:
+ required: true
+
+ - type: textarea
+ id: rationale
+ attributes:
+ label: Why is this needed?
+ validations:
+ required: true
diff --git a/helm/.github/dependabot.yml b/helm/.github/dependabot.yml
new file mode 100644
index 000000000..0133fd8f4
--- /dev/null
+++ b/helm/.github/dependabot.yml
@@ -0,0 +1,39 @@
+version: 2
+
+updates:
+ - # Keep dev-v3 branch dependencies up to date, while Helm v3 is within support
+ package-ecosystem: "gomod"
+ target-branch: "dev-v3"
+ directory: "/"
+ schedule:
+ interval: "daily"
+ groups:
+ k8s.io:
+ patterns:
+ - "k8s.io/api"
+ - "k8s.io/apiextensions-apiserver"
+ - "k8s.io/apimachinery"
+ - "k8s.io/apiserver"
+ - "k8s.io/cli-runtime"
+ - "k8s.io/client-go"
+ - "k8s.io/kubectl"
+ - package-ecosystem: "gomod"
+ target-branch: "main"
+ directory: "/"
+ schedule:
+ interval: "daily"
+ groups:
+ k8s.io:
+ patterns:
+ - "k8s.io/api"
+ - "k8s.io/apiextensions-apiserver"
+ - "k8s.io/apimachinery"
+ - "k8s.io/apiserver"
+ - "k8s.io/cli-runtime"
+ - "k8s.io/client-go"
+ - "k8s.io/kubectl"
+ - package-ecosystem: "github-actions"
+ target-branch: "main"
+ directory: "/"
+ schedule:
+ interval: "daily"
diff --git a/helm/.github/env b/helm/.github/env
new file mode 100644
index 000000000..9d79b174f
--- /dev/null
+++ b/helm/.github/env
@@ -0,0 +1,2 @@
+GOLANG_VERSION=1.25
+GOLANGCI_LINT_VERSION=v2.5.0
diff --git a/helm/.github/pull_request_template.md b/helm/.github/pull_request_template.md
new file mode 100644
index 000000000..0fe5f1106
--- /dev/null
+++ b/helm/.github/pull_request_template.md
@@ -0,0 +1,12 @@
+
+
+**What this PR does / why we need it**:
+
+**Special notes for your reviewer**:
+
+**If applicable**:
+- [ ] this PR contains user facing changes (the `docs needed` label should be applied if so)
+- [ ] this PR contains unit tests
+- [ ] this PR has been tested for backwards compatibility
diff --git a/helm/.github/workflows/build-test.yml b/helm/.github/workflows/build-test.yml
new file mode 100644
index 000000000..77e5d9343
--- /dev/null
+++ b/helm/.github/workflows/build-test.yml
@@ -0,0 +1,36 @@
+name: build-test
+on:
+ push:
+ branches:
+ - "main"
+ - "dev-v3"
+ - "release-**"
+ pull_request:
+ branches:
+ - "main"
+ - "dev-v3"
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout source code
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
+ - name: Setup Go
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # pin@6.2.0
+ with:
+ go-version: '${{ env.GOLANG_VERSION }}'
+ check-latest: true
+ - name: Test source headers are present
+ run: make test-source-headers
+ - name: Check if go modules need to be tidied
+ run: go mod tidy -diff
+ - name: Run unit tests
+ run: make test-coverage
+ - name: Test build
+ run: make build
diff --git a/helm/.github/workflows/codeql-analysis.yml b/helm/.github/workflows/codeql-analysis.yml
new file mode 100644
index 000000000..0f3fe6d8f
--- /dev/null
+++ b/helm/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,75 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches:
+ - main
+ - dev-v3
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches:
+ - main
+ - dev-v3
+ schedule:
+ - cron: '29 6 * * 6'
+
+permissions:
+ contents: read
+ security-events: write
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+ # Learn more:
+ # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # pinv4.30.7
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # pinv4.30.7
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # pinv4.30.7
diff --git a/helm/.github/workflows/golangci-lint.yml b/helm/.github/workflows/golangci-lint.yml
new file mode 100644
index 000000000..ede3b4c71
--- /dev/null
+++ b/helm/.github/workflows/golangci-lint.yml
@@ -0,0 +1,27 @@
+name: golangci-lint
+
+on:
+ push:
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ golangci:
+ name: golangci-lint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
+ - name: Setup Go
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # pin@6.2.0
+ with:
+ go-version: '${{ env.GOLANG_VERSION }}'
+ check-latest: true
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 #pin@9.2.0
+ with:
+ version: ${{ env.GOLANGCI_LINT_VERSION }}
diff --git a/helm/.github/workflows/govulncheck.yml b/helm/.github/workflows/govulncheck.yml
new file mode 100644
index 000000000..b1a50b553
--- /dev/null
+++ b/helm/.github/workflows/govulncheck.yml
@@ -0,0 +1,35 @@
+name: govulncheck
+on:
+ push:
+ paths:
+ - go.sum
+ - .github/workflows/govulncheck.yml
+ pull_request:
+ paths:
+ - go.sum
+ - .github/workflows/govulncheck.yml
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions: read-all
+
+jobs:
+ govulncheck:
+ name: govulncheck
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+ with:
+ persist-credentials: false
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
+ - name: Setup Go
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # pin@6.2.0
+ with:
+ go-version: '${{ env.GOLANG_VERSION }}'
+ check-latest: true
+ - name: govulncheck
+ uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # pin@1.0.4
+ with:
+ go-package: ./...
diff --git a/helm/.github/workflows/release.yml b/helm/.github/workflows/release.yml
new file mode 100644
index 000000000..46c999191
--- /dev/null
+++ b/helm/.github/workflows/release.yml
@@ -0,0 +1,115 @@
+name: release
+on:
+ create:
+ tags:
+ - v*
+ push:
+ branches:
+ - main
+
+permissions: read-all
+
+# Note the only differences between release and canary-release jobs are:
+# - only canary passes --overwrite flag
+# - the VERSION make variable passed to 'make dist checksum' is expected to
+# be "canary" if the job is triggered by a push to "main" branch. If the
+# job is triggered by a tag push, VERSION should be the tag ref.
+jobs:
+ release:
+ if: startsWith(github.ref, 'refs/tags/v') && github.repository == 'helm/helm'
+ runs-on: ubuntu-latest-16-cores
+ steps:
+ - name: Checkout source code
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+ with:
+ fetch-depth: 0
+
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
+
+ - name: Setup Go
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # pin@6.2.0
+ with:
+ go-version: '${{ env.GOLANG_VERSION }}'
+ check-latest: true
+ - name: Run unit tests
+ run: make test-coverage
+ - name: Build Helm Binaries
+ run: |
+ set -eu -o pipefail
+
+ make build-cross VERSION="${{ github.ref_name }}"
+ make dist checksum VERSION="${{ github.ref_name }}"
+
+ - name: Set latest version
+ run: |
+ set -eu -o pipefail
+
+ mkdir -p _dist_versions
+
+ # Push the latest semver tag, excluding prerelease tags
+ LATEST_VERSION="$(git tag | sort -r --version-sort | grep '^v[0-9]' | grep -v '-' | head -n1)"
+ echo "LATEST_VERSION=${LATEST_VERSION}"
+ if [[ "${LATEST_VERSION}" != v4.* ]]; then
+ echo "Error: Latest version ${LATEST_VERSION} is not a v4 release"
+ exit 1
+ fi
+
+ echo "${LATEST_VERSION}" > _dist_versions/helm-latest-version
+ echo "${LATEST_VERSION}" > _dist_versions/helm4-latest-version
+
+ - name: Upload Binaries
+ uses: bacongobbler/azure-blob-storage-upload@50f7d898b7697e864130ea04c303ca38b5751c50 # pin@3.0.0
+ env:
+ AZURE_STORAGE_CONNECTION_STRING: "${{ secrets.AZURE_STORAGE_CONNECTION_STRING }}"
+ AZURE_STORAGE_CONTAINER_NAME: "${{ secrets.AZURE_STORAGE_CONTAINER_NAME }}"
+ with:
+ source_dir: _dist
+ container_name: ${{ secrets.AZURE_STORAGE_CONTAINER_NAME }}
+ connection_string: ${{ secrets.AZURE_STORAGE_CONNECTION_STRING }}
+ extra_args: '--pattern helm-*'
+
+ - name: Upload Version tag files
+ uses: bacongobbler/azure-blob-storage-upload@50f7d898b7697e864130ea04c303ca38b5751c50 # pin@3.0.0
+ env:
+ AZURE_STORAGE_CONNECTION_STRING: "${{ secrets.AZURE_STORAGE_CONNECTION_STRING }}"
+ AZURE_STORAGE_CONTAINER_NAME: "${{ secrets.AZURE_STORAGE_CONTAINER_NAME }}"
+ with:
+ overwrite: 'true'
+ source_dir: _dist_versions
+ container_name: ${{ secrets.AZURE_STORAGE_CONTAINER_NAME }}
+ connection_string: ${{ secrets.AZURE_STORAGE_CONNECTION_STRING }}
+
+ canary-release:
+ runs-on: ubuntu-latest-16-cores
+ if: github.ref == 'refs/heads/main' && github.repository == 'helm/helm'
+ steps:
+ - name: Checkout source code
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # pin@v6.0.1
+
+ - name: Add variables to environment file
+ run: cat ".github/env" >> "$GITHUB_ENV"
+
+ - name: Setup Go
+ uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # pin@6.2.0
+ with:
+ go-version: '${{ env.GOLANG_VERSION }}'
+ check-latest: true
+
+ - name: Run unit tests
+ run: make test-coverage
+
+ - name: Build Helm Binaries
+ run: |
+ make build-cross
+ make dist checksum VERSION="canary"
+
+ - name: Upload Binaries
+ uses: bacongobbler/azure-blob-storage-upload@50f7d898b7697e864130ea04c303ca38b5751c50 # pin@3.0.0
+ with:
+ source_dir: _dist
+ container_name: ${{ secrets.AZURE_STORAGE_CONTAINER_NAME }}
+ connection_string: ${{ secrets.AZURE_STORAGE_CONNECTION_STRING }}
+ extra_args: '--pattern helm-*'
+ # WARNING: this will overwrite existing blobs in your blob storage
+ overwrite: 'true'
diff --git a/helm/.github/workflows/scorecards.yml b/helm/.github/workflows/scorecards.yml
new file mode 100644
index 000000000..514a649cb
--- /dev/null
+++ b/helm/.github/workflows/scorecards.yml
@@ -0,0 +1,69 @@
+name: Scorecard supply-chain security
+on:
+ # For Branch-Protection check. Only the default branch is supported. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+ branch_protection_rule:
+ # To guarantee Maintained check is occasionally updated. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+ schedule:
+ - cron: '25 7 * * 0'
+ push:
+ branches: [ "main" ]
+
+# Declare default permissions as read only.
+permissions: read-all
+
+jobs:
+ analysis:
+ name: Scorecard analysis
+ runs-on: ubuntu-latest
+ permissions:
+ # Needed to upload the results to code-scanning dashboard.
+ security-events: write
+ # Needed to publish results and get a badge (see publish_results below).
+ id-token: write
+ # Uncomment the permissions below if installing in a private repository.
+ # contents: read
+ # actions: read
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
+ with:
+ results_file: results.sarif
+ results_format: sarif
+ # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
+ # - you want to enable the Branch-Protection check on a *public* repository, or
+ # - you are installing Scorecard on a *private* repository
+ # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
+ # repo_token: ${{ secrets.SCORECARD_TOKEN }}
+
+ # Public repositories:
+ # - Publish results to OpenSSF REST API for easy access by consumers
+ # - Allows the repository to include the Scorecard badge.
+ # - See https://github.com/ossf/scorecard-action#publishing-results.
+ # For private repositories:
+ # - `publish_results` will always be set to `false`, regardless
+ # of the value entered here.
+ publish_results: true
+
+ # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+ # format to the repository Actions tab.
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard (optional).
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard
+ - name: "Upload to code-scanning"
+ uses: github/codeql-action/upload-sarif@v4
+ with:
+ sarif_file: results.sarif
diff --git a/helm/.github/workflows/stale.yaml b/helm/.github/workflows/stale.yaml
new file mode 100644
index 000000000..574427a5d
--- /dev/null
+++ b/helm/.github/workflows/stale.yaml
@@ -0,0 +1,18 @@
+name: "Close stale issues"
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'This issue has been marked as stale because it has been open for 90 days with no activity. This thread will be automatically closed in 30 days if no further activity occurs.'
+ stale-pr-message: 'This pull request has been marked as stale because it has been open for 90 days with no activity. This pull request will be automatically closed in 30 days if no further activity occurs.'
+ exempt-issue-labels: 'keep open,v4.x,in progress'
+ days-before-stale: 90
+ days-before-close: 30
+ operations-per-run: 200
diff --git a/helm/.gitignore b/helm/.gitignore
new file mode 100644
index 000000000..0fd2c6bda
--- /dev/null
+++ b/helm/.gitignore
@@ -0,0 +1,16 @@
+*.exe
+*.swp
+.DS_Store
+.coverage/
+.idea
+.vimrc
+.vscode/
+.devcontainer/
+_dist/
+_dist_versions/
+bin/
+vendor/
+# Ignores charts pulled for dependency build tests
+cmd/helm/testdata/testcharts/issue-7233/charts/*
+pkg/cmd/testdata/testcharts/issue-7233/charts/*
+.pre-commit-config.yaml
diff --git a/helm/.golangci.yml b/helm/.golangci.yml
new file mode 100644
index 000000000..7eca135e5
--- /dev/null
+++ b/helm/.golangci.yml
@@ -0,0 +1,83 @@
+formatters:
+ enable:
+ - gofmt
+ - goimports
+
+ exclusions:
+ generated: lax
+
+ settings:
+ gofmt:
+ simplify: true
+
+ goimports:
+ local-prefixes:
+ - helm.sh/helm/v4
+
+linters:
+ default: none
+
+ enable:
+ - depguard
+ - dupl
+ - gomodguard
+ - govet
+ - ineffassign
+ - misspell
+ - nakedret
+ - revive
+ - sloglint
+ - staticcheck
+ - thelper
+ - unused
+ - usestdlibvars
+ - usetesting
+ - exhaustive
+
+ exclusions:
+
+ generated: lax
+
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+
+ rules:
+ # This rule is triggered for packages like 'util'. When changes to those packages
+ # occur it triggers this rule. This exclusion enables making changes to existing
+ # packages.
+ - linters:
+ - revive
+ text: 'var-naming: avoid meaningless package names'
+
+ warn-unused: true
+
+ settings:
+ depguard:
+ rules:
+ Main:
+ deny:
+ - pkg: github.com/hashicorp/go-multierror
+ desc: "use errors instead"
+ - pkg: github.com/pkg/errors
+ desc: "use errors instead"
+
+ dupl:
+ threshold: 400
+
+ gomodguard:
+ blocked:
+ modules:
+ - github.com/evanphx/json-patch:
+ recommendations:
+ - github.com/evanphx/json-patch/v5
+
+ exhaustive:
+ default-signifies-exhaustive: true
+
+run:
+ timeout: 10m
+
+version: "2"
diff --git a/helm/ADOPTERS.md b/helm/ADOPTERS.md
new file mode 100644
index 000000000..a83519fea
--- /dev/null
+++ b/helm/ADOPTERS.md
@@ -0,0 +1,25 @@
+ To add your organization to this list, open a pull request that adds your
+ organization's name, optionally with a link. The list is in alphabetical order.
+
+ (Remember to use `git commit --signoff` to comply with the DCO)
+
+# Organizations Using Helm
+
+- [IBM](https://www.ibm.com)
+- [InfoCert](https://www.infocert.it/)
+- [Intercept](https://Intercept.cloud)
+- [Microsoft](https://microsoft.com)
+- [New Relic](https://www.newrelic.com)
+- [Octopus Deploy](https://octopus.com/)
+- [Omnistrate](https://omnistrate.com)
+- [Oracle](www.oracle.com)
+- [Percona](https://www.percona.com)
+- [Qovery](https://www.qovery.com/)
+- [Samsung SDS](https://www.samsungsds.com/)
+- [Softonic](https://hello.softonic.com/)
+- [SyncTune](https://mb-consulting.dev)
+- [Syself](https://syself.com)
+- [Ville de Montreal](https://montreal.ca)
+
+
+_This file is part of the CNCF official documentation for projects._
diff --git a/helm/AGENTS.md b/helm/AGENTS.md
new file mode 100644
index 000000000..d2904a9da
--- /dev/null
+++ b/helm/AGENTS.md
@@ -0,0 +1,48 @@
+# AGENTS.md
+
+## Overview
+Helm is a package manager for Kubernetes written in Go, supporting v3 (stable) and v4 (unstable) APIs.
+
+## Build & Test
+```bash
+make build # Build binary
+make test # Run all tests (style + unit)
+make test-unit # Unit tests only
+make test-coverage # With coverage
+make test-style # Linting
+golangci-lint run # Direct linting
+go test -run TestName # Specific test
+```
+
+## Code Structure
+- `/cmd/helm/` - CLI entry point (Cobra-based)
+- `/pkg/` - Public API
+ - `action/` - Core operations (install, upgrade, rollback)
+ - `chart/v2/` - Stable chart format
+ - `engine/` - Template rendering (Go templates + Sprig)
+ - `registry/` - OCI support
+ - `storage/` - Release backends (Secrets/ConfigMaps/SQL)
+- `/internal/` - Private implementation
+ - `chart/v3/` - Next-gen chart format
+
+## Development Guidelines
+
+### Code Standards
+- Use table-driven tests with testify
+- Golden files in `testdata/` for complex output
+- Mock Kubernetes clients for action tests
+- All commits must include DCO sign-off: `git commit -s`
+
+### Branching
+- `main` - Helm v4 development
+- `dev-v3` - Helm v3 stable (backport from main)
+
+### Dependencies
+- `k8s.io/client-go` - Kubernetes interaction
+- `github.com/spf13/cobra` - CLI framework
+- `github.com/Masterminds/sprig` - Template functions
+
+### Key Patterns
+- **Actions**: Operations in `/pkg/action/` use shared Configuration
+- **Dual Chart Support**: v2 (stable) in `/pkg/`, v3 (dev) in `/internal/`
+- **Storage Abstraction**: Pluggable release storage backends
diff --git a/helm/CONTRIBUTING.md b/helm/CONTRIBUTING.md
new file mode 100644
index 000000000..e809e7ca2
--- /dev/null
+++ b/helm/CONTRIBUTING.md
@@ -0,0 +1,363 @@
+# Contributing Guidelines
+
+The Helm project accepts contributions via GitHub pull requests. This document outlines the process
+to help get your contribution accepted.
+
+## Reporting a Security Issue
+
+Most of the time, when you find a bug in Helm, it should be reported using [GitHub
+issues](https://github.com/helm/helm/issues). However, if you are reporting a _security
+vulnerability_, please email a report to
+[cncf-helm-security@lists.cncf.io](mailto:cncf-helm-security@lists.cncf.io). This will give us a
+chance to try to fix the issue before it is exploited in the wild.
+
+## Helm v3 and v4
+
+Helm v4 is currently under development on the `main` branch. During the development of Helm v4 and for some time after its released, Helm v3 will continue to be supported and developed on the `dev-v3` branch. Helm v3 will continue to get bug fixes and updates for new Kubernetes releases. Helm v4 is where new features and major changes will happen. For features to be backported to Helm v3, an exception will be needed. Bugs should first be fixed on Helm v4 and then backported to Helm v3.
+
+## Sign Your Work
+
+The sign-off is a simple line at the end of the explanation for a commit. All commits need to be
+signed. Your signature certifies that you wrote the patch or otherwise have the right to contribute
+the material. The rules are pretty simple, if you can certify the below (from
+[developercertificate.org](https://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your commit automatically
+with `git commit -s`.
+
+The following command will update your git config with `user.email`:
+
+``` bash
+git config --global user.email joe.smith@example.com
+```
+
+This command will update your git config with `user.name`:
+
+``` bash
+git config --global user.name "Joe Smith"
+```
+
+Note: If your git config information is set properly then viewing the `git log` information for your
+ commit will look something like this:
+
+```
+Author: Joe Smith
+Date: Thu Feb 2 11:41:15 2018 -0800
+
+ Update README
+
+ Signed-off-by: Joe Smith
+```
+
+Notice the `Author` and `Signed-off-by` lines match. If they don't your PR will be rejected by the
+automated DCO check.
+
+## Support Channels
+
+Whether you are a user or contributor, official support channels include:
+
+- [Issues](https://github.com/helm/helm/issues)
+- Slack:
+ - User: [#helm-users](https://kubernetes.slack.com/messages/C0NH30761/details/)
+ - Contributor: [#helm-dev](https://kubernetes.slack.com/messages/C51E88VDG/)
+
+Before opening a new issue or submitting a new pull request, it's helpful to search the project -
+it's likely that another user has already reported the issue you're facing, or it's a known issue
+that we're already aware of. It is also worth asking on the Slack channels.
+
+## Milestones
+
+We use milestones to track progress of specific planned releases.
+
+For example, if the latest currently-released version is `3.2.1`, an issue/PR which pertains to a
+specific upcoming bugfix or feature release could fall into one of two different active milestones:
+`3.2.2` or `3.3.0`.
+
+Issues and PRs which are deemed backwards-incompatible may be added to the discussion items for
+Helm 4 with [label:v4.x](https://github.com/helm/helm/labels/v4.x). An issue or PR that we are not
+sure if we will be addressing will not be added to any milestone.
+
+A milestone (and hence release) can be closed when all outstanding issues/PRs have been closed
+or moved to another milestone and the associated release has been published.
+
+## Semantic Versioning
+
+Helm maintains a strong commitment to backward compatibility. All of our changes to protocols and
+formats are backward compatible from one major release to the next. No features, flags, or commands
+are removed or substantially modified (unless we need to fix a security issue).
+
+We also remain committed to not changing publicly accessible Go library definitions inside of the `pkg/` directory of our source code in a non-backwards-compatible way.
+
+For more details on Helm’s minor and patch release backwards-compatibility rules, please read [HIP-0004](https://github.com/helm/community/blob/main/hips/hip-0004.md)
+
+For a quick summary of our backward compatibility guidelines for releases between 3.0 and 4.0:
+
+- Command line commands, flags, and arguments MUST be backward compatible
+- File formats (such as Chart.yaml) MUST be backward compatible
+- Any chart that worked on a previous version of Helm 3 MUST work on a new version of Helm 3
+ (barring the cases where (a) Kubernetes itself changed, and (b) the chart worked because it
+ exploited a bug)
+- Chart repository functionality MUST be backward compatible
+- Go libraries inside of `pkg/` MUST remain backward compatible, though code inside of `cmd/` and
+ `internal/` may be changed from release to release without notice.
+
+## Issues
+
+Issues are used as the primary method for tracking anything to do with the Helm project.
+
+### Issue Types
+
+There are 5 types of issues (each with their own corresponding [label](#labels)):
+
+- `question/support`: These are support or functionality inquiries that we want to have a record of
+ for future reference. Generally these are questions that are too complex or large to store in the
+ Slack channel or have particular interest to the community as a whole. Depending on the
+ discussion, these can turn into `feature` or `bug` issues.
+- `proposal`: Used for items (like this one) that propose a new ideas or functionality that require
+ a larger community discussion. This allows for feedback from others in the community before a
+ feature is actually developed. This is not needed for small additions. Final word on whether
+ a feature needs a proposal is up to the core maintainers. All issues that are proposals should
+ both have a label and an issue title of "Proposal: [the rest of the title]." A proposal can become
+ a `feature` and does not require a milestone.
+- `feature`: These track specific feature requests and ideas until they are complete. They can
+ evolve from a `proposal` or can be submitted individually depending on the size.
+- `bug`: These track bugs with the code
+- `docs`: These track problems with the documentation (i.e. missing or incomplete)
+
+### Issue Lifecycle
+
+The issue lifecycle is mainly driven by the core maintainers, but is good information for those
+contributing to Helm. All issue types follow the same general lifecycle. Differences are noted
+below.
+
+1. Issue creation
+2. Triage
+ - The maintainer in charge of triaging will apply the proper labels for the issue. This includes
+ labels for priority, type, and metadata (such as `good first issue`). The only issue priority
+ we will be tracking is whether the issue is "critical." If additional levels are needed
+ in the future, we will add them.
+ - (If needed) Clean up the title to succinctly and clearly state the issue. Also ensure that
+ proposals are prefaced with "Proposal: [the rest of the title]".
+ - Add the issue to the correct milestone. If any questions come up, don't worry about adding the
+ issue to a milestone until the questions are answered.
+ - We attempt to do this process at least once per work day.
+3. Discussion
+ - Issues that are labeled `feature` or `proposal` must write a Helm Improvement Proposal (HIP).
+ See [Proposing an Idea](#proposing-an-idea). Smaller quality-of-life enhancements are exempt.
+ - Issues that are labeled as `feature` or `bug` should be connected to the PR that resolves it.
+ - Whoever is working on a `feature` or `bug` issue (whether a maintainer or someone from the
+ community), should either assign the issue to themselves or make a comment in the issue saying
+ that they are taking it.
+ - `proposal` and `support/question` issues should stay open until resolved or if they have not
+ been active for more than 30 days. This will help keep the issue queue to a manageable size
+ and reduce noise. Should the issue need to stay open, the `keep open` label can be added.
+4. Issue closure
+
+## Proposing an Idea
+
+Before proposing a new idea to the Helm project, please make sure to write up a [Helm Improvement
+Proposal](https://github.com/helm/community/tree/master/hips). A Helm Improvement Proposal is a
+design document that describes a new feature for the Helm project. The proposal should provide a
+concise technical specification and rationale for the feature.
+
+It is also worth considering vetting your idea with the community via the
+[cncf-helm](mailto:cncf-helm@lists.cncf.io) mailing list. Vetting an idea publicly before going as
+far as writing a proposal is meant to save the potential author time. Many ideas have been proposed;
+it's quite likely there are others in the community who may be working on a similar proposal, or a
+similar proposal may have already been written.
+
+HIPs are submitted to the [helm/community repository](https://github.com/helm/community). [HIP
+1](https://github.com/helm/community/blob/master/hips/hip-0001.md) describes the process to write a
+HIP as well as the review process.
+
+After your proposal has been approved, follow the [developer's
+guide](https://helm.sh/docs/community/developers/) to get started.
+
+## How to Contribute a Patch
+
+1. Identify or create the related issue. If you're proposing a larger change to
+ Helm, see [Proposing an Idea](#proposing-an-idea).
+2. Fork the desired repo; develop and test your code changes.
+3. Submit a pull request, making sure to sign your work and link the related issue.
+
+Coding conventions and standards are explained in the [official developer
+docs](https://helm.sh/docs/developers/).
+
+## Pull Requests
+
+Like any good open source project, we use Pull Requests (PRs) to track code changes.
+
+### PR Lifecycle
+
+1. PR creation
+ - PRs are usually created to fix or else be a subset of other PRs that fix a particular issue.
+ - We more than welcome PRs that are currently in progress. They are a great way to keep track of
+ important work that is in-flight, but useful for others to see. If a PR is a work in progress,
+ it **must** be prefaced with "WIP: [title]". Once the PR is ready for review, remove "WIP"
+ from the title.
+ - It is preferred, but not required, to have a PR tied to a specific issue. There can be
+ circumstances where if it is a quick fix then an issue might be overkill. The details provided
+ in the PR description would suffice in this case.
+2. Triage
+ - The maintainer in charge of triaging will apply the proper labels for the issue. This should
+ include at least a size label, `bug` or `feature`, and `awaiting review` once all labels are
+ applied. See the [Labels section](#labels) for full details on the definitions of labels.
+ - Add the PR to the correct milestone. This should be the same as the issue the PR closes.
+3. Assigning reviews
+ - Once a review has the `awaiting review` label, maintainers will review them as schedule
+ permits. The maintainer who takes the issue should self-request a review.
+ - PRs from a community member with the label `size/S` or larger requires 2 review approvals from
+ maintainers before it can be merged. Those with `size/XS` are per the judgement of the
+ maintainers. For more detail see the [Size Labels](#size-labels) section.
+4. Reviewing/Discussion
+ - All reviews will be completed using GitHub review tool.
+ - A "Comment" review should be used when there are questions about the code that should be
+ answered, but that don't involve code changes. This type of review does not count as approval.
+ - A "Changes Requested" review indicates that changes to the code need to be made before they
+ will be merged.
+ - Reviewers should update labels as needed (such as `needs rebase`)
+5. Address comments by answering questions or changing code
+6. LGTM (Looks good to me)
+ - Once a Reviewer has completed a review and the code looks ready to merge, an "Approve" review
+ is used to signal to the contributor and to other maintainers that you have reviewed the code
+ and feel that it is ready to be merged.
+7. Merge or close
+ - PRs should stay open until merged or if they have not been active for more than 30 days. This
+ will help keep the PR queue to a manageable size and reduce noise. Should the PR need to stay
+ open (like in the case of a WIP), the `keep open` label can be added.
+ - Before merging a PR, refer to the topic on [Size Labels](#size-labels) below to determine if
+ the PR requires more than one LGTM to merge.
+ - If the owner of the PR is listed in the `OWNERS` file, that user **must** merge their own PRs
+ or explicitly request another OWNER do that for them.
+ - If the owner of a PR is _not_ listed in `OWNERS`, any core maintainer may merge the PR.
+
+### Documentation PRs
+
+Documentation PRs should be made on the docs repo: . Keeping Helm's documentation up to date is highly desirable, and is recommended for all user facing changes. Accurate and helpful documentation is critical for effectively communicating Helm's behavior to a wide audience.
+
+Small, ad-hoc changes/PRs to Helm which introduce user facing changes, which would benefit from documentation changes, should apply the `docs needed` label. Larger changes associated with a HIP should track docs via that HIP. The `docs needed` label doesn't block PRs, and maintainers/PR reviewers should apply discretion judging in whether the `docs needed` label should be applied.
+
+### Profiling PRs
+
+If your contribution requires profiling to check memory and/or CPU usage, you can set `HELM_PPROF_CPU_PROFILE=/path/to/cpu.prof` and/or `HELM_PPROF_MEM_PROFILE=/path/to/mem.prof` environment variables to collect runtime profiling data for analysis. You can use Golang's [pprof](https://github.com/google/pprof/blob/main/doc/README.md) tool to inspect the results.
+
+Example analysing collected profiling data
+```
+HELM_PPROF_CPU_PROFILE=cpu.prof HELM_PPROF_MEM_PROFILE=mem.prof helm show all bitnami/nginx
+
+# Visualize graphs. You need to have installed graphviz package in your system
+go tool pprof -http=":8000" cpu.prof
+
+go tool pprof -http=":8001" mem.prof
+```
+
+## The Triager
+
+Each week, one of the core maintainers will serve as the designated "triager" starting after the
+public stand-up meetings on Thursday. This person will be in charge triaging new PRs and issues
+throughout the work week.
+
+## Labels
+
+The following tables define all label types used for Helm. It is split up by category.
+
+### Common
+
+| Label | Description |
+| ----- | ----------- |
+| `bug` | Marks an issue as a bug or a PR as a bugfix |
+| `critical` | Marks an issue or PR as critical. This means that addressing the PR or issue is top priority and must be addressed as soon as possible |
+| `docs` | Indicates the issue or PR is a documentation change |
+| `feature` | Marks the issue as a feature request or a PR as a feature implementation |
+| `keep open` | Denotes that the issue or PR should be kept open past 30 days of inactivity |
+| `refactor` | Indicates that the issue is a code refactor and is not fixing a bug or adding additional functionality |
+
+### Issue Specific
+
+| Label | Description |
+| ----- | ----------- |
+| `help wanted` | Marks an issue needs help from the community to solve |
+| `proposal` | Marks an issue as a proposal |
+| `question/support` | Marks an issue as a support request or question |
+| `good first issue` | Marks an issue as a good starter issue for someone new to Helm |
+| `wont fix` | Marks an issue as discussed and will not be implemented (or accepted in the case of a proposal) |
+
+### PR Specific
+
+| Label | Description |
+| ----- | ----------- |
+| `awaiting review` | Indicates a PR has been triaged and is ready for someone to review |
+| `breaking` | Indicates a PR has breaking changes (such as API changes) |
+| `in progress` | Indicates that a maintainer is looking at the PR, even if no review has been posted yet |
+| `needs rebase` | Indicates a PR needs to be rebased before it can be merged |
+| `needs pick` | Indicates a PR needs to be cherry-picked into a feature branch (generally bugfix branches). Once it has been, the `picked` label should be applied and this one removed |
+| `picked` | This PR has been cherry-picked into a feature branch |
+| `docs needed` | Tracks PRs that introduces a feature/change for which documentation update would be desirable (non-blocking). Once a suitable documentation PR has been created, then this label should be removed |
+
+#### Size labels
+
+Size labels are used to indicate how "dangerous" a PR is. The guidelines below are used to assign
+the labels, but ultimately this can be changed by the maintainers. For example, even if a PR only
+makes 30 lines of changes in 1 file, but it changes key functionality, it will likely be labeled as
+`size/L` because it requires sign off from multiple people. Conversely, a PR that adds a small
+feature, but requires another 150 lines of tests to cover all cases, could be labeled as `size/S`
+even though the number of lines is greater than defined below.
+
+Any changes from the community labeled as `size/S` or larger should be thoroughly tested before
+merging and always requires approval from 2 core maintainers. PRs submitted by a core maintainer,
+regardless of size, only requires approval from one additional maintainer. This ensures there are at
+least two maintainers who are aware of any significant PRs introduced to the codebase.
+
+| Label | Description |
+| ----- | ----------- |
+| `size/XS` | Denotes a PR that changes 0-9 lines, ignoring generated files. Very little testing may be required depending on the change. |
+| `size/S` | Denotes a PR that changes 10-29 lines, ignoring generated files. Only small amounts of manual testing may be required. |
+| `size/M` | Denotes a PR that changes 30-99 lines, ignoring generated files. Manual validation should be required. |
+| `size/L` | Denotes a PR that changes 100-499 lines, ignoring generated files. |
+| `size/XL` | Denotes a PR that changes 500-999 lines, ignoring generated files. |
+| `size/XXL` | Denotes a PR that changes 1000+ lines, ignoring generated files. |
diff --git a/helm/KEYS b/helm/KEYS
new file mode 100644
index 000000000..e772fff40
--- /dev/null
+++ b/helm/KEYS
@@ -0,0 +1,1060 @@
+This file contains the PGP keys of developers who have signed releases of Helm.
+
+For your convenience, commands are provided for those who use pgp and gpg.
+
+For users to import keys:
+ pgp < KEYS
+ or
+ gpg --import KEYS
+
+Developers to add their keys:
+ pgp -kxa and append it to this file.
+ or
+ (pgpk -ll && pgpk -xa ) >> KEYS
+ or
+ (gpg --list-sigs
+ && gpg --armor --export ) >> KEYS
+
+pub rsa4096/0x461449C25E36B98E 2017-11-10 [SC]
+ 672C657BE06B4B30969C4A57461449C25E36B98E
+uid [ultimate] Matthew Farina
+sig 3 0x461449C25E36B98E 2017-11-10 Matthew Farina
+sig 0x2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+sig 0x1EF612347F8A9958 2018-12-12 Adam Reese
+sig 0x62F49E747D911B60 2018-12-12 Matt Butcher
+sub rsa4096/0xCCCE67689DF05738 2017-11-10 [E]
+sig 0x461449C25E36B98E 2017-11-10 Matthew Farina
+sub rsa4096/0x9436E80BFBA46909 2017-11-10 [S] [expires: 2022-11-09]
+sig 0x461449C25E36B98E 2017-11-10 Matthew Farina
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFoFERgBEADdhgM8EPo9fxnu2iW75r4uha2TrhWaO3EJIo53sa6U9nePIeWc
+oWqjDZqYvIMJcylfocrVi4m6HdNcPrWo5pSWeKd8J9X8d4BUhoKFmJdHqWzgokwW
+Rk06Doro2FHFyHoPPrI3a1HGVWA0xFhBYqSbim4j/Q0FouS566MofeRGnnacJ88z
+Z7yErN5Gy4jk7pOgwvMewoGpEd8FMcyYSJfSjeoqdIZYp89EKTLbgQZuOJ9yVZnY
+c0mtpH57UbkrkGv8hRuViWSO99q/mpMQyWQGYVoTV4QM/0q4jUbkRazaeY3N4hGC
+I6Xf4ilWyNmmVODI6JcvWY+vXPtxIKjEjYiomVCF6jCYWWCA7cf3+kqJ+T4sc0NF
+fseR/TAOkDV/XsZ1ufbSHBEiZTIjLvoAGJ+u+3go+UysVVCw4L1NSGFeDrZ97KSe
+w0MeuV2SYfdZ4so7k4YDNbBLTVx0V/wl+laFtdjo167D18AYw54HIv3snHkjABfY
+7Q06Ye7FuuKzdrj9KpmzUYnN3hRGqe84GIcM3D5+vElj0vyg8th32Dig5Xi38s0M
+sz7hPg+oFk7csslMVAnLtWYvsv2FMSKB9FUHYv9AJ6yjYfyLlQgjjda0z6Sq5zpu
+qVZqTNSxEIZFDKfTgQV6rocIK5VKP063KS6qwpHzPxKADaLTUPOWeum9/wARAQAB
+tCRNYXR0aGV3IEZhcmluYSA8bWF0dEBtYXR0ZmFyaW5hLmNvbT6JAk4EEwEIADgW
+IQRnLGV74GtLMJacSldGFEnCXja5jgUCWgURGAIbAwULCQgHAwUVCgkICwUWAwIB
+AAIeAQIXgAAKCRBGFEnCXja5jjtQEADJvSx67Qz8gTxvUH3HaMsXaeb6BG3zLJXj
+34pqAGNkKB4/ZgpFVYE1R0QuvYn9CbFpD1UcSank3L3xBroeOEUN3kvOg3D6Bv8f
+mtwtW1TDjaWDTa0mZ8icanjXVNfK3K8pAwni2FPrW/tesEt/8GI48ZxPMzHk1qrL
+8mETLRn1EBL3vq5qPDIK87XhhW9WAgwsadn6BQKSTSVVUACBAlV7EbqE4DHqhwYz
+D1HrEIAtXkkb9JJejUnAbiOqPmm9s6iWC13K1P27FB8EEYiKxL8kb7xv5xW7+Pmg
+kb03OqZtZYu9Fl1MF1zVQe4mXVflcbj7mYU1kb8vepD6bOUA89z8FggU2Q38cxkD
+TYQsxpGwWz3nvEu29KbHmjQja1+G5D8kQ8bv1mNdiXQbOz51v2+7vowKKUoPQfp9
+n8Ez4dxWVrFtf218Mtt8wbYmmVYijLIBDArYKDeVqNNua8YC9641DcvRdCCvaYEx
+Q9vWKjpAWmXKy2bb7TQ2TjGRh+Ly47z+PTluqUeYuBREAN4Hd4xwiClRbhb3I9To
+YTJkPOkaOR967zBho5orA8xww4hcsufhjqsoU0/MGbG6jvJihHFR9Jq+0gVzakca
+K8tGRSA8l5xdjow5dVOPzeXuKDPuvHEwa63TWsH5H8s6iembNT1H9bate8wQT1TN
+9PH/6sthz4kCMwQQAQgAHRYhBFER2nPfEtjoEspGLyzbv7s3roIqBQJcET6LAAoJ
+ECzbv7s3roIqozgQAIG5IqJ7hYjndCLW2MBLEa9oA04QSgF9qcqfiG00tjhBVwEK
+YE6r7BUgC7r7dP1xVa/+5lVRATfiJ+Raq7udm/RQsamyp9Q8xBOuavPcJDZMX5m7
+OqPZMs+TDFPYM914GIWPAQf9ehaHHnmCNZXExxYlnZBPFsOcLYSNGH/xQeiA+q3F
+tCOdRhjcpbt4rcx+Jq/l6X3cxstFwcYeljhvebblpwcVNJVArVrWZmosFl3rz3bs
+PKfZKAvjV65knRkra73ZjN+YEYMMr6MzvVh/cnigk9XHgu5Y7imLv9qf1leyFCaa
+oJoQDAcHIfs/eQmaEbYUyw/jX53/PyGqXlmkW7D3wqAGH5yx+ske7otCiaHHoTK0
+vHsEvO9b4dLtr0uMMNRO7St+3EtMa070s537XymG1HSeW8QbVEg/+w2YW5DyTe5p
+WaNJS6WUc7UuIgEWvgitVxhUheZRumh5/EW673yI8iUchGslAuL1W5R1rXQfMPVA
+BsI8D8pWs9EKjP4Lpu1Wgoxm0O4kaAxRbbHjrIYLtoRRrakr+kfqjZ/rJM89JQpl
+NWNBZ61IDKROj7U2kLAxCJSB3RfAuqinyFGjxod7ENW7u6z0SCdupybbmylAfD+T
+t3Z2DBB9tjxNnsgb2pbcm8cDGrJOZhIDdcVChvMXnHNxEmXbHvTKocci0t4viQIz
+BBABCgAdFiEESdCchsPcjaPwoHYiHvYSNH+KmVgFAlwRP38ACgkQHvYSNH+KmVgP
+rxAAkhggTXggRwpWzgU7PRsj347DqtH3f/2EfTOhAi6PGOiw2EFocTrx47WHAjs6
+XFT+c0yHCv58fGHKrrfeOT1VCjk2xf0NSdf00CTHO+DqepNiXzFYCJ0fUTL3w2JC
+ugrfhwEdVH3TYJffFlmi0VZVCrGT3ZU1H+N/mVcd4FniOPWaGYoSG15iift4cAO/
+CynMFUbl5NYCuE/z9lR8o/3KSu7vuffLsvXdkxCX6fjxkSWcBKgH7ts7OWyPv9H1
+r/I295CoG9ZmeKVtScY7lamb+vOw9ryHbTACo0aprPQ1kCjr+3JIJdodNkRQvzZX
+Ayxmc/zWSmPlJ7zjVkmoLaU7YmN7dPaVpQiELQGKhm/TyH++ZxoA4Rw4dwtqqk86
++F5ncsqJ107IW7ce6lnZVEvUBD4DHkMRQQZOA9hWBxVeDznjXzfpNNTB07mtzArG
+nrbbnNu3epUPthZlhQ8C+dZeBOfGzyr3Aj6CQqKMziiL2Tf4Coa7PhHRBs6rf1PD
+xNhnnybCvaMJEMSyX6b/lqb967yVI6g3TXQvi0cGGvYmwEBOiKkXSRHtQBjC1Ocq
+qUjzg1dvyfJu84S0kSt2oEHL5n1TAvIrwqNNOwS6CL0x2pSLOVhZmpummSqybvsF
+YJjctDJvBA7URB9asMOK3CS6UsJaVzUFkybxaYIdUPylh1mJAjMEEAEKAB0WIQSr
+olKVmPZibEINM1ti9J50fZEbYAUCXBE1mgAKCRBi9J50fZEbYEcVEACOTG1qO0m/
++8T2S8rskKDrgoXMi22x3n4SqdKIA5TwWdWp18nVyXIxUWvI1cS73WupHNtEKTLc
++yObvNo1N3syj/5c14RcRLUcWTFKs596TcUP5/xNH33j0nFplKplBP4MegnduXsB
+HibxiEycpkTFVxc3xbW9KeWSzqEHxxOXE1okL0SDWTj/oNRToaDc4zdm26veZd25
+ycxqRkksZZCPuczqb2SB/mDqHx1jl4z2B6CzN3OUzMk40a77xwZXKNGTO4+fMEOJ
+Flch8YQXh+gPbS1F/Q7qCrQOkhoV3nI/0CxNgWNcPrUd52xtGHzgxbdrgT7L0XMO
+/KmIu1O8E+znjOxcSAklwh1xLsT01193vbVyW2pcmmtqo1ku0taLlw4T7VHQNb88
+uOKucXlA10L2lFFnqBWLOuZDcVpgywMjIrKTPoEpDcVPaBUDQCFBZE9ogA/Edhlo
+mxGxhtzG/O6wwFcLoleMH1Lf6zMxhwOAIvkWVjsuQ312uVy1RNY7b3UFrxOw8/qq
+UBy6AFE/dp9PF8BIQ37NHKeAlvCexEedwJi4RwH0hUQkBhxBeNrTOEE7cCaZ9Shz
+IWhPKxSRKKblYY4fpDzl2uMBwdetk9jfZF2ofoSOKXTVh+YJ8PzncD6xJVesbMIW
+0aPkERdmz8JeGBclBR0miED+zidofWCgD7kCDQRaBREYARAAqiqhYIA3ci/sJ7y3
+mJaQ/lsL2nsy+RgW52ETpLp3tIO2r3rxNn7CB/kJhPimDIo4OJSV2bl3Sr2llgwX
+PrBQ+Z5bCUV70uc1U0vvJEW/r9tkyOu3YV7VXWXtaQWkCgxIqWgNJvU5A/9/6vz9
+u1RdMZwxpjy/4HuWvHYRXlJmeeca/BEoaYWMRlECuJjIBcAzuVJTlKBT7x7U4Ptc
+qqZGbzr0+zU39y1kMXu/ayldlsF3k6DKYZYNaa8cKNqorV0FqBVm1JZSjiAAWqGp
+tmYxUmv/riY6cP28tP3G6noH1XqzEvZ3fdYIsGM29YQ1Y1vrVrrBVju/aMzss498
+czxMtp8e0sudHt+ommUDkA2WBEPuqJPIcOj+7bvFiv6smyxcU8VmsyEapknq+Dq8
+wG0w3fGsRdy8puc5COz/3xuiFlHQ97wtnnmyWbmdQmx7EfZcGWFfnK6HwEXAbcjO
+aaFwSISK8ROgqoKfTss6/8Go+vbmtKJQH2w1fQArnPHGu9qFM/sBNhZ+ieiZ6x1H
+CdU3qvuycFZMSsMhk4ER2vJdeJ8tu2jUhMOIuA/VUgUblCJkAaBE9wXaiibCZ/XT
+XBXVb81v+EpLsoc5G/wrg35D5U/Gqqc+KAABK2zHa4L7rIs6jb2daeRrUBytsWm2
+Exq5sE1Uf5mioHtZpbr6rKIGzT0AEQEAAYkCNgQYAQgAIBYhBGcsZXvga0swlpxK
+V0YUScJeNrmOBQJaBREYAhsMAAoJEEYUScJeNrmOb2oQALYcLV3wFFR5v9zpEPdS
+haOIpYyuFBkN0FoID+w7Hb7R3pyl7c6nLI9tyFEkJBM1faGke8vKj6HZSfcyX1Lo
+2rBL+yW7Gu8z3uEbkTnPFew9LnutGFuFTnbpVdLcpsbm2lG5yhdmjvJBKI4CfX4Z
+UFlhyGtwqsl+1lpUgvOuMI2HjyHcFbzkhiSRDQvtXCgJu6orjzEvqiKNM4MM7PMJ
+AwU0Lf3NV/p1H2mFllfotmXVZ/TjXuGcOYH56gcf4XpkuD5Vb2Qhu7IbR6TneC5j
+yPdC0yQYcXqrpYhNBmlbXIoEL1m0xXhrFVPxS3QeMfkhQOqjvhaxBGCt29YJaTfQ
+ugN7I1YfEJIxTap8xzEdJ+80YL3iNCIzaWSsd/xUKpobHSsu4RU1cv//S+5qD3WZ
+NfcUoBgmfPC7NXCoKrEVXk5QKh3efKnAkMQrxdWRiwSuenf4Yk4fWXcTyCXsMPVB
+qjcZRuOpow7tU9AuBoMyJ1XrznHoubdnc29iGN51Hrhvp/uNxjsCgPgQtpL/8znk
+dgfzXU5CYJDYHa6fubUTHVZfLKbzBEI2XY1nqVu+QEO86tkY9Ef4PFMknThTAJDC
+ph3xIx/sBb5s3c/XH9JgWEiyO3rMEzZecgF34OJgwnc5gl63a4k1cF0cxzkCZYi3
+k6XI/RkkRzdN1CSdCapbDJDvuQINBFoFEeUBEAChZUqlI7FLQIY6GEo0bhJ4oMp2
+jQi22zb9ZmqqcmRbWfNKfCfm/cXNDabccqzPRTWezq6hVYYPz6cSnzXpxPBIQufZ
+IoMVLKDbTS0RTFVwQsYu9qGdZ52J2bq6qMWK0I2n6lECNkbOB0bZ3aPxe3yw4McP
+6u+SU+b0ArMvIGqq1cmKSpkAQB0kBK/gGzEj26d30jMSN393BZ/ESEs7PZyaie3O
+CdT71Cmh6xNxv0IwmgbUo54diXL9hEYTrI3hPyCKFeAoiTjlpz9ah7DPoOHgd9lD
+Rd4a6VdMrdz7m5aFWo/NVuoty9spGYLG0p9N7zSaUAdO/96mn+W18hbL7EkU7/Db
+Ubt5ZP34YOI46aI8YRZKiTq6NI4WglZDxu9PFGoCx4lyvhgKOwcQHySverAyb0Y1
+qeNCL9uk6oBHB2bXlAhBBOORtL5rGD+ICCuCV4g1ZEoN7sJBMxNMXORzRZ1crdlr
+10lld/Mg0udl2Hgatfx+i+Y0ae/W0Ibr417H5q7iHr85ivTQ6mRU3hMuzQSoWZK8
+vixjvOK401Gre22q5jq1IPinACcu6VUto9Wbo8C1msSsWgHrqLRFeqp18BoIVY5s
+QCvcsGlyD7MdJQohpmJ7al/kNVOidhGf7TtcSolWF7gLZacMRYbGWhbDhpOIhIpl
+jiWTg8oWRl9KPbwzBQARAQABiQRyBBgBCAAmFiEEZyxle+BrSzCWnEpXRhRJwl42
+uY4FAloFEeUCGwIFCQlmAYACQAkQRhRJwl42uY7BdCAEGQEIAB0WIQRxHyjVEOHg
+vL1fa/6UNugL+6RpCQUCWgUR5QAKCRCUNugL+6RpCSgsD/40XzObgPRpbIRQaJL1
+FgynrXUh3dJHdqB5Yi/pYshFuI+nnjpAGTyYyk75WlfvUmzY4HgNmh9yCjWketc0
+SdulPkWQ093Y38bQ9WGVQ7NLnZ47AUTuImqEdKcR4wu9F3nGD+cyNWE5fao62tYd
+hlzrP1rLz8kALtswc9PVYLEKnqNCBtlGoWdeW7K1lYVG4666/uYvHzOzsUQ0MqVT
+HDjpvxEcVRA0EW47m2TVj6IYAsM+0J93aFRr4OKXf4bu1ejxRz4Pdx73QsjeZwlN
+5F4FpnmegdUbNR3azeGcF0qiOjPCNu3xi5lDFPKCRZLnCAqMsvv92Z/GWryNAuDj
+H9tsmbDUwYXc1QUbdsu+p2jVm79yPgJUIvcy/kwOd0/GYUDOme2NvhF252aOO6Mt
+OnTCrQoX0mIY/IisIjwi+2LEpQVyNDu7AGu581LYFGhBDUqiy5CyQ2neHS+k9iq2
+06dVdqETpiybizUZm2aQ8FlRV0j6PVKrqAzi0cMYJC+Gh/fNvx61goJ1tEDdh+LK
+Mw0Js7OCtH7Wu1D0U/qDl3137PIBSv10BZ3SkbZDqivV5YhyGhvEewiXsbamE6VZ
+AHGZ5pfd/0tkqAW9UQqw1AdqYBsAtE4yeU63xPcz7B4VyyIdRNxnjQiEg+SEpDyy
+Gl2kGtt+cIbEYZovTrrW2cM0FzGhD/4rRIDfd+IvhZ86BbYoIv4oreiZVjIhFAYI
+7e0DfVliBXNOHFErghu3FisUrfTM5g7RHA0Snk8OGO/Yu2mSXYKVvygIlfi3i+7B
+0eZxhZEOsHXgO3v4WtY5/67Q1XXF9J7MY9Ke9gqp0E8HRFsECfEoSCRdaaic5PIT
+veUEkHs6q6W+J5ULNTqdWsmSdgNWQh3Zbhh0Ih9m9nioAlZHaKnEZXGt8GsUimr7
+ffRuYgxF+kuWT8UwQu0Tc47QrYgZIpxH4WI6Rc6qKAo/4DLK2Q3Y15kJFqi8He0t
+U7fWXMtrdQxxkz94WTFokISVVRZxSfZ8VkGjVHAgk6NVBgp+2zjiwfwS16qbOUOY
+ikR3WTCbyStdePLaXgAFxA7g/pl5/f0IF3/IoGdTGjWoRqnBZG7NfP7bYF1CKe4f
+a87Z47LriyL70BFosJqBNMJUEorS9w8sBbnmMUdpGMyk7PH386W95ib7AEOtRttL
+uzYetY4LljxgMsloRgYX+Kg5i6fkntG6rod8LNYg7jWObWaIqlPoTo1RNoujYAnE
+qdCDQHoUOgtZ4v6+QaxI3WV1KPBsPb7SAjuphubIQVK/6qHse9OoWVwWAABXHFqX
+2qV4dyq6mq87ohTcRrZqt64ekD8H3Qe4xkYSzsWZTc0qovhs+G+dSTJ709xuV2EP
++YMbPW0/IQ==
+=g11H
+-----END PGP PUBLIC KEY BLOCK-----
+
+pub rsa4096 2019-05-15 [SC]
+ F1261BDE929012C8FF2E501D6EA5D7598529A53E
+uid [ultimate] Martin Hickey
+sig 3 6EA5D7598529A53E 2019-05-15 Martin Hickey
+sub rsa4096 2019-05-15 [E]
+sig 6EA5D7598529A53E 2019-05-15 Martin Hickey
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFzcLlgBEACsmjtsbfMuKiKBl3yV5FsQBxvmNyhIwUJMtjgm5CMFcOLD+jDw
+mExfsE8sM5fqfS5P7NFHn3V6NY/GyKNH3DZHGhYwDw/vG6JfHo1s9IzhjySuWEtL
+7GUCJBKXk2cDfk4p0lHRgEtoYjG/sRMgk3y7WTR/W0McxllcrQQBB3RREbz8y7r7
+atJCeec36SSZgXqsyXAESx5dx7qRTdIwObPTCGxBdj2ZkgzT3D35EExdi9I8oM6L
+bYOyUPy0aEj/FX6HVBOIWNGB0z8TYXjwY6/3gJG1JhaFZK1zvYogJ3p8jO07bTwo
+/AzYAG4NoV4TqTyFPmb0d0+wE+lZOWA3FfF0YtYnNe3KPmPJZ/TXdTO6kle24UTy
+Q9GK2s8QB3V9NA09/YoSF1qdjRfL5jo7XnRJztfFgIqW118I4EKSF+kz3hCMxH1Y
+iCvHIHFQs+WX6g1bXHDI8JWe7VDiCVYwMxap8o/vtEKoETH9fjOEO/f/YF68hqpX
+7eYTacDEV72qikHz/O0hNyeS1m/AnavPrd5RQi53vOT/KhwM+wC4a1bAywQUDZDW
+KkSEkTqjzcSryj3DJR6EZ9y4F11Kt4TZoxHvh59UCcVyaTZPl/YdcRWom6eGo/5U
+K1MFeF7fTK9ZVuJnvG6av2/W7Sbz9KaJxLHhUNAQ+ytdVkN9xfXrx1HP7QARAQAB
+tChNYXJ0aW4gSGlja2V5IDxtYXJ0aW4uaGlja2V5QGllLmlibS5jb20+iQJOBBMB
+CgA4FiEE8SYb3pKQEsj/LlAdbqXXWYUppT4FAlzcLlgCGwMFCwkIBwIGFQoJCAsC
+BBYCAwECHgECF4AACgkQbqXXWYUppT5IFA//b64QqKN/ookqqeKEUMUOMoZUTi2t
+4HPtzX/nqOXDb0zyIyaJaJlgxz+LuoN8CrSrwnmTY/ibKsFS7xkFRIeKYSb9b2no
+NPb8F0SVtxYFQJ8d4WU1snAWFJd8aMe3+z8w15Mqz1Sd1lS/sN5s101rbh8jtFZD
+NnAZqyfUgIhVq243XfhP4/mHPinpXjjF+APlMbdsOqnWgxzp8E9hpCd/YLb6KY0j
+JbwryzH52ha9ZDMdMipH557+Xutcl4Wyn8RsJy38J0qBvy2p8AMZIYotw6pSCedi
+7Iva+EitGSXXgRWbR6O68JvUgrFDOjcPKSQy7AlwhTase+b4OA9c3DgSxR5SMBR6
+OLYaIuDeVY2Zjr0ydFdxrfQzlHget7axRH0aaMimyCNfRa3HJea8ffF/Ssv2meUF
+IPIhYLn7SBrVoTISu38S6WkhBBkDiHAW7nqV+mWR3cnVjIzIjW56bI06NZ4kqtvk
+D9TX7b+KV20cSjjbSGI70023oHFoJSpLsj9+otvPwNrYC2oD0qTLBfNMkpcktnnw
+I2uynQrPNbQVeA+cKrECJeyl2yAC4WXvP4ZefvFZX6RnL9HiiZ+pDyBt6Yq3A9AA
+NhRd8zEAKNwH88tFmWMinTzCZz04bKvql+E7A3MAaR8WS3BG3JfLXMqOKiMfCHr5
+4Gn3rD4UGtFfxoy5Ag0EXNwuWAEQAKuxVJDOjG+xuaaO2Z/6BQfTaz6/zgzql/pR
+UHInKSt5ts2LGdRhfvsNBzGBhoneLWZ8PivHRGSZFsFj5Nzy9/DIkopdHSZhP/zB
+aqihHgFJTKxKBfrhP60bYQGBkHNMVwqbFuck24DUCzrMyJXG15f252aY7ByCIIem
+SHbmPww5q6HPEPS+hHE4ka4N4s+vqL+oK8ktq7lnZCX+AZ4jIuMAoh/C851hLcr5
+EK+a6tXa2yRJtJfj44GX6+nBVm2w+3eHqOpD7JM7NqWmo41+qg3t2J3zHQf/0ejP
+ej+OcVdEBD5zlJL+CNZ9PCMBUOrb+IbqY3ybmJieipOJtOCY8nwUyCueyTmq1tso
+OwUsGB9hIsVY11wNgoNgrA6PhExGxcM5S/0Rt4+y/pwFjnqYLXBXyBSjXzzmpjhn
+zERjmANlI8QLKHDdShgboDUt3Ynw+D/peTS9iJMIPuUTrcGcKgw4+6FNKACnJ5l7
+Wvz7apgD8QmxnSZMquul23bGihhbQMITWvdF5KEHE06Ah1bOzB3KXBEVx00Y0tO/
+hsY8XH4T/pEKv9FsIF6R4o2k/xm6jR9eZutABVIrizMHkZzjjo1ZC8b15olrZvLa
+/DtNHzV5nPPSvGZPcey9BYk6b5GGCfT/EiWtJz8Nxm7/cCYRvuuZnGCxriH6XPww
+v8kPNihfABEBAAGJAjYEGAEKACAWIQTxJhvekpASyP8uUB1upddZhSmlPgUCXNwu
+WAIbDAAKCRBupddZhSmlPikmD/9UrspSeSjwaXSj2vCpO1pWm6ryVQc2ZzyMnXvq
+j5HLwzaVsN8HM/YADK5FL6qqhxrROOZdSHjS92sxk2Rab23gGRKbwDUJmerheZ4B
+ZXG40fDOPv45PZ8V0Kn9bzliNpPBFPjoaI8X1AKoIXyUqEy98Y/zhnLDhW/+yPrO
+gznPfO5ds75+u4xOx9pTfGpdwt6qhfCdNHUoZWsAw/6pafqrCIvbHjGvmMJyYENS
+dl6sPYBeiDkJkH67sGvJghjedhNznnXJ8+sm701eTqZkmpxzc0jvzwgnnYb0rAzS
+uU3QNj9w5HcGQd/pk29Ui8A4VWLJOUcDCVa/CIQMQqQDPYJKxaj7XgE+dQ9MxQ3a
+O0wgpEo2+4BaZ4I/qP8CgaE9q4IopMhNKPR1IeEFUmTsIzLVAktS/InshFWWUp5e
+mEss8kiqxU9bAGZvWopllCaPJQTDZElQpW84Z0afyVLPp47CoKcXBSMsITFt3mRf
+ZXAA6h8UlSgC7FV1YT4p6qsHqQ3cLERdTSrQFLmaCb2yRCR2V9d0RiMaIwUmnbld
+g1jeR4weO3LLghuWpfZHruDrDU2ZvOAObQIQdHBFmCHejA/gilf0MUdJ1h2gApuJ
+m3MUub704EDCTSqz9LJc+4/NbA2esZj7mExCtsMEqaoHW7BU4ws6BRHTyeHgi+Le
+1qneNQ==
+=oCPv
+-----END PGP PUBLIC KEY BLOCK-----
+
+pub rsa4096 2018-03-14 [SC]
+ 967F8AC5E2216F9F4FD270AD92AA783CBAAE8E3B
+uid [ultimate] Matthew Fisher
+sig 3 92AA783CBAAE8E3B 2018-03-14 Matthew Fisher
+sub rsa4096 2018-03-14 [E]
+sig 92AA783CBAAE8E3B 2018-03-14 Matthew Fisher
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFqpgxYBEAC1+yf/KFw2AQlineurz7Oz8NyYMlx1JnxZvMOFrL6jbZGyyzyy
+jBX5Ii++79Wq1T3BL+F/UFhgruQbbzL8SiAc8Q55Ec7z/BVxM7iQPLCnFRqztllx
+Ia1D1dZ9aFIw4P92kQgOQGPOgIxFRwEPA0ZX5nbZfL/teNhphW7vHaauk9xEJddm
+Pyy3l9xCRIKQVMwuCaLeH0ZZpBllddwuRV4ptlQ30MpOnaalQda9/j3VhNFEX8Nj
+nu8GHn+f4Lzy6XmhHb++JB3AIo5ZfwaUS2xMrnObtvmGHR3+uP/kblh9MzZlmL4T
+ldclyGaV7z9Z/xGwnX/+r7xna/fr3mey3GXm29BOP2sUBBQCba05X5nYUd2TjWsZ
+OZtE6sLuzUzeOTLEDu28IJoiaYnLKDNzDmuVM26xAYVWXUdCGgn+1rAp0t5OGgHm
+qTexvPmckgp3yw+tcPUkR6nh0ft7pmeoK53AQHMt6fk7plZCTuu5UvxZE/oDzt4X
+w9+vSTD5GzsNGrTYLTYUSL0muK+iM/uuJtFNJUREOucXfmWxulUsxwOB0st7hnLs
+4JmFSr3av1en1WqqdiXswOrdK2msTm4J2+fsOU1jnyF//RJmj+1KPpRDCBTzpAFS
+SzE/rRaLZBVE8k2vT0L6yBXvGJ2ONK9TkGT5fnyXu8zDu1d2Koj0c+6m9wARAQAB
+tCpNYXR0aGV3IEZpc2hlciA8bWF0dC5maXNoZXJAbWljcm9zb2Z0LmNvbT6JAk4E
+EwEIADgWIQSWf4rF4iFvn0/ScK2Sqng8uq6OOwUCWqmDFgIbAwULCQgHAgYVCgkI
+CwIEFgIDAQIeAQIXgAAKCRCSqng8uq6OOyTsD/979LDS7ONHIHNoRf7Uud40To0S
+/domtZM0rXUCBdbe5R4/xah0HvM1u8aN4OC6U7i0LCXSmEOZxQLKxKBWfX4/d6k7
+lBwuQBSlcM6cM6nDfPInT0C3o8caP8lOGeNAdOkMxrqiEO4gHNP5BvWCV+jQSU5X
+uvGhKNTMcpaf+DqZAFbR6zpdL7t5JCK0B0RRhFfaGWb19t3REukI5OF5M5SN7EtQ
+XWK/1fyzsltrjTSXgMWuxtJjBchltjme/S3XpHeeoSCm1WWh3a140tCC662ydU1u
+EZIlUrn8dfMpH0BY6bb0/4dhHvCJ3bw+zZoCzFJM/LksjP5i+Q4mUOD8PvFWh5aS
+46F827YiMdqD/eDMr1QRe66fPw5EtWTHgnf3PX+NmN8lgn2o280AkRXqkrCgl580
+B+lFwZ6hfan2F8RIHXNbF+9Zvc7Nh8bG8s4I8s6uiufmsmOuFdp47J4//q1W0HcU
+0fqajDnEhExtGkgwIsum1Ndwq2sWZT/ko7PYyC3J6mbr/MXTvd2TxtnMgG6kpyPv
+p3HlDaBw1aO5vO5mji4RTsoZi12MITIyvPsFWh0WtXkJLNaJ30bFSEx5fiJILxu0
+bBoBK0LUhB1Q+8G3Kea3+q3MuOQFnFfjPlMH6q84jpU5Lv5BaW17IeZ2kIfVYrcG
+vBvtZ5VHDzY4EhGmlbkCDQRaqYMWARAA3wYv6jbE1PjXwIUWSSO9zxQLBKg7Cn7d
+g+wwKx+N5DHjSdQBous6DGwN/wEZfXJOn14S9Yg4p4owmiyJDn0oqJ0BLdsMELoO
+imCIZ+zn3AjCWdk2b0oCOhyTwhaVhVgi8yMQruMSUG9/3lkVoFae/GMC32nmE2A0
+BOnj9fVIhIrDKt9OSeTXXRNVaRvNFo9ry8S1hDxgfQ2unD6J0mMPhLH2O7CRZDFW
+FyH09E/rhrIDvI3Z7mZw2ufGKR0YEu7fJ0BBBSbIqUOMsUnQNWomb2j/QZyYmhTS
+Hg9YRB807H3b+5GuZim+DSUk5DQV2IENEg9LDYvhDftE5COYB3tZUnvEpOvNybBl
+URxD8Kgqlb3j93l2FcD1QrIGW5VCmkkuD612ZG+NjMq0ZXlQjv6gxAYir8GTKkWt
+tS1OatDm6qe6xEFypT6nlvxOYFxLeFkVVGt4H4QW6+MXvnwMofL0G6fOhRvdlq3R
+US9n3WqzTpCwfvJs2lhYi+c3/2nwCx5G42OT9Ix0UFkYwxhGk6PRleKOMsw28PFr
+a8DVjyKGOVn+9auVhPXYQcN0sZqFl8LBDkUtaniiRD4WKH91aKYgmX1qo8sJZMhx
+t/ZoHOfoHDEEa+kLqfsWu3htyTP1gleCAA8kDcRiy1v/G8v3+p2ioI6q1qegigbr
+AqTHcWNOltcAEQEAAYkCNgQYAQgAIBYhBJZ/isXiIW+fT9JwrZKqeDy6ro47BQJa
+qYMWAhsMAAoJEJKqeDy6ro47T7gP/j/3R9hPg+kJCErlEKPqxsEOxxlaHx+f4UGg
+Zm+P6QK2SrqbrqcPhoKUXeHlbCMm2euxKTonIawgCIr44kCZvp3B8pCGUCR+M0mf
+aXGO1O6EJ3MmtlbXJ+OyBAhxpklUWdM6favuzi62fAmvwEKQf1reG/9r+toJb5N4
+KwrrdZNUaLJWhb6D0fwB+1fWJbdRnDO1rozcA+YJGhhunpxF2b2nZ5OtqNuGmbqV
+ofxL6/0lM4HqLNcUBlUyQihjk1+hzfWji95SlzIxP2EhH6gJh/e+/EDCaVVV00CM
+0n/0dEB25nAuSMGgUx2utNmfCUP84IErGzSUlXdzN20aW5xiBFU3/uSWyz80IGuy
+WeyRzksmphGdLwef+sWLKGrOJh+DkOxxpFMRaIqGEG2YViQCg3gyzjiJuI/XAdlK
+AhqwVKfRke24vgifd1tN+zeFs+m28Hpw7989vky1hDvqdpK5/fiJfqIBsF0jir/H
+AgtqmbiqemX9rUa3uDkBsvyu+Ou41l+wL6ahj9Pnu0+9hQnpeZERIyhq4LWn7gGb
+xk5y63wrvGbeS5lev//012oSzWQfSdFWqQVzMTVtOojGFWgvwRCwZiWEPQkRIV5r
+VNXtXPUdKiOEkWin01ZrwDPEyBjr3pcnu2mbgLeJETODnCRi79KA5kCtd65JbNF7
+Qknjx8fW
+=jz9T
+-----END PGP PUBLIC KEY BLOCK-----
+
+pub rsa4096 2018-08-06 [SC] [expires: 2022-08-06]
+ 76939899B137D575D3274E756DCCB9D752D35BA8
+uid [ultimate] Taylor Thomas
+sig 3 6DCCB9D752D35BA8 2018-08-06 Taylor Thomas
+sub rsa4096 2018-08-06 [E] [expires: 2022-08-06]
+sig 6DCCB9D752D35BA8 2018-08-06 Taylor Thomas
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFto3pMBEADAO8mWocOAqBUHtiLBnht3+vLnjLv1LNs2GBdMCDRza51/SzFN
+NN5pAETGbFl11zxpm9rBkyjI2xVO4OqI8TNIn6vYPTh2YVBs9UB+qRqjJt94fm9C
+tWdQ3/27I4PPrCIw5CxjLKst/GO0BjS/J228wP1JtUeyf/QH9K8hDFeov0y94IMM
+s7NFRkqZJ6tXjlDCJnDkPm3wERgY3S2I8bgr/BlGFEWCmjqD75PqHuJYjh4mmXhk
+KTeYcJh42INPzCXd3bnvF0NwfmAE70fsSOZz7H3Ox14Gs+Tn+jDC8+Or4CCaqtyE
+276d8yyyDXBlDN9IjwhjlJPfx/zMtvD+lAkGV89NwbZ+YnyUNenK2V6H86Efe36t
+MxvFCH7rOKjCNjKUE0NUbxXfYig5u6xuZKcBcJmjXbmL2dFUIaMzm8jf1NlLuzjw
+k7IVAw2Y9ZcVO1eNgeVxI+NdRsdz8qgBmDTvRhxh2n/ppc+5DDVhiffGqqlIZmYN
+NJ2bUhW0x0R2OHgOedMyKnYDGgXPI3hnmY/t48ErDwxTqVNoo2tVU1YWS3eP63oa
+8ZAiNsvYVWFWIAUi1Q+ADAhj1GdISg4VU5N97joFZA/POZrRtS43OBDIvCpaKjF/
+bu2EltTVBdGZj6fW8xndZIum9cIHMlMi+gyq/o9kipFyZ7zVEM0SB8aI6QARAQAB
+tCtUYXlsb3IgVGhvbWFzIDx0YXlsb3IudGhvbWFzQG1pY3Jvc29mdC5jb20+iQJU
+BBMBCAA+FiEEdpOYmbE31XXTJ051bcy511LTW6gFAlto3pMCGwMFCQeGH4AFCwkI
+BwIGFQoJCAsCBBYCAwECHgECF4AACgkQbcy511LTW6gkqw/9E/DZMckYjml9gN6f
+Z7jyZSzO9zP2pVKvcPvaXU+kcyKPR6r6seYt4uSOdosSsZs/xF7aSPoMezDyNli+
+W0t27DCXtnbk+LYptw6AaevkUF9+Cxe/gfXSQDxU6jtOV00KM4WkJtJ7Zty1dvk3
+PsnpPhbxUAWwULy0wF9Ab9RAXMyz/7TrgWP70EY1G/KkETUHTdSkxaoUPs67F9Y8
+c5qVQjgFVqSeN90h58w/4SF7KkS4EOy7RRyfzaBuyQPPi3fOtvsfAY/cSOVn2PBF
+Pj1RPoTREKEa0nnp9TtrlwP7v+ooIvwDeemjL1c6tlTBW67T6UM+W5hcvjegQg1h
+uLOdRtiN1HlTvOZngtegvbegGviwpdXahrNxN2mtYCAAYNELNyQOAWERGF9TUKeb
+OC1HLbZwXdmPiUlUfPN3aAnMH46qe7eSMAZK203ciZlUxowFuE01X+M3WmLESdP3
+dxv3TACiC55mGBgZm/d/1CK83KBWMlzbgfmop65xbxi/tmpJbYdqoTeidYtUDo+L
+IzJVjagvfED49o/U86C5DBr7u0mhZqnAxaWEWRBRgFi1Bnl7w3zSYYhdwGjiYTJ9
+/hejac8iqWc+RC9AJh4HW6itB3jPoEI90aVb1y8hm3UOBQTMEnI+dpvZEQPWSBnd
+tWzzQS7et8Tlq0J4/wRVcEXAlmq5Ag0EW2jekwEQAK3KxoH8N7Qc0vSkMQmo/NfO
+lEE89/KobYLDvyQMfXQJGF143eaaW2IHcE6OIT6E9IX9vnt00Lfzm0Jwdd3ur5xf
+l3GJ9r0riYVNzQ/9kMx4JkoXJ+kgaL2kVTykKERkHUvgRcLkgqVZWMMGz2sUNqYE
+XUBtEnYVsZmxQNE8X41NI26XP/1e9jctn/FgAPXMtkLrXRfRIlKzyQTz7zCX1WnK
+xeMjFjHkdBD4dP2ohoEkk2y5lDI1hlPginVPZHPShawxd+TE0Vre5i0Wp6Y+lKWL
+QtsZKefTbqBDDJlwXmUZc+eu5jEuB7qKDcwH1s8vtZhRVaKFr0kPHEZ3Ka+ODWmt
+/v6Zhh/tFkxq0mgh5OHycN3GvwMKp/fEBkRl9pM4RuHrf41XB4+/Joi+CottP5WC
+1So8ydbJnG2lYknHxPMrISvACDMLPwWp9IFpr1u4nPkkuAlCuzUkev9r3dJQNq3G
+hsTqpQgkHQTd5+QBabE1543gWOCjz9ap80RnIlSfylskR5fWy8U2XnOo7kb5TtnC
+hZtJfjpCxHW894VEx3oe8tHaUUdmwrcK3g1DSu5KrtWn976g9tOlcpmfTjqNYEJq
+93jD1aCb8yx+LHk5wypQhLpn76AE6YCJebQVxmc0AsqFOlTTRU6yPQF46qYOZD/+
+spUs4bXbxmLghLTO/VmrABEBAAGJAjwEGAEIACYWIQR2k5iZsTfVddMnTnVtzLnX
+UtNbqAUCW2jekwIbDAUJB4YfgAAKCRBtzLnXUtNbqE4lEAC+uIwA9vkHHpucTLBq
+UiwI4agcY9D0iOGohO7qJQ44MitIsiIqG3Qn1Wps1sdGdoxmFtTE8W0tMhZ+XpTd
+ZkL3G8EIhB9gyuel3H1L2vD/6YX3P9Vv4JlcpNDjc/c2i/U+/05kBMwtrgmjB/3T
+W1368I9uzfAS3SPYDUsx6nNv6iHhDYDEGOOuBWv5VDvrnYcBbysxkevB6SDZrs5d
+7fpmALMsUt8le/y9sn5TFH2CB3aKHGJHMv0RxV5iEXwq7jHPeRJCamzKTCx8/et+
+8wn8Wudk+FiqrSH72BaRb9j7n7KoBuBQB30IbbocRNwGHJHsmuyThGBBZh9Z37Qm
+r1qoSNRl+ZJy6QoAO6DVPS6FERDDXYPwrHiC8EbomblcVMYjfI9/Ln+rSB30/OtC
+4t+v83v1TPerc1FCXJc2lISs1KLlJnPh5Ykq6IffH9nUALmo5tK5FUDUUhOAxFhe
+wCE4fJI+yNIcMHotk5XSxbeSUVFaXDb4Pue/9DjQjnF5iSQGnbveEmGUaXxncjf2
+cJgcNZjd9P4XKqb1hNKpFwgm47dr3TH1/KmkFlfeBK4S/GpVsipWiB9vX4RC28EB
+QP4bc5To+ohqwuOLw6hRo0YLf15jTJknCDtfsgKQ6uiR7ai+z6fqoH3kycCCcsPc
+Y2/8LdVLydI6o8cZJDEpEexPaA==
+=vtJm
+-----END PGP PUBLIC KEY BLOCK-----
+
+pub rsa4096/0x1EF612347F8A9958 2016-07-25 [SC]
+ Key fingerprint = 49D0 9C86 C3DC 8DA3 F0A0 7622 1EF6 1234 7F8A 9958
+uid [ultimate] Adam Reese
+sig 3 0x1EF612347F8A9958 2018-01-02 Adam Reese
+sig 3 0x1EF612347F8A9958 2016-07-25 Adam Reese
+sig 0x62F49E747D911B60 2018-12-12 Matt Butcher
+sig 0x461449C25E36B98E 2018-12-12 Matthew Farina
+sig 0x2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+uid [ultimate] Adam Reese
+sig 3 0x1EF612347F8A9958 2018-01-02 Adam Reese
+sig 3 0x1EF612347F8A9958 2016-07-25 Adam Reese
+sig 0x62F49E747D911B60 2018-12-12 Matt Butcher
+sig 0x461449C25E36B98E 2018-12-12 Matthew Farina
+sig 0x2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+uid [ultimate] Adam Reese
+sig 3 0x1EF612347F8A9958 2018-01-02 Adam Reese
+sig 3 0x1EF612347F8A9958 2016-07-25 Adam Reese
+sig 0x62F49E747D911B60 2018-12-12 Matt Butcher
+sig 0x461449C25E36B98E 2018-12-12 Matthew Farina
+sig 0x2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+sub rsa2048/0x21DD8DC880EBB474 2016-07-25 [E] [expires: 2024-07-23]
+sig 0x1EF612347F8A9958 2016-07-25 Adam Reese
+sub rsa2048/0x06F35E60A7A18DD6 2016-07-25 [SA] [expires: 2024-07-23]
+sig 0x1EF612347F8A9958 2016-07-25 Adam Reese
+sub rsa4096/0x2970B7F911395FDE 2018-01-09 [A]
+sig 0x1EF612347F8A9958 2018-01-09 Adam Reese
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFeWdukBEAC/j4xe/59W2CYAzXBgh0kuhdI4t9B/4CzYxWgpCqNqXN/IfBHn
+JUSiTKdfwU9+cNfcviDdV/UjyxbWxyvX5Zm/4Ik6XhbK7y+Cl/35TBt6d1MVNr+n
+DPeS/uJKNtb27/NwCdihGzWL8UQ0Aah3Y7EZfpy3KSTNfSfEY35XbJTHGlFMGarW
+nVArY387C64XNIO+n41NJRnLDzZbFJMv/Eq/psXLumAaav5+PuOelrfaWGNpke9C
+AgV7DoyFcK8mTRwISqIjrV9S6ENqzUFu+VcqeOw8bzNnYDwdNY0kgBQpvfiKpnzd
+yhYjFeu+OdT+sM5sXUgmM9IdB4wAbpZ2dM8uWjGe7WPSj1B5t3Bp6DtcIHl2ICcv
+lpjjrXXKwv1wdnhnUKjlS8NPjO/XGzTUnkqiO9fvbVrMEh9CRCrzn1OuZaH9RQZq
+vFBIp2XfEaFaUdvPSDNyDE+Ax1V3+cCVX1+mIIYrS7lK8X3DoXhBZbuREnxvK2X1
+hzw5Ye4GlAw5WeNJNusHmGtKvhayLi7xYjqsTAN/kAcyHm7d3xXBHYsasTpX5Bc4
+MW1nnTjFZzX/r+cOZELWnwAmkponf5PmBVefWRGvhhUtsoF+aw91pme1PF4S3QZW
+orre/udNUF3JEbMHhstGlATUMvtLyFtdR3WH7ol2IEVCIGJmI5L6Bj4ylQARAQAB
+tBpBZGFtIFJlZXNlIDxhZGFtQHJlZXNlLmlvPokCUQQTAQoAOwIbAwIeAQIXgAIZ
+ARYhBEnQnIbD3I2j8KB2Ih72EjR/iplYBQJaS/jbBQsJCAcDBRUKCQgLBRYCAwEA
+AAoJEB72EjR/iplYja4P/2eJs1aaS72z5FbdTktxX1/Jj9fFaniBVWakcUTZigOH
+pq2oJWUnziLmUOI5sE89WsEt5tmhGCF9b4105nIPG4BVaLAvuiPBF69n/7eNxMh/
+5DZnpooPLwaT3w5m6Fqkouaqs3nWBTJ92Ramph9G/j3rmrf3lPrD3xXF8fXlIk+w
+r5n2mdoJvvoezwTIts6iUAFf/hCOecmtOF2yc0Tjzqb2lsu+9OHOgID960cQmzEq
+xSJrDsXGdDPkOjTQx2faEmd6jMFzImaqkGj+Ry+rq8yzlHaQeor4aeIAGncZDjmM
+hYUXnO0ZITqVfvfm6Gu/c3NyNe4+0SpTWwTKxLv/Od3jtFMvmf1pIjNhcfAdCH5/
+HY8jxl58TL5BcmDK2tzpz1Tc9aa3hICPl3hFbwRDRbFZ/bEOdCjhAhPUmaAl8ia4
+H/XimRzqsr748G5ZP9gkSC42/3nvGgGNZQVmwedw6rOaA9EdWqv3FPE+l1ssbosB
+VAmMnaP3M7iXt+ijA6vLeRG478q4rWt63uDYDswJDJv1AXAKjEzBsB13B2JqfN0G
+m8HY2vWkaAuEta4fHRgf5hLJtPaJLjHeZ0s/c44KFKqkew3PVyaOnUG6WCpUrfjD
+FTh+j/LMKxnz0CLpIj/xbSsfnJgbeNism7YeeEQcvM9z76mRMvDL6G33X9n1Wdwv
+iQI0BBMBCgAeBQJXlnbpAhsDAwsJBwMVCggCHgECF4ADFgIBAhkBAAoJEB72EjR/
+iplYejcP/2BgJMc+vugdd/WkDJJj4TVskbn/VWvEp0aO/2ztADMW0uKs8DeRZFVk
+eWbueBobrzWP2Cg3HN282E3lsQHqPOI5VS9wvbVj1NSesH/OcOcc2ukimHZAjg7g
+cLaECJkXbjuzvKtFDVHRtzWFyJMRPPdrXcY7fzPV8bcr76VeJSz2klK7SI6xCDJz
+fbclnyE3ctLVWd5Jmm31xT76u5WgCX+RA6wH7mxET3rEHaSXI66TFzmL97tnM8Ke
+jCl2qRpOJpoUbZhhIaYa5BE7nRmPrwQ77za6JvuF7gxV1WyFkuwOGgKGx9zziyOG
+Grmp7qZnxpfWXmBSFXdhiWUvWD1PvWT75QZluGXN2hVJEb6f3HAaK7q8y/2QPBWP
+1ttnJ2lGpDfEtZCA+RUv6CfuADPF2B2pMyyWC54jT7QPfokgl6tQPotlyiGmiLup
+Kml6hd7afS6QKHFeyZYpVVk2CCWXsiFw6qk9OEGgP9eyNQcKtXZhnpql63YUjvxH
+HbTt/7OLlgbyN6AmWLRtVpb9onLEskhWJ86yeaYIQoSEP4qNZNBoekMMg+NM6QeF
+CEfRPtmvG9X9kSEbeLazyV4xzw2SGjNbmQCSExGr5e2pqKYiEjlHiAXQ+OaVHkcG
+0c43snOrCiD4c/rU6UQdPy8QMwjutoHWa5pe5hk/S5HjncmBwHfhiQIzBBABCgAd
+FiEEq6JSlZj2YmxCDTNbYvSedH2RG2AFAlwRPcIACgkQYvSedH2RG2Cyag//bZFS
+TnCa2WuTB7hWWaatEFdFZx/OoWlzwVsjh+WjAOsJa0TMRGI6VTIPYyLapuEY7+Ii
+xL72wsAdjinnhcsBTAydcyx7RJGGhMSiWYRMVP6a+rlUAQJ/YmC0dB3HREMP7aEa
+/Qgu1r05RpcLEDpzLsmbMmj7qA6Ugh5tuV7tVvHyQye/7jYADCguDRWC0C09lfz6
+lmQYEnFNo3V1meSxyPFTwp/S4gCf/sc3UTWSGTd8DE8lsQW30m7R1+zH3bw5jsiG
+GZMgkpszNpWtVB6zc14csv8okl2tBwFTklauayIVdOprXWvGSiOPGUH2QLWa86CZ
+9Wh295EHkHfB+dzB4Qn0m2QBde+r7Hhuve/OVD98oVqVbGa0E3wyX+7EdZY6TUY7
+XIXkQBqlHsdYFHNRiXT9A/mpZh2lXOd4/0m7aT6/Z+J3aV61Pq+azTLsdTs9FFIe
+bhAjAjE0gTX7No5+wRVeRDA5zR0ZCEqoBKPx5HKSzxt6rlqbeMsg87fDflDAphAq
+52D4CIxEtWJ8YbnUYvPG+emoY9hNh4x+teCwmHL0LHksQrg7bvGJxsh4driKef7f
+LItrNLgFC5we2u4KwmPYA3kXcCTtelkzkbNoYE7cHtLyfCeP5l03VNjyx5X/RRec
+7SW1hdfx0xLujD+HfPx62sfd1ml+Qv2/Ib2NUdyJAjMEEAEIAB0WIQRnLGV74GtL
+MJacSldGFEnCXja5jgUCXBE99gAKCRBGFEnCXja5jrqvEADQAzvSRqjrGeDmx2h3
+S/aF5lLrFC9LhyFaFO7WRh+6hyAIPRKIICCHH+Or3mAxaQ5mi+7tF4s9UrtRu5FT
+1gBDSu8hqGCVo0spCmbilQ9gVx6dRMjSS1UykiMWcNxksHhrzDF4hLSlhVYGUwkJ
+JQekDcgNXrpnXF11GUt1nr59MSTfvtGb/9vgMkLC+uQeyJtLlx8E9VvppKc3pNKV
+xYv682woSHy0TjOyzgA0MIpDMPcozR+E7h72pLNU7z5KfmxVlnJCBU6w8HlZ1ftc
+OT3TpA5q3OBhYpz1xpXUA0ZQuRsApOOssvKLFonpu03zeZCTAq1Zhsq0Q0N7zUly
+uPNKIB2drq7CAJ1z7R8tJ2Ouc7R2yDbuVK8M/xjgWBlOsz84cYlwFpAzlvboOhTs
+ORn0rS+i3Ng8ZaN2yG23oOmXKOpHeKBG39iigAa+vvsxxOCzo66cMfpse/spas5K
+OLBtS2gh1n8uXetGqrslXd0puXOyg9T8nI3z61QmPT6zVk8I/Q3Otcui50PfpA8Y
+I6B6Lb3vBC/weA5D1ryLA0qNW85z3lpzCZ2c+6rAP92cVA4KR6BB1znAzK/Cf+Bb
+4iy2bt2Wl94zJ5YvyAjv0KAhMde4i7z0+AJrM4BQlULSjupn62NT7nAMuYZbATFg
+jVzlIE0SLwOapcBsfblii9Zly4kCMwQQAQgAHRYhBFER2nPfEtjoEspGLyzbv7s3
+roIqBQJcEUHmAAoJECzbv7s3roIqbnEP/11fn6f5lF6zsNpB/JF6sbsPrAD/bL8+
+QxIFdPQK/acmP5SeExGh7xj9nZvnzKAm4XTSbHTyZ5WNwEq7Vgk3JY0v07suxrvf
+udGgTStmdZl+d01k4NJ11BGYBj0SQ4DG75Egl/57FsrSH6i5vjz6eqR/DJHxMfCt
+Ws2SCbKb4aQGlXTPiXFfgGtyFLWEo+iVmySEpEtrn8m5Gm4eeVtg7IUh1DU5KiFK
+xRkVOkdC/kyWAY3ig+HzbsVM8Xn3Q3S1ES7qusf+iuJoK4VJQ/HisFdBK3fOxgjo
+y8C3m/XFpX11wZ7nJfJz2mIauhoasz/EAvaaczRjVbvmg3Wpm1ogiaxmn7JnS6S2
+0GIeFj0pJudNrDngn591URE1G32kzPaAmOEYeUMP/myjNsSYjlElEemAWO37O5zV
+WFmKIcwysdPHnXJ9NjiVDOnpO/t7Xv4ZesJ88+Q/4wY/ESgkZDeA0yHMa33eSCte
+SyDv+s1psYbOI7LcTo7ONbf47C1YNEA/Qil/WTcFTCiys9WibDe0KP/aoW0okWxn
+hOxQUZV6ZNwUQ64pIpmkWWMV6jYJPotcc6NozQtqkBr/ukMx9KMGozJPfo3Rt51a
+xc/oChdnNbhXnOSbdKy1xRo1BzTUR3uELJngLnBbanvA6y0koq3Q2vc23f/oFtv2
+gXwudV/k3ZqltBxBZGFtIFJlZXNlIDxhcmVlc2VAZGVpcy5jb20+iQJOBBMBCgA4
+AhsDAh4BAheAFiEESdCchsPcjaPwoHYiHvYSNH+KmVgFAlpL+NsFCwkIBwMFFQoJ
+CAsFFgIDAQAACgkQHvYSNH+KmVip6g/+Px4J3cY58C+XXpnseL8cySMmDBD++pkD
+gxaB1OdR09L03Iy27gCXDYBsGUu4x4iPvhEAq064uMKjYp6L/nhbHhvtoziBWL5m
+Gd+RJVEzIaW2a2HDlIZ8fuzLiFjWbHz3URKYqjbT9TP3lMTHkBacx3HZ8M+9yUdI
+ppsqhPu1xgD4jDXXioLeojca/vMlTo3dkZ9zSjAhqEQRDMzN7Xp1ZzPs+uCjEJHG
+09y6/CvPH4gCEIl7Fo+m+vcLhMpRQypWTkyXVPghbrOvVkWpO2zCRFkmQeeDDldL
+x5LfxHhMxQ6nMpvX+ecEWa427Jq6stRplNU3MXCFrQ40nP7ZTniNPJw8BfpBgRi9
+FubFpa308y3gYdluYDV3H61SL9hF/3XzguwB//kK4ULCF6Aa8cyFYjqB/cosrLs2
+U+325fY9eZOjCzykRIpINyexh727AAIqPto2J7jnjIhIywiYj0ivfvg84aoYYUqu
+kUAPIBHAH/Em+vOYoGwsVMwrhG8U/rBr/VsLIGDC0qCh/AhVt2pvgZT7OQc3CHz2
+wb4NwmShF7ySaSqBJQ3FdRfC7bun5NSZX3hKNXMhppWxtRJU5PjDtg2Y1syb3+IY
+5S2gtEAlGFLjnEfYeIBF8RTmdty5ovQLu120JYmu/tCN0EY8HniuIqkI6aqG2V58
+bFuOtoQVXEeJAjEEEwEKABsFAleWdukCGwMDCwkHAxUKCAIeAQIXgAMWAgEACgkQ
+HvYSNH+KmVhLSA//a6F3PD6IzElQMTPwGG5RoeRhmAb6dee6xEJe12MBeZlHOvBF
+DE5PAfUPoIWVvoaSLPwVIMoEJDpzQ9MyHpne1I+Zy1o9S4dUZ/c3W7rlH4a6e9lK
+zcATK++k6FpWWIZ1Ff5ta9uGpxjQu9ojTixojzM4V46MCn7JxfvFiKGvGeXDHHYl
+InZKSEmzYOODZzxcYT/U9C6mWADEmMx4M2xgv3UFMAotecXAqIW5/uRZ1h8Xh/eR
+ULBGp9MSvnfxD665BqCJHNLh9/G+xr9Vi0ic239nqRUia+zI9tvO2JE4PxC82btk
+m7kRNCo67dDg6flCYv/37IWc11RwYo9sKp2S3mZqQoFi5L2JKwZy7tzI9B4/Hrwl
+NG0GlYwsiLpRMOWEwMixuLEp6vXRDriu63xKTNbhgtJrGS1FkmCeDuP1f1eRdCIH
+PIwy/zQxfVqVWNnWzDs6esxi59L+nUv1soRDGstfAWfzN9wzpwQbmy6EqetgMuBl
+F65+6f3LGcZ0Dp7tNM6M82/vvk78JOORJU8WigSQZAX4AB+NB8Z+MwpTmX/bG+Gb
+FtcCYxd/zHVyxWUJfembfd8fGJtY39oI8vCdYNe2Jq94wohYmxE6Koan+4CkoAwx
+7/67+VWSZwMOJcJsaMZP4qOMLnkrmImlF66AYe2Wm0oKsLibhEFBpSP38CGJAjME
+EAEKAB0WIQSrolKVmPZibEINM1ti9J50fZEbYAUCXBE9wgAKCRBi9J50fZEbYBQn
+D/403RTgmZMx2pkGFHdVrQFmqXoIXOScO5x8XS8OjFugjycYT6aNeHjQwVllKHLf
++Ig5saTkXoiKS03T61GUXuwPwLVTzkeeDME96dPqo+k83H+D4MifEtDxF1dZQi70
+sZPw+ITlzZOmpyQxDJa+rTbewvM+ULoXs6GNl+jxPpMlKcCpu2OwQn98SOibfDmj
+HNCYiF+Gj1VM+xg5MB+ROkLzDFDpUux2M8fJZv+fgiTfcnWL93lWxaKhBlg4ZFC4
+KkdnB4wVyazEucsQQgpsJamjK7y7jslfkVZUwOJvpuqKYLDt5yITUXx5PyqrAwJe
+0824AudafjAptcNYRtv51tSIeCw1mxAsiNBwIJmEW9JYwDED0SOH8OQJfWgMEIZc
+4Zepa51s3kKYgdh1fkguIrsSERnaUOq0qHlhywOad1rElduWmPWti1mumCDna4gc
+1ZH/YR+HyUp7ELF7sJvGQH3bNB5jziHtmqz6+nmH2XxRTY3Mhvkhp9ow5pWxyD6Q
+xkXXdPw6a5ZNGTEmmDbi2FEykJyFTXvTsFPvSKAGSXeaUqGX514hK0ZamTfLLyH1
+xdEg3BUNI2jtgFfB6BlneCDlUppNzulfhAR3AgJmIiGImTG1l79nqEgk7O0g3uMC
+JL61Vrjn4pFuBU/SG7Rx4WcnMXBfUo+caUQQbnP1QUGK7IkCMwQQAQgAHRYhBGcs
+ZXvga0swlpxKV0YUScJeNrmOBQJcET32AAoJEEYUScJeNrmOZRkQALfTx1/VoQDN
+MIIwxDW0vku9DLD8AciUs6V3B+IA2ISwbraHHji6kUEoVUMSCnTEIHJVr8L0oeMF
+/87o7yUvYgUtWOt416icqGlpA1dGtQLLffyUNv3eCjW9db8+snLZUHsGkeLCowBI
+eb8fcPMkNmNNW4YQxIs/di6spV6rCqR+PpuqsiyAHYHOl1z2RpSUA1wUt5oVcVrf
+36so+m2gXwtnzx9Z331AlXhSrULPwD+lvd35+gEWpXw2SwD6Rd427URvAUWI7ai6
+eP78OWi2PlAAFbqEuMrwNvYC79hxwB29vEJgO2V3f95UA3d2bJU17pvK+/nYpPWA
+uuqLDN3Ydqkoa++5HoqIdeh9uW4oiiGnMAkvH24012GpiXN37T45cH8LnNj0XooN
+tYuiY/ZoCTw4gawtMwSWSl/htDrkLQKiUSiKdZqBLVXO2wRSjLjFPJEkq+eIBBcc
+hvzj2C4a0sZhk4W1gHESeeB8D6IblgMm4oLa8Fn+4YhiwvbK9Dgja/7iiICP8bbO
+3/9smCWOnnsixho5Jkd5IXWF0+tcfHVR/l+M8bCf+c02IGE/mD7RMFcxv3jdFYU7
+/HsbwU9fCDSCAgXszwM+232kALGeur0riRJ42X1RNOzh82cF1wzYyxHR8JZMtcIt
+x2MWc/n2wJO3swXnItKI1Vy4fjRu2PcJiQIzBBABCAAdFiEEURHac98S2OgSykYv
+LNu/uzeugioFAlwRQeYACgkQLNu/uzeugiotag//YwAVNMHNLmOeAzSOzEf6Z6yK
+2WDgEhsUt9Ykhy2pSc2vUD7jIXSAPTJYI7yY1flDmOe3kEVXXeVcPYAli9Ii5Eq4
+DYJBC0FGboMbzdwh8P8RZGnhusB9MSlXYi2DnWH+oKGS9dQFnhpzn/lm0nl8tpL3
+FrnwhlshNpgYYqIa29yO1EHskiFVLD6pL1W/DM6lMFlmTMjRb+y8eyZtbpCIdrY7
+uhDRVwvJPYegj34KR+8OMo1iDvbckee5AR1Dc8L44KmB3Nm9AgW9o+bEz/kYz5pz
+Eyv9ibthtagBpxU8kyfSuwH1Z3X3qzgpq8QVNVH0Y/5+sWAOEi2NkCHR2z4W757f
+HO16Xz5SZQ5jBrqNFm6u7CLy0X96Sr5FRcefZl/gjqlgNqNTt/iJP48nfKvrEkn2
+1OlFKRFitEC3QOWDTeI4uDFgS/OIUQq7AgqAaxERR3/kbAaVh2R71AsYuXyD57dW
+Eo9PJkle9gSMvWUqRC8/0eSR4zgrwirkmXNQVEj55l2Z5y1kWv95NB1kozJb4aJW
+TboHAHzklAMK1Gw8AMkjsA5PZyXQGkM+kXzUvE4TLC3qnsr5w05yJT2teOaWGK3s
+3ZOtu2WuvuOXo0qd7oBzkF9850LQ83wEuwIfc7XcbaB0pyb6B3EtyZ+pMlRrzREg
+EiX+bB98Q3qhNopjIDy0I0FkYW0gUmVlc2UgPGFyZWVzZWRlc2lnbkBnbWFpbC5j
+b20+iQJOBBMBCgA4AhsDAh4BAheAFiEESdCchsPcjaPwoHYiHvYSNH+KmVgFAlpL
++NsFCwkIBwMFFQoJCAsFFgIDAQAACgkQHvYSNH+KmVh/AA//YVA5eJBbCQQKp1IA
+VWf1vqLdE13hxlw4MZOf4+2119l8RHKwS/mio9ZfmtoTHLqgiDFPEARQZQf5fjmr
+Vl4QqZbOzlhbU1bFCE0i2I4Lypj5TAY2j6WRKwc11mKYmWM7gayMjvKvPrL9s+nH
+sFC8foAkYC3nBeHR26AooLUjOi+jKD554vLKWRxgHMwS54s/U+n3OejxTF87Wdi9
+fB/65tlTw0vt2lrAf6LUaKjKj4sef771TMgXYuJijkbvzP4ShrezBPAdWabf13du
+EK+O1FVURkTZYSpOB0etDyDV6DXD5amz9NNO/N+bfV0/2dNY0Ez3cjko8WuhQRu2
+Q2PrJ5CLRN88KLgrDu8lFLmrQY14OrnWaQb3zVA8LMPhg1jUtXGB71zbhmM92BPU
+rRE+zO8cWOq1ERCw7GesQ6LwsKTI+ceXBmWqH0woxbBQlE0A2RhUam2LS7/etBuw
+VLBqXM3/rGYeIWL8j1fx7yF7xnrly+7BBR46B1rdGPupmRG8U+xu6H5qnHgl5VQK
+j63XHRyP8jew4ZkUSU1+4ueoruWDLTxa85DDpYKux/+8NQyhlfBxw2BJOegzBEF9
+0ddcRg5jkH/rdFXz7lFoZe9oE/wVzPMmzLqroKvwDI3krNuTL1QB+j5fAWl6+N5W
+y3afspmmDA0ql1+6Cmr6UhLh6C+JAjEEEwEKABsFAleWdukCGwMDCwkHAxUKCAIe
+AQIXgAMWAgEACgkQHvYSNH+KmVhpXRAAlauaug8H776O0qOVX7njKwyHUoJS4Ddj
+PUA0XzmFjrLrC4CylxQ5zVnQWi2QAh1FEDVrTWX889kkbPPo+9RK82bkdwMP8+GN
+Bv+Vu2SnJX4haDXooyT1BsmKvN5ypm/G4Xc0oWFwCXFJDxYtEhKKq25PRtP/KS89
+HOqvsD2SDfK2xpufXR6zyvCeXRwQX3iiyq8tR566aXpUg1mDcCtJpb1HGk4M/LO9
+9Ph1aOoHaqSAB85MK61rnYFNqRGZB3Ge91j3Xp188YZW6WFmC+YzdAB0+qGfWHLO
+mT8HmI1X2mPuHdRtYk3AYYVgSSLJDwMdpvYoethPUiOGLraDQSufdEkAcMwuU2n+
+NuRbejtssInsdJuI9ug6hvbkDkj8gD+khPnvg/epSuOGGWckM6SOwkel5lYRH4pk
++Qu3zGj0k0mcOBucvQMpzGJfSac4bhj5TNOAyAMMjYGCQpRaJh3ZhI7mUfix2ex2
++d7xru/amMjTZ7WQ5kpz1EQN7aeXOgtNRZQy9G93dw+cZ7WBJT1MQ0KwhITs0KGG
+b078Z+nwuKVeTDPGxNaYNcYPFDjmfEEZ1khLrD0hT62qOjkO8KdfNcgfEn/xwb/q
+eQoHvT1y5iIyu66DGiuFU0Kwbtq46/5rgT4EgZKX9D/j5oywMTtkOCalnM14bSkg
+vD2WUX+zMbCJAjMEEAEKAB0WIQSrolKVmPZibEINM1ti9J50fZEbYAUCXBE9wgAK
+CRBi9J50fZEbYBVcEACat/K5p4dxhimNvLfUNRMz6t5mW1P0nMeLPQ9R0thp7FAX
+NIHRGyaoT7Kn4EISw0j2Y1icsAgg0G4tx00jIrwnFh3olK1bbUXeIgq9v3OR6rv1
+rW68C9KMMtsg+IPrv310MWqhxh1+yfiQFUFbLLTMUaZBXUCRYYt02vIbM06NNf1z
+mXaBef98KB8PGpYZ9QAhF2yDHVPgSyIJs2cUamiyEyeJhuXuullbrV5m5XhdxY7N
+bgseDGuQcmx7gPmaVJmlYUurFy8N1amodSnAthWyfINUGu42SszDqDagz2XF9R/I
+Eq+4/noOdktyHq9bPGzSwTcdFoEpji9ufiT69TXYSZG+oH2kBCkhIX+Pt5w68phD
+D7uK04N9CdNLUhEUQdZHXm+NWv5GGbXjEzkpZ4raXVe/i4hBDRL4ayVmDkbfVWtx
+FBSyMV3LX7Rcr2rSFKBv9Yo2yBQMx/V/tYMeE9i369Z7jhslEsJc/4tFtLtCp8ck
+il9j/Sj5KfYVYxzzl2g1OWDGTcpX9AO8W+T72iSbF2d12lSxa6XQJIumCZk9A0MF
+WbNTVK3rbmreFwo9q/1xIcu6QakiICqUSnBkU6yM3V4AR6v4Dco9xtJ4f8DHZ0c9
++MrS1LOw212EQo4TR/fflBg7hPVhAd+4LwZvjaa/Pn8Om+eUyZjucSiWuVroBokC
+MwQQAQgAHRYhBGcsZXvga0swlpxKV0YUScJeNrmOBQJcET32AAoJEEYUScJeNrmO
+M38QALMXs9/RAJwnZbwqyZBPI18Zmih+k/2OiryfOCfC9J5kE7dHx+MeSr4AVi0Q
+rACXG1kLuvieXSq+kVw85NRqGWufEEXyK4730YNFFaBUH3KIBUc/zyZcIBLUlnkg
+Gj/lzI0ZKxysEp4gMjPsXPVSAl3aRcUPofbjoNz7HQP4E3Lhy7XzOj+up9bhqquL
+i1QKoOYddhrTKnXyONtM0VmJpYMgefVqR2CExJ/8XsNEknYpHbpynU7KpziJ1OYG
+xacP44r5T1B1YeEQFfrtumMNPbsdKU9RnMo8AUcUnYE6DlrMNb+FWefuuRNg0qie
+hfBO7eIqNHWwaEmlAw80FVa2HoHk7EALDpo5Lp78V/0CHRwdNgIoxDHM46AMvLqq
+iFQ9wMsTHqVqWHLFAfDxgfjM9pWuxXk8R5+8KHyHQ+dY/tYrNBrqu0QV90pGN1k6
+sk7UI8B10SgqOwzOiddthiq62wmUuKWGNq3mepgAldPVJAfpFN2tEBx6/H/UUwBE
+nH6t8NQcHjZw4zh3g2BRq6Ze7vk2YLlCRTKTOBWpfv8qu5DXz66V0/GcQVGC4LIF
+Wtjh0mckHdSRME1JJQdMcSO3+qlE0EOOhPpB/aIVERyju2lXQbXXh8uRMkaDBlJo
+HPgqpRwQ3ThHbiL3WkpGzCjod6lBxUZLauYZ21pl4X302sz7iQIzBBABCAAdFiEE
+URHac98S2OgSykYvLNu/uzeugioFAlwRQeYACgkQLNu/uzeugioEHQ//XPCaFz0K
+N5TJXF0/3s+2ufTYFXeHc9G7EEBfMk1kv/pObFgXx3H7V85XUyMUrj/BBEG96y6R
+aKcsbkySGhL+l5meymPSrRGY5xMw7hYGrvzpNq99VT3msH+j/Mqz3in4EmgXev/b
+7ZBrEVN74M46294//QiWSRaTO8bfKpS3kEixShJQcy4gRDkvjl+FgMxevjWsH9Bf
+0y7pY3A4TFgMDqCd5R4Ptf+D8wrY9Tc4Hc+BM6DPfg8b11QeXFlAdBqW2tlwmnuW
+U/joLeFXwwsQa0Dlg/vveGVfO4KoBMcsfFxQ3XleKIRH/mcSuQFf016MDhI5bZYP
+T7SvkPK0sVkmJt3wGJmuJiTM6HEvMyjGSXYfAHJxePNetQS6oI5A9bw24NPTTHm8
+sPrEd5hIPLZ9kx9y3MwsTjx+/AZ67u4/BrPsFzNdyDp31aKT+g8vP3YTgESs92cy
+vzNGNgJp5grvtDHc/lqe7rQWJYCO6uf9SnuWYQpAW7jnI6rMXctFFDCLwVFH5VGM
+cbq7CjBbQ/fY9fREiWl+TeKQSBr7DV+ssqRxUfzZSYWRnZaDajRQS041qCFDyUhj
+A26P04hT2n1x641ytvO1wvFa8of76Dos1USMeUFV3eQicY98C4p4sxEBCUmIBaOk
+rTgaEDezUt63yR66Uc3p7PsjDaFwjsALKny5AQ0EV5Z26QEIAL1rcALBlQxGsY5Q
+RhIvi351MeZsK0A4hrDQp7pFFjbqlA52UUkkDuyl8/1zES8ITe+l48F3NiDDGS5s
+q6A9ubHCMCjz/NIHL9bTsb/7wyQNRBO+nuqBBvZg80LsWT8b/jg2fLXghIbWrg+w
+r2UcxAV+ObOkVC+rnkxWrbHCnss+e3oEsgkO+8VWpROoRFMsGTf7lqOwgTaYYxe8
+VGo5y8OiMIPJdFDysp3VHu8lnGJZbix2awsJUqyEd+OKqYNKqfY43PCFpVW2m7pp
+A85UvwdGVEDSy1iymjjZKHyWXb7emKweBhWFKbL7kpNSkwqV8qutGLfdO/jf6+4r
+xRtwBkkAEQEAAYkDRAQYAQoADwUCV5Z26QUJDwmcAAIbDAEpCRAe9hI0f4qZWMBd
+IAQZAQoABgUCV5Z26QAKCRAh3Y3IgOu0dN9iCACXC+h3mueHUFTmkNUG0c4OqemT
+RCmaXIbt46kBnzYXx0AsHeoZEYXWW62Sl8auHfaL8zPpOEFwBCY0HCVDQ+joWPJo
+EnHvPZs5DusNnVNkCfy/T7ClkTW8py95tIUfz1aJxcM8q6cXCQuCR1DciK/t1hi2
+c5NOIVHmQGZ4k/o49iEdgq3lZB7EumKxMYItQk6WMl3kX/7Nr9B1oc4SZ/7hhEn4
+rWA33Qvld1qeZmm7lUZGZP9y9U9I6AoJARHwvF3hvFjOvI0O7L4LxU75ee3W3vJJ
+1ZkPzwwLBY3T6m9CIaqOOtxeQg0dlfRBX6DVpOB9ogNnFYwwmc1HX55FKc5J5bMQ
+AJKy+Gs61XNZalag+l9huvilhiUxffg3nijjLcF0Gj9p7JJrqlG2MODTpLBABYul
++yckitJOU8MaIznVOIBTH7IfBtqzS8RxNiAZnpEWi8KhXV6U8nqhz7r62iPGTa8X
+8DpHWLcIJyS79CagsN8XkJRKG7d8R4wBHvv4oumvyTk6C44Uxg/+pX10hV39Ct/r
+BEnt6aiIdbkxfDSdEub703l8SBOjaPeXnpAAPcvY/f3h6f/pGfYFqCdr+vvRBf0k
+Z+DpWXRAYwbl4G7sexffwlYpC3cxLM7ZyntD2srC1XXGY5fGfSQNhDb3PsHCbbOb
+jhM0vksTgCE3D+4JUx3FciNSuZMcL5oGP7TxehjJGJOQT4ehUQg8B00KAeYKdase
+p1AwECB7G0SvEMUqjPkFpWSjArZ57BDui8I8ZvpGNTVfZWGgzMeh/E6611yhxfus
+dki8YND/u9WfjAQ2scMUCi3/7DpzDLP68cp2UGuGXRMs+I5cvwYKdlWbz1r1Rydm
+2eShFsZE7SnUwlEeaypm4IZGUcbmLJYK/qX4lFsJ4oa6VdfSUPx7dUUGUbjqBgyc
+q2gdHSkDsnY9xRmIThE7UarDVeA5GqM/QVXB+xxG8tjabUV7HV4YLURdVKDa4Gdp
+1+bpKSEHugsBBXfgpTl/UnloW9VbhyvjYWtUTsWm0tgBuQENBFeWdukBCAC4LXGN
+UKmFNwyk612coxLXln38Ezqr9BkD4SWPeD0uFEKyBlrTndQUlfGq+2eEmvxGzeY/
+ElPSgm9+xQSiWEaPRxFfJ6J5gzbVJAOJZJ45KLkfKokoj/Ao0wLA1GwqJx86kmUL
+akR8zSZAv2XgT5Y0gE6i5sKmUBPTanJu+QBxi0L7/9W644PdbZmcxoiNszQ3zSVF
+WcoZOB7p8r9QxgW3EeDyfzfi+zvXRgI2hCkGvrxOzkgQurgs+EEypVkBcLwYUHWM
+woYzI+J+ny95jQpEhSYo9MW/uwGua0PjMpcMDA0ddqaqsc1pSUYOMsaq+Ddfv/EF
++/Fwn4KjdT73XazXABEBAAGJA0QEGAEKAA8FAleWdukFCQ8JnAACGyIBKQkQHvYS
+NH+KmVjAXSAEGQEKAAYFAleWdukACgkQBvNeYKehjdZeOwgAtixW73UK6gyyBsvC
+PNW2n7HjRc02049cUcHz+s0D+wMa2xpYIN1EPQBTrcpL7mZZeKmxKzYA9vj3RuaW
+ocoChTBAmQzinTFT173kV1MpQgbSP0sgS+6/p2tSJ+HxmzzNsV5UMwV61IN8xbFB
+N4t+GzWyIh4etBkpUiDjzZg9w9E1pAD9UaNAmNGfv3bt3+A8w5H3KHqMWxfl6/+Q
+Urw4j3v86ShJknPeQ3WHsO9J53QottQuWidswvZ3QG7bAZUjbUPwSCcbjllooIKL
+M4ZPc//4dEnvFl2FLQeIxWm8B61wNA/BZJAAWd1r6tkztulKgkL60NuvkwiodR7p
+pQ1t9rDjEACwzg4ijOl0zN/TE1XxgRaf9avhvQ0mVcqU8Hp2OKFjesdYMsgroXtd
+0KN4S00QJJhTpdgT7MMRCZATzPw5jzdnqjxJoJuwYzaszMTqKGPnFJdBnPQutyiX
+T4gp56u0wH6CmrPFwYHKq6NNGr3bPuYG/d+pCwt18Zt13KmgEWaEdgDmfylTrnQk
+hWzmhAHgCzwn/aJw6sN1GkCfQD0cxdUrAm3Ttt582ImLpBB4tDhPlroHtxw/KTPN
+SMCM0pSQ9jompssPvFjYRMExqLsLZAVWrpK0uvrWom0pkWzvjBqXC4EczxpjLepX
+1AIi+hHYDzW2MizcTEe5jYUpwAr0N44Cnw80RwIHJM1O3XLQpaVGW91hgLjWp81A
+5FmqWPO7Qo+EQtg/zAa7F9ukHGsl+Xa/+Lx6PuoRwOV2sKfFfJ+7xolvwFLta7Lf
+Hu55PURVcw24CMCCyQPcOOoqZEhiAOwNtDq22c1T8x1GyvI9WsZRLT2XGCntDavA
+pXkYs9ZKW6OQ5KWKhkw7ocvTF4Aq5fBrv7noWtN9mp4mfMBaOsZRsuaqQoKRqwvK
+RJDZ4+wzc/Chy/N3fSa22n7QLxHyFDqBSARBGy4hoXgaf3Zqk3SglTvZK1wkIpyB
+hqHZQIYxbE5/KRJuiqcZ//UtmNp/q7FFu/Ytx22lsE8wWHGzZJdavLkCDQRaVQcB
+ARAAzA+ZDFUZ739XOAiZGunhUyQ3g68sN19x4M+Qay95ZPFwl3HLgV46WBDY3x87
+DMpvYYJqLOF/tKlzRymm+7QpyLtIWKX5f8TKGKrV0+8vY+h7SyKaRVNbu5HqPDU8
+ViXzMleQxgy6T39HIuHdAPo9ceEOGM+XB0ESpA1eRjeRJGF6dC1Ric8nUZRMnmTw
+y8xGugv0n7ET47v22cW8TVs2k/ociPVLCF/Qws1FeJRp0CDbg7YFcbqoD4cV1On5
+SypMRnSmhjm9GI3hw2JNM73XLH1lSuHKKIMtUifaKkpUL0RP+Nq+QYAzu8ruUuwX
+pEy/WyiuP+qj67rzQOsqRDUUMAVtAr2FH27kECAHDxHlFAB/ukp0/WAh0oT7tX25
++nM+XcWCoNFRMDhPAAYhlWDyn+iPuCFPdzR5Jgx3hyvgKPDRmrIwhs3VmWEF/dPT
+XpCyIbgSSCEF8JOv0h8m3K69tWWTxv5j8j1gVlZ0mVdv55lnqQtybxPoVnFrAznr
+g/30+vsyoh5dH3cc9MteUh0qYRqDH8Q5wc0benZFRwxH9E3tV0P7NhO7h1H9l0Cq
+wwyrOPEdnySUD0xdBupC2zoqdjCB8l4RQidryWcPcItSs0J6p79NLqdHStBJZogf
+EzfPqL4J0y2Dv4EFQs1LCPlxaLS7TMrjZKdecrsmRHJwofkAEQEAAYkCNgQYAQoA
+IBYhBEnQnIbD3I2j8KB2Ih72EjR/iplYBQJaVQcBAhsgAAoJEB72EjR/iplY7AwP
+/2APBujg1Q/pXeDxLgxs8eGYV6DpTtAJkOYF15A7cQ/2WcmSJ8GywCpjkVgItqLf
+UT/mI01vuJMaQM/aOFQiRHmlfdS7KEYzc2W5zLb/PA6XK8OjELGP2ZgMsTSy8MOm
+ILtxxhPlGRaQWI7zEA3YDYfRg+uP10z8KpFlOg4tNdbXaA7RLdz+x/zP75Hv7C1D
+9wJMLO0I4fmK4sepGq+Zk/pFpuXRMwjO0eZXLSE6sO5P0YF6HrU8TReXAE7gHuzB
+gcKIYF9oNertp4LhplYhrHkN/rg5b/CbRW7+C4jbwszYzQL2S2Hx03TFasp/jTgb
+oX5X3ISY0zDw53aE4mcI1nhYPosiY4BQ647C8SkwLZEixb7mS0pW8HdELRIBJPDQ
+llursCS2hDZsBPS1PcvZsAkrTscsUADvdryZHqo+TkizO+HO+oRBRqltAPHTiBSl
+13Hjd1Bv+wX/hexVe+Ru1i5i6e495nsvFx3S3b/iCpPpmRYXiWBoW2taR1WQz8/r
+0OChc/OrJIg6HZ+sTAnoIGFFlc7p0hrf5jKaO6p+LQCHc6IAcKXvYBLxMOK0i6BR
+BlA4kJPTfla4LmKRg/T/xow/naen/aM9mQCs7k2UAoeqNZ6IfQ6G5BZ81H9JNvHC
+beriLZDBuRy1LJRjBmZEz+UDBgZoR9oz5DOLh8dGVpkt
+=HZO9
+-----END PGP PUBLIC KEY BLOCK-----
+pub rsa4096 2014-05-13 [SCEA]
+ ABA2529598F6626C420D335B62F49E747D911B60
+uid [ unknown] Matt Butcher
+sig 3 62F49E747D911B60 2017-08-11 Matt Butcher
+sig 461449C25E36B98E 2018-12-12 Matthew Farina
+sig 1EF612347F8A9958 2018-12-12 Adam Reese
+sig 2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+uid [ unknown] technosophos (keybase.io/technosophos)
+sig 3 62F49E747D911B60 2016-10-24 Matt Butcher
+sig 461449C25E36B98E 2018-12-12 Matthew Farina
+sig 1EF612347F8A9958 2018-12-12 Adam Reese
+sig 2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+uid [ unknown] keybase.io/technosophos
+sig 3 62F49E747D911B60 2014-05-13 Matt Butcher
+sig 461449C25E36B98E 2018-12-12 Matthew Farina
+sig 1EF612347F8A9958 2018-12-12 Adam Reese
+sig 2CDBBFBB37AE822A 2018-12-12 Adnan Abdulhussein
+sub rsa2048 2014-05-13 [S] [expires: 2022-05-11]
+sig 62F49E747D911B60 2014-05-13 Matt Butcher
+sub rsa2048 2014-05-13 [E] [expires: 2022-05-11]
+sig 62F49E747D911B60 2014-05-13 Matt Butcher
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Comment: GPGTools - https://gpgtools.org
+
+mQINBFNyROIBEADL6FVlqQPC2DAZS8RGYs9Kiqpu486QI6070Nq1l950XxUdudkm
+dM8TH0FluDkq/RtQQmVHIwBdL4n/pH7EfKTUy4ggYIs9v2VPhMp7DVlRVKIXKoHl
+qQu9I2VI3UNM8j+cQkisFgVrzHi93SHxRKRfJM/qPkQYmzsnBRH/2YAodSOmWybf
+TZJToPtkRXqPMm+ZAAtfyhwvwPiXfSnB3/0t5K4WCdhQP601l3fifyaZVVF9GX3Z
+n54i080HXYhdxr32n8xPi+EDPv7Sh3XuQZ+zmYmSTxZ12mBIZgzwCJH9Uy9XzmE5
+LrZhf/s4mus5VO7ZxqOr/pZ2edzu3Hae9SwVa96kntHK4Oc5Ja6AYK17dibRG7m6
+1AInGbpJ5oJMvm3MwQbxLXtonZuMr3F+ivdBqrnwjpGHiTfeeuBGasx3WIJwBvzv
+94rldvEERAc92eMNEW4G+9tK0w2R8SYP4njWZUKM7ngXRPxA5/vRYj8pzpr/uiFi
+YkkzOTo5beueqdAyqaV7GOG2bmzt2Lc5PSGXK/Ew8sLAiOV4ug2QysNMw2MdjF7v
+ek6Hco8U+Ir5YQnt4B+t9piDg3w45WGdNfAe5roPZtB9yYox6Iy34fo1GmX4qUf7
+3i99UrZ/B+wgCRjHxsqborquMZnX9S0BeQFm2RV/0S2l5A5NT6yHB4B0hwARAQAB
+tD90ZWNobm9zb3Bob3MgKGtleWJhc2UuaW8vdGVjaG5vc29waG9zKSA8dGVjaG5v
+c29waG9zQGdtYWlsLmNvbT6JAjcEEwEKACEFAlgOZHsCGy8FCwkIBwMFFQoJCAsF
+FgIDAQACHgECF4AACgkQYvSedH2RG2DUXhAAtZGIlCFk8GkhxoUFZgcR+AfgKG39
+bcEdDVulGX0r7LpVO3pF0V7KrY/Hz55fVrQjF6UMS6TF4dB/j4U4ylIdv9UUyQUJ
+O9bPJwcYLbSLURqA75NeA7XVSHwvbm6hTCcdnwPxvxkfisd/YUN75mlDNeZEEXs/
+/n+2AxlPX8eQt6n/3RlYYGrekle7EUO8IJcqS8jfSloxkUBO201BubU8lg9bmE4W
+uiav6Dgqs1V4q6jheGz+c6BD/PYOysQiet+1Ot0GscvIKgW0w60q7ilzzviOK3eg
+fiq57W0Oc3GI4ihrfH3ppC3kcJFyAe/zle25QxWHZWfnNZ12ZElK7vIQnl1JzwVb
+sICj+y3j2MUkczHtf4KJZe/7lr1G+No1mFYmDu+GZqT/eSmADOF+IrkoCZgR/jMr
+O9Kgfh5U9B7spbHGkA7eZvH68FHRxqvXnUgBFS5hE9oQyTR2EmBF+hpx8T4K2uIo
+NSCoq5pTD3HNEZqBZ+E1NQCGv1a8YiyLjeI32vljH52pjtfbW06Nfb4rI+/BrMU2
+82gzVHxiN9O5Ba8YmBLbkZYYTW0+9rF5w0brTxRS4IfokNNeanIJ+w7CuUhEyf0O
+yOa0DRxUEvHIq6UFibJWzei2dzBIyHovdIQelkmFr2Oq9LDqsBtSZH5quGzeJ2N/
+atK1HR1GK5CNwRaJAjMEEAEIAB0WIQRnLGV74GtLMJacSldGFEnCXja5jgUCXBE3
+0gAKCRBGFEnCXja5jrJlEACarEjVOWmmZlNkHqajs2rEUJzM+qKThr0QMOd8UvYh
+ZpC0+IPOXLjwXpKiZ+7sPw8YaGHw5NK36jWPy+cPdoDppZfRYHp+/0cmK4GI3DH4
+9x/jW3yG9g8ckYCKYscrhev3AeD1UwjjiiQhS5m15/TTOLPGtu4kcWyeTcdgFMo+
+sdiD1w81XA2/zCTJptsDw8AIxJEk+rqBP46qy7kPpawCsO+x1f17tleZ+5pZPYCu
+G3vuaC9ggcKIp9K2oifH/Qn1YE4G8Dz9KqDsS3Ucg50PR2tpd2nXQCoWatezNxED
+tyNblmx29JJFjSMs9nKNdddDmwWHM8+CNBS9mXRs1BttxtWAPmz2Y/9U4wvQ6V0H
+NxZd3JUItOqxkoxVavdMQrbRDLgI8qVXA9LXABJaJ9SccOJfAK+zJVSVcOqrGoVK
+7jQyBoHsMbbGl4P2EJtFNIUOiAvo+y0cA6oboAYnqgBr3ghOXWa7uiLB2zFhREro
+0VoGlqCjbH6JdvjDcC4Vf9mxtVP42605phBmd6OCDXjTmgn+KToRLKd2i8b/eafZ
+5djwipOpyHHxIQ5N+qSI1jxh+58P8x7502kMTHzCoAdxnWk2CT67Imggby3xh8IM
+jJmvah5NM5a0eFIGZs8HNuhkbtJBuF6WzVoieBbin+O6/7zvNaS3x0ZcYJPZUpWa
+dokCMwQQAQoAHRYhBEnQnIbD3I2j8KB2Ih72EjR/iplYBQJcET/bAAoJEB72EjR/
+iplYOrAP/1b4FsE7QxzYgU2ulBkqDGe9eSWPwuqWORwqpYNPy9UNYKrDn7LO4mfT
+GKvVozfR2e8YjDNsP9PfqPjk7OenqiWkzDgwAZFKoFxbu0RFtxA+aMNVOG6ks6g/
+LJH3uvKxqaK0oUTntB9YusdS5B7JOcSzDo9uw+2mRyavxs7aitJmcMmrU6GySmGu
+t5Nutsr0j1k5vB7lFNu7PYmc/rQyF7UK45+Q5RSzW7lsvudR6VM7qjE+eHfOOB+t
+9Kym2siSrCcwsBsrqGtumXksG3KUFubDr6VG7nUX1y0CkZ8FdtdWnsyssuJy/cUz
+sGhoZIXhnP8LAvVS2/0g5U+94K3TfFPrlhq8Dgt4EWOr8icL13QY1ZhlQNW861KP
+HEOTtUoNJPg7DafrkB377cfwANk8K3iJAMrWK11TR1obr5brMPFvRqeb1OsDeTmf
+PGJkm3DePTKydUNvLwd9FwdG9wsoZVGrn7aRQ59OUn5IdAnuZ5Q9eWnEJf03pTNp
+sJ/6cH4XCLy7bM1iim6oknLKpUFWRFxOgMKVeFNQO1h1D96u21bYDXnbKyc2vlIw
+sBZbKkHsxWr8AzmCOrWb1DTJO5sYTpsBQkQANQt/IUpNbg5eMC7zdyHUpLEyqQ2E
+kfrWOoqoTowXv6xZ7Wdd5/OJHwn4PnsKac4ah3tOMzhQYOAgel+/iQIzBBABCAAd
+FiEEURHac98S2OgSykYvLNu/uzeugioFAlwRQU4ACgkQLNu/uzeugiqARA//YEMd
+eLItDPOCtLlEYJZ9N3VheUA78IER84cena7RDI38Rra7sh5M+msNJJTYH+mXK1B/
+2Y8tIHo870I300vQLLDXXjGDFWuQRDIXgNkVpk8M0msNqtvTps1Pmf7fxpSeI24a
+dGwlyz3oCUELp8bXuyY7LTrNMa8LjNSbS5TdCF0xteuMZdDyD03jDO/fz44Oabtr
+fdaIrzDRbw42AxnzR8wrhlR55+EFxWizWERqPLxhXYYhcGk0PyGdZUzcP9YJmjiV
+h0605ct+ykiIm0RQ5/YGWkKRC8LIRDYW7NB9Hwv62kjw2pSKcOWm9kaGHOjfieCd
+XnBvAPv3sAdcfgx5bP44a2Sh7Bsh8BIqrbsAAG+9b07h7IMM6MCFFxd2smNsp74n
+gPR8k4GF7vfVvZherYCB3EhPLoudoxf/u0Ock2Ssa31XStZ+jHb6a/keEPFGnygg
+opNDfw5BlUsys7wSEDOSTE3cdiE7B0hWxC5Xw80r3SxONk3jPczraSG/EVmKndX9
+quFboecUIXGBbsx79tUolKTMOQrVP7KIM9ltbpvQShy6RYpWa0dKTRuUgMijqiB4
+A1SR5gvVgKs/xzy2Bw9TAH2ayGc/r+mTpwpa6eOhu3NOYhqqkENlZ/IsKnX+dX55
+UvSVexlttUIjxCKjvH61Pmdi8meNhEesjVYnsbS0MWtleWJhc2UuaW8vdGVjaG5v
+c29waG9zIDx0ZWNobm9zb3Bob3NAa2V5YmFzZS5pbz6JAi0EEwEKABcFAlNyROIC
+Gy8DCwkHAxUKCAIeAQIXgAAKCRBi9J50fZEbYCnkD/0WpXKEaTdXwqy7fm87An1H
+H6HcHDR95+Ldu8XgmSZq4nbkDc0wjDdBD5Tp25QSUznzJ4pKO/Wd7l6C4fhqTZn/
+vldDpRXl23bqvRHmWVkXH/EKZxh1y9TnID7Ysy9H9qRVdFm/yjM9EqrD++/vowYW
+Sq6ekosXdjTZWuXVBnirnM/MwSZ/3w1tyK+zfbzA5XR/pscPbTO/UuKdmUbwz4yt
+QjSQg+awJ2iRko0USvDG1t7PyMdDNfF+gbzp6qdI/NUo+XicRzCtmxfKR88vD5yE
+FD0DY/6xl9172XpB3h5aI1jg2LTDLr0IIlO2KHRkqs9piqJuHL8uA870ZMvLJN9g
+JryUny7b5PJlmaYDJPc3TmiMUUHTkrcmJq4Knlh7WtrDX8avbc6T8lWOCakn3cNC
+X3O7RW37k953fF3GSgv8otDlySANW20fG5bPN2gvElfHi4LFP9hAXESZUDYuOasu
+dRUHMkqc9BAMqqgrgrrY9Qmk1aE+udVTcVICRoUoZyBFVzDsRQL+c1zVBk/kJ8oL
+e9cqcdpZbkvVDLtPEyA+b4icX41woqiTRfK28BbKCSwSXkqi+vo9pk9Uwy++S7OS
+D3EOjZhox+Zi2Ijcpzb++B2mxX5yroRrPWvHrxIsAKs8ogO9undz+rJbqgZr1PoF
+rV+wpMe0ckRECvGqEz0BiYkCMwQQAQgAHRYhBGcsZXvga0swlpxKV0YUScJeNrmO
+BQJcETfSAAoJEEYUScJeNrmO1T4P/jAUMiKYNqUlYpCV+mvzVwUQWIyPYdgzqO9R
+AmvI1ELCDT1BGB9pLeeUwFXQX/+8+7lGAVLynL7FPPVkkatblVIQKFgvL7XmU6gb
+o73DpslX6hn+clYeYXUs37XToffVIFVwIQkWusZ+X9BkM2TeV5fgoJ4mhCh8ys5g
+RHKuXYnqCIHfPj033GIhSn1DZRecKPWeb07zYZI4SHsBYEM7xfN4eUEXOjIlRXea
+O6hS6N3vBTinn7LnHkRDD9mUemTruBtab2F9Nk3+njzpafMb4IprD5+GGdRacGOq
+VNWFlZDYyy+3Qv5A7mXBYGCaTtH5Jlz4oEibFXvvVzD3IgwFCvmU1S+UD+9l+u+z
+Nk3F4l07BuulhX6Ek55CoI3kbMCovFjPFrXWghT+/XQy6GaEhQmQ12rhUDBBjS2s
+29NImvHyBGX/FHY0udt4fF/h5O0eRw7zqmGeen4yOu60cEi9MVesRz+GZcbdXupe
+RghrhXhfE6NHcp7ciyK0+Y8f3dpeXVw3na4EOraR4w4ae+SJUt56Sbudqn7S6Kxj
+UCKql68VWHfhh1ibdbv1wl8bAHqtSt0FQlG5mUAcN6R/COO6uK07H2rJWtmIiDAG
+nhcSyLY+5SjD7LtRYvZr+SP2EWQ8wHopjkkGfvG1gXl5NQ8gaVRzErkuc4S3yHMC
+47j+0GsBiQIzBBABCgAdFiEESdCchsPcjaPwoHYiHvYSNH+KmVgFAlwRP9sACgkQ
+HvYSNH+KmVhSoRAArTJp7zUs6pp/+JTFfJsRHbqUBP13KAoZCtaV6auJf+MA6mFD
+TD0DpVKdKBGjKna+W/qFn/8lpIjxL6YtQ3/W1j+d+uhd2OPb44atpXNuxArpCqoZ
+zAyx0ELmgP1YbZ/DIRKv0v0nFsmP4jd14pcclFKGLqh/tK66n3+mOH7zSqltljV0
+9A4evtkI/29/Jj2I31j2rthk+gJmAYiksXVIZb3Hoj4VjFaW0D3/d7Bc5LaUCY2Z
+6GXa208UjBfumKRtSWGXDaz4LmxoS3+H3xfnm3APQIryaSc8daBY0BjDwORa2gUB
+9rddEtSWbVZvJoIdAa7shLvR+eYubMCOjmHc5cV3rG0AF+5pymOv+Z9pAIj8Uzfs
+kXtmIkoXPRfubeb6rNx66fKakgjXqtcGfe0VdYg/VJiheVedPmqBvePFvuUGvROa
+TzDdKxKqi+AR3+JALfcue42xbNCTqWW+iercuKz6gpNukfwuDciNMrH+Ggg8K/FL
+x/NEQbVTA+IFZyuBtiRv37gDNf+gRK1buA1OJg6rS1US8CE/brOWEhSDXN1wJ9wM
+JHtM6xj/Td/L8v7BYOXbq1ffuuXeX7OOa2NF86yTthS+Hx07y6ivaBRIWhf0DA6I
+lQkoKtJJ8dgWtzRgH/Dl18nhgjdqhyaQXnBclxv0B8M3tbpeoJYBRTM/7haJAjME
+EAEIAB0WIQRREdpz3xLY6BLKRi8s27+7N66CKgUCXBFBTQAKCRAs27+7N66CKqz6
+EACB4UuPAH70NzoHo9utcD9bzMj0PRi3GKh6MMm0CsumM360HfN9RftOrB+Y3mjq
+Oyl4onqz4hWKWWQayUsI3T0YiDwtV3zeGkvyKGMB2gZN/duZplHiSj95Jv7HPQRL
+kVo8rrEPboI+EdCCOypZIu8K9vfs/fTrsx14dEy85cOqv4J2is28zOapFoR+79gN
+pErktx0ftcv7e2fxXQB5sUAa8k64bRNuVoFXz1HH6T+7641DwQutGAEFWug/Ythj
+vytNBlcq0bxpzVwC2RAbPrnJdRu8f1XM4jBx9mJz6NfHGvSjEtlAuc53Y9DvJEcZ
+uKwrN2NmtJ0dkO81NaU6B6oT9dwTaJ/6hwHq0WNvPeDcoUZxrh0XXyuhjR/p5MoU
+/0TeiwA6HByO+/wQRL5ZODUag8xlsnXHMxwz6F/mqo6OirJzflJdJkm9kL4UKjCB
+r/jzOmf6WVQEfjWTFmv2empmxT3Z4ahR60DLRCGPlc6v7N7QshbH74b/NfbP7CPt
+SNqQwiPzSTiNjr1SZhMFJ/Zu7HS+/ysXyPw6Ku0s+8zQtkstV9+Oo/mpfm27yDih
+scWIZTmc3RiZmL6eURA9tijdB7ZNuXxTyKkClUfnkiba9zdBCZT+n52zXY9aR4OE
+KEYFXe/x2A2hON02AP/lzgjEpg/3vaSfLrzk0wMeh+yYjLQpTWF0dCBCdXRjaGVy
+IDxtYXR0LmJ1dGNoZXJAbWljcm9zb2Z0LmNvbT6JAk4EEwEKADgWIQSrolKVmPZi
+bEINM1ti9J50fZEbYAUCWY4aMQIbLwULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAAK
+CRBi9J50fZEbYCtzD/9WqoGpj/rKKoqoToj3hInc3Nv/Nxj9quJc2Z4gxmnwYlB+
+KhZeDlfCytkFFYXgl4bB6KcpnI/OW+hynxR8jT/wIvD5E3wIUCRVJfbdmKBiSha5
+KMDgLGmVpJbVG83s7mN6BlgYPxUa7dFXI43mRBkt9hCnH7U4vwx5rtLlR5FEU6EL
+sjYiWc/zyjqZFLHbnlJ8tt09zKTVDF4SfdJz1vpDCD1exY7LZtsaL1SpE+fuTq+5
+/Z6MvMQk4bJcEbXzrIF1U7C7xIoTv/npv+eb0xdiPto1UKs6C1o2rIdvxbrDc8zz
+pWMRSPjBaOuey01rFKrkpSlxuX1h6HQSDyN2Q7WmeezLh3RgoTwrEnmy/Qi5Ze21
+pi2ygMtUadxTzZRi/IC77s4FOlrnqx27AonEzRQHTtKXhLKrrXD6HQTerf7W9v/l
+24O/QIAdX/JlhYWHQGPHAWe/3o30XkeM/Bhlt29SAnxeWhTo3oa1EudXrAe745eM
+rA/pdAHWgqIBi5KZP0j9nQRtxXN/ZP3ASKjs1CKw4OnwpIWotUkK0XgMemJTgBYR
+FmNUPpUCiLTiZxfJbcQt3khDOfQ53iR3xSLP788MHO5/zGqKcOgnjFlyc8QLdLSb
+db52l79ZaeXamakEEfaZRnR3wtZjWvLk+JnC9nWEVflICnLwKpLoph+ceVe8j4kC
+MwQQAQgAHRYhBGcsZXvga0swlpxKV0YUScJeNrmOBQJcETezAAoJEEYUScJeNrmO
+ZJgP/2yhVoDQbp6T1ngsl079C3ZwyDY//TfKXUwAJJgHo84IdrLWhYYTCo1/2nm5
+rAqmDlq3OJsUMucwj8opocEIBM2HWcRcwFJgwC3Caq6w0vLzmt9Qm5eGIwSPGH/Q
+7w1YQj+6x++xyYuVdmChVIIgQy5TP2cIuM+c+T2Zq2vTGKV6VNKQpVH/o0ymB7zx
+5ZSJdsQGuNWDvZbwsVrYsbbgEy72iO7fVvc3aYUVXL1gvJjAh4GUKApsLWhJGG/G
+HoYvl0PSTpb++HOGwtwbG+GG4ELbISfyrs4JfMUvRA2hd7MZD5BTvO9hzWeFAOJR
+ze5gsDkUCMeJ2D1yoVONHhmaZ8k9xy88p/NOC3iYamoixO6vVkOsCbOhzHRVlj1a
+VV4Zs1RZ6kMgm0HBgGFjj4IjWZy39G+JWfjJuLpRAVOwPv3Km2ertTtJJjkSaTA8
+TRG+/ZjgEse+Gio40aeBhm/2LvM4A1oHe/gzZXQZrHKIl7sy9ijQowj1wuqs+oqz
+gdjjBp/DcU3qbTo0vJ6ACvRjQVcIhIdOypkn31uhUSA1CtHT3TNau4/D+B7YNtWZ
+egeVXWvzFZukXkYzvbDjMn9t3PYHMPKaPYynBGuFPO0fMBXouh4qOfXywFemB+zu
++ccZol3zPzBhdxInOhWi6wjMBSY1zae0S3Co34CSylZ2lu+kiQIzBBABCgAdFiEE
+SdCchsPcjaPwoHYiHvYSNH+KmVgFAlwRP9sACgkQHvYSNH+KmViCHw//dGg9ochq
+Wuh344h8SSqq7G5d+Hch7EIMCykPlDCkmO0/FsKEmMQ2nMySpkm1zM59pHvADlbu
+tbhtLIk8kAjsfyZF2alpTCn1gxRVe/aXBsgmAf/Op2jf92zPkov8bXw7x1oFZ3ew
+fWFR8bwG0OEK8kr9jpkCs2lRv7kG6g60ptsCWDkJGiXpEyovUF0W3ZpCU3RBVUIC
+D8xMTBJXOiCYtux7uDpGJ9iYBGD0eUWxg5OUZs6Gmid2sr+rV4WIoBJEgGUMq37f
+d+loYNwm36GQmU3ytWx3ZduCruNRf5XSdws+nJU0rPb51CiacPp/g9PR/9f9i9/a
+yzao/7pA9/0mfiAXHveK39iNqFH4V0B4hOUzRWWWJJNvZw195LridOcmmgOLTWQJ
+iWErD0VvzZRrf5vf8sdsRoXx1gHrgb2ana3ThfRl+7gE9jgkrEZxGZBh6eaxyxpc
+eTJBtGjcAgATlKSZSrm9ZLI3Jyz+1R+uEj1LPY6rc05c1XU2l6ucoMGRvu/Vs2m6
+XBiyJeqX5yARdVbiTMbmGI39SxoZ4//KJoFjs71+FxT+sZ3syfQyQJjaS8++qB/e
+zmhF4Ab2wh987um42q3Bzl3NXnERFO0Rq5R3ksgBb2ns93Sc6WQPV62pUFxzeHX5
+BcW6vVjL8jkrJuMipKetoZGm5Aimf3oydJ+JAjMEEAEIAB0WIQRREdpz3xLY6BLK
+Ri8s27+7N66CKgUCXBFBTQAKCRAs27+7N66CKrU0D/9tCR/N64xwcY1eq5tEjFb0
+9T0L/aP39hcCOWeMwD2AcA5qSM1Fr/gqCs18db9JqZOcTEISrStzQ/ciGj3Dsnlz
+7LjYVicQjwNK39YxedfAuU1kPAd2k4KujpE8o9b0Q25nsTOth1ZZyXJIIsrywwVS
+CG0shyi2GASuZOXIZOdkYI/SIPojZdQKG8Czd37Lo+2mPDqG5lTL+LUX0UoqEFR0
+AJfsBkZhUodUAJSxzT5sn9ZyBOabAbdFhiwjMHTyvFOdFzIfc5pS+/OqQqdhwxcw
+/FyCFyN5WgC6nRxEZqvW2jbB7xphLPWOWxWbogwD4QACQO6ih6pyUAvkcPGRTJRS
+j9AuJ7cX1iul0hwxjFifoJGnDyNB0oDo7qyfcOQ5lKlYE6BWQiQHcE8UX4pn0Tk8
+z18pSjh79LxFUgKH9Rvv2eIjE0V/GYFh/RiPxopBRGcqnpo4F5mOvLbeNMN/lX2s
+3g8rkpVLa/QWf/d+goak/zVWqLmC/5OMTFnEWrcTu6dVycoEiyfKgf8LRZ4YrSbv
+jkeHNuYwM5KVgv+umLOw/p18mUueYmEvpsmQ55Ri2UxxhNWizm3xEtLo1jLGfBUu
+7RwuZ3AuR8HMBeOHsWe1/MSpZgmbL0V31mfC0M25CrBs0qBUB1QxpCLv+cSa3Acy
+CMfQS9ln8Ydzm2sHPsLZA7kBDQRTckTiAQgA12JICQ4oNax8PaljKomTwuFTCrm4
+6j7Z7HsBM579lqkQmsNaBu8euQF6C5WJUE4aflBIa4Q8vqinZirkdUNvkj2jGdKW
+XG+KwGluvbd8IhCvD9ITV52/Sj0V1PqZMcKktRpEczn1KY5BjILXKbtlp1eVa7Ha
+VMHHge01c2TH6jttOtasUFBkT0jD/Zd4fO6l1e9cN3e7hhIO6HGqcrhNIaHD1ikG
+6VjJU/ndP5qkzwErqlWF2H+TThWaY/PO0zXp5pXQ8geBWPfnw4B6ZvKzoHM54vd+
+aotgoDrNpWMkksm16oAvctXkg/WSt3mzNIHQHQZSYN/uorXek9R8664MmQARAQAB
+iQNEBBgBCgAPBQJTckTiBQkPCZwAAhsCASkJEGL0nnR9kRtgwF0gBBkBCgAGBQJT
+ckTiAAoJENzV9eXvMsNFID4H/j9fGdHyPQLDvH363lsGx62l5zlX1vL8rjleZMTR
+D+JRQJ3MjSgEIdEE8gYLyRmetPsrbQKpOu/uGVs+Ef/SDFj89VhK+661DfHwcahN
+XHPTjcNi6OUlE2Z0DXdxgb4czMZkDf79ga/sf72S1uJNQb83GfYN1QfLq+MXsBmF
+LfYU5RkoF7obgVQFAs5HYf3RqCribdNEhGEZPPG6wNcp5DC1UvrpotldqwHZltFS
+dPPPUT5S/kpcRtqL/bilPc4Pb7qKQR1Huacy1ca1DAEP+TvhvgMmm6ExVAYiV1TA
+ZBfOUYC+Czn7ZOGJ8Q4AN0yno4IOsmwBrxaUx9+38I3rb04nLhAAyUUZZGp4Zfj1
+bJ/pOxZ2H2BqX3fstN9tVvZu47D2DoeF6T6x02HIV7oVQ1/haMnjP7rtsWNjrl32
+RkMkbvwqsnQwcZJrylQTxYuzy4IGXak/tlEcesspsG6O34pvPoZ1c+q92jofPOzl
+W2xnSTtKlt0Fu/m2WNg6s8tfec7emi69J6Pl+XMAmQkihXF+j4QuXYzSV4G97W2t
+AMxo5d3GIQ4UzcxhEvTH5s/S12iGT0xfy6G3yEqTzFgByA94BWN/plzgaaV0bNDs
+uK16Sp6c8gldHs5o5uI9wtJa468dj5Ll9zJZOdC3UN3kGDY1T5jnctnfgLpU081c
+tfz7tr1URFiq2LYlxpEUC/OUFyilHuM17RacLLAM6+9s2bYFD2uAOfQyUJaUD2z2
+/9I7WRaDbL0DMhn/QZPdhLZSMuuoaBEu99NWBGHMfVpDmmPQeBLTS5l4Q7lbrf6f
+zLdb+3Vuhifl6w4UTPC7Wb4qowjtIiaqdtqpsqm2LE62xsvd230wWT+ipGhBx/B4
+soOh2lVXkEGL+nEPTljBxkumkZOTxJl/EC3cFEtVKGCw9Rid8nUmc/v9LcKaJQDO
+AZ0oAMc8eSyxKGaW0pePlHn8k5cds1w2ZsSCXuDGNYHATp4Gm3izEGLRsex1KYq+
++dysRCx2EZMDsaCAUbXNrt12FNhgzh+5AQ0EU3JE4gEIALyimTnOi0q1WouENJKQ
+RlpBsZ25Cxp+kc3Ttws65cFYV+3682KMRelDvZ073JRlyMbEmAsxCitrmsKfI8+9
+3TVg9XS5R9RynMpRiyi1m6sHLbeXG6LaWaT3gyzu9VC6EGoadf+l8/emQD2WeDJl
+fHJr+QivlGM7hdMvjewj7Wp4+x0JclhNsgjYEUkF4ajy/q+A98YyGFybpOwqRoLv
+U5WXQGxuh0LiUjvpLyrIEEFcvASCNOAJgpN92G7nsNDsXpWjwmUDYwM9uJipbM+M
+kyWJykiaii9tYg/AzsFN9Cr0+w07IEX8IfWmY9iGjOC3eISwX/vx/jtrI9Mj8Ei5
+RKUAEQEAAYkDRAQYAQoADwUCU3JE4gUJDwmcAAIbDAEpCRBi9J50fZEbYMBdIAQZ
+AQoABgUCU3JE4gAKCRAZcP/x+neS2tEuB/4tvkwlS/aJJles7+n9gzlcWHeRHECG
+0zTrmQr99uTvBaYewB6gSJK1YrM/ocOH2k6e2EAfYw+bgBXcOpb3NQePZ4vLCAkl
+6J40ktwyWBOs8uCAdBX5Ngkxhiz5oNaxQqnBU+xfovsbQJrxj0S28DBXGDR6npI1
+vqjrsYBoPeo4YZu6pUAp6wW+7eC4eHVK/NIogw0XxA3VRzwvzLK+aUI5RbzyWwYY
+PDfzXrQRqeUqCF2bnnsXjDHxfqjfoWrnK+ATGFZgjbF2wHhPDRRHqAx/ggn8K/R4
+rvhEKFIqxQHrfZgQgsWregiv46Ph8DBEGcniqeIS5kRQi5y71IB7ndb9cB4QAJIb
+BQaB5GhsjVw5bQTsZWMDLoweaR4kqP2eXgx6HuhRw1XcP0ZNNv5//L7tv6tmeXgb
+RO17JzCw8+g2ZFq8Wbd6v9MFKefh+FT75Vvb9jV6h2NtQlteKQ9mpXVpcxZ0pKDb
+hPzrjcI3Xo/zjYHFjTk2VAxWVPtamBN2eCGc3ggWifYnmuCctxHlTZNyDyrfPwJ+
+Vj8VuTsjd/7b8VVLd2lpzF2m9M25z4zNgxzldAAr4F+bIqjPJUVY29pZFyKKqcBG
+zCYBTlB+yiVqjXOyyYQKwE3nkG7UrlsQdQEI/wjqBJtDpQ/w7NLPKwx2633dVQAT
+omPKujL3klFlIdof/5+JUDzmg2mC9ATCJ4sgTAIodo6hHACQT2OuKmAHuCI1oqBs
+7A3H1pPk3HZVKy7LbdQTy7QTzpBiUHklOKWlWj+ugWeABTZZK5U9cm9vq2mT+rcB
+Wu94GriSlDo3vobC78nMDZc68eV18onQpWTlzRsTVVfOjll/8ddtruVkCVhtfRxE
+ANQIfZg7P8oNxVDAX+jIsTDxjh8r+S1wsUQcTNop6JMicDbxrBRB13vYIY0Jg4+Z
+9WUiKCaM69kbgcJ7tTp0skcJ+rYcjVkTz2/P33/FA8BMDUwCR2FovRnmq9pVjAAP
+hS0eN8yqaR533ire0Ur5Vif6+z4A0ifVTZ2hY96B
+=nEJu
+-----END PGP PUBLIC KEY BLOCK-----
+pub rsa4096 2021-03-12 [SC] [expires: 2037-03-08]
+ 4AB45F1CB0D292975C6371436E2A23D806B6E6DD
+uid [ unknown] Matt Butcher
+sig 3 6E2A23D806B6E6DD 2021-03-12 Matt Butcher
+uid [ unknown] Matt Butcher
+sig 3 6E2A23D806B6E6DD 2021-03-12 Matt Butcher
+uid [ unknown] Matt Butcher
+sig 3 6E2A23D806B6E6DD 2021-03-12 Matt Butcher
+sub rsa4096 2021-03-12 [E] [expires: 2037-03-08]
+sig 6E2A23D806B6E6DD 2021-03-12 Matt Butcher
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Comment: GPGTools - https://gpgtools.org
+
+mQINBGBLvGsBEADHfZXD7feUfyNQoCwmDYCmygvIGKJxGkgiyxecbGieggOGVbNy
+1N0F2w/HHHW7uanlCsrB/wKnSmkNxkp5m1vfcmg+AorjshBJZCjvNZAX78yOGOZk
+7UQivwPhRWvJ8fnzwTd7ls7bz7mggPT0wVuBsrHtr6mfioxxmVq5ChTHKER7uFRL
+23bd11x6hurfURgDuYPrCaLyrvHmQs7CCe2pxJVLFH4kXyzNoea4jZEbOPGNLXB/
+war4QJaXtk9rLqEQ6fp0iM/s7N61eEcrj18HDLj9CTUB66UMTlDKUZUV+36502Ae
+I6lrrFSx8KUvK9fcpdcxXKYoaY5t6BIBUS2JK8fCrTgyBdTPQ1J7z5N4GvwYonf6
+FBsQpC2aY7wBAqFEbZ8xhdB/A6gY17542OSDhcto3ovdrbLkPaPKHUDz9WRDdR1U
+VKAkNeqaf6h00cyEjM/IN8+Ni+Bwz1hUrwN/9qcKkhsaJK+D2z/f+Fq08+8wHm7A
+rf/azwtiTT21S/Qwmg+ISkmHJiUueuL9IIIJv0tsgxZ6MsYF9tP2NxjBcmtketTE
+h/oygKhFDiK8ybSRftCatEzJuf53cfe4fNIJpacUbD/QM8tGgwrXOpAz26Flm8Ki
+drw6re2mvxnDKOua7dyukq+JHR5SBEzKv8WmaNEgzEDxPdaMa6+7mLcVOQARAQAB
+tCVNYXR0IEJ1dGNoZXIgPHRlY2hub3NvcGhvc0BnbWFpbC5jb20+iQI4BBMBCAAs
+BQJgS7xrCRBuKiPYBrbm3QIbAwUJHhM4AAIZAQQLBwkDBRUICgIDBBYAAQIAAMZ7
+D/42lpQArXi7unDfG1K5dksGWv50S8dPy93APKZkxSqmO/LxMxOSUUq6N5NSh5FO
+WV3o9Za0u0IfKN+cje4ldkRGaxAEmoPLRaB26lztv9AzkaBUh6c4q/MsUiuExJMN
+l9P7los6B8kCtxddq3TjTXf1FVPxT3U6Orprmh9BNsIdw/N9K0teUJjEBl5ui7i9
+WqVvbbTy3I34ae2tCdN98iwHVpkfm/VYuvqtKcgzv99FcasvAWLPr+z9fG5iOx54
+WthG2UCXf4k75W8Ddd5TD8n/3JaVZX8UUq7EiURRD2fFtqMce4PCDYia2MZybjio
+qJOvxMGOr981JMI5uN+2gVKe+A2p9s9ittvHtnHQxVWd1O+CGFQg87+js+0BB4hi
+WcYGdDPh6GhpYx38In3tBHxzIfCitvKMOvovFpV1j1kYaMCENrlaO2C2DWHALCX7
+unpvrSb3gNnCFzB86+PJkwOSRcWxERdGY8soZacTDoTqUrwCraR4/KgZk6JK8jKH
+t3w/a9igvwmzZuUrolAiv41zywDupl/wYOA6uUmvi8GxWCGZ5sHRuLGxm+Tk2QyA
+QA6seNaun7OE4gvrTtuA/2AYAy/NVqdVdjHN4oOIFPnsoRfW+ltvWsQ2fBsyG0mW
+A0JT7aicKCa8aZZ6ZQtP4zbKMYxJW4n042hiYcgrdCdumLQpTWF0dCBCdXRjaGVy
+IDxtYXR0LmJ1dGNoZXJAbWljcm9zb2Z0LmNvbT6JAjUEEwEIACkFAmBLvGsJEG4q
+I9gGtubdAhsDBQkeEzgABAsHCQMFFQgKAgMEFgABAgAAWkAP/0KjQDI1HyFIT5GG
+j0yufkcmRZrsXSy57eUpfL1RY1OGqTnB/dS4DL6OJX1GaXOlfj3lwjiDl2Y1pHAk
+oncv6n5AAXWfvWxkDJzxqyo8A6FhS+fOgoXaKBPAH5/1CgilNzABNIlRmHwJ4uAw
+TFP8v20Ug6gqaW9lSH2PXtZKKf+gH6lBB4YwNnzehnIteX30PWhhZ1SUib0jJCoc
+6H156wo7G6INzZepg+hqI1ly/XYg/XzL7qRvIREtALOs/7qU04+x1ny4Ys6G1ZAP
+hI0sxfcy+qbSqzb5+7oYg/UwrbwIhs81HaTyQLa4FOYKGPyg1GkeJpzo9EENRgoy
+u1Dmd/7S/Zbszj4kakF7INMByolvbHvl3FMLAILj6DwFxakI5kd1V9XemYPSRoLA
+wzeUlzYHrK5tD1Q+EdmTGBpmVghFuN0ov/jja9tInF/ZXra4GdeCdksatbkUHP5p
+xb8BCGmJQtJJ0ncxdn3zwJSl+5qFtdaTmMrc9p20QYiwKuMupHL6+hkdhwncbRux
+S8x0dUm4Fn5EnEcejRiLu6Xs6cmUURZyWXEkcUW2i3+cvj+1dkp/HPkStWrBceyb
+VarypHX5BhBGThdWiDT/Gl6W7uycFGm8kEUF9bGgSvly1clwRskj0cc6IZnSXmNq
+/+efhKkDyQC3krStcwT2/HzvtLgDtCRNYXR0IEJ1dGNoZXIgPG1hYnV0Y2hAbWlj
+cm9zb2Z0LmNvbT6JAjUEEwEIACkFAmBLvGsJEG4qI9gGtubdAhsDBQkeEzgABAsH
+CQMFFQgKAgMEFgABAgAA3TIP+wSoWwwicctBVV0Mu3zX+9TOC/QT3pf95la5PgIV
+fu6S97h7ePphk0ORRFe4qW5f7IM0iXWTN455h1ngnZGXn5tG3JtkUY616AnmK1fJ
+MHRZRCJmeD8u5SzCCZGBlL+n3Hp6gOR7q14hhgkeg4oPiFKSF75LJos4JYEeCIYN
+WyUa2yjz/glnzrA/zMeRQ+acRXj/Aa1MlwiDukxpIaHzB8U0xm+V6AgWdNzP7T8P
+Daxidjgkjk3GGAK741z37avP9MFYUTd/Pq6Z2uB5xFuaB2xD5gJcvVYMBJQtYmtt
+AmbzEZwYsROmkfCmS9jmlUFaMbKdAl2do/0feX7Hw29fhVT23tYD2d9Zm39CFXOm
+tIb4SDcteyqeIOhQkLZgKLwJiwXkaLsHPVZlQljzvkQlW4qRGvzxyCWWr4PZovQG
+ZSyFcO3XJk2hswijbhM3rQOxtOL9GJ9U+khnghLfmet5otSl0Gm1yW+ub7AynXi9
+JT+kMv2QZfPP+jZjIeBLC3yItI6K/+0qI53JMswKDvQ8qnmeVj++dquSSnSozXpa
+npqxrjxAhZ905UrPKqzxd9lJUegfB4khUBC/IuE7HTkFnZz/I+r6IfJ031YZK/lr
+eeCQm6DMvoehR+4vgo+APdvclMmmCWd4TBTFBhtOZvLX5HfMU++YZC13AeDUmzOp
+edRWuQINBGBLvGsBEADtGQcj2nLThgu9QBKN7Q4TCwywd/RTyJCZm2aq6NVs2iYP
+NGd49RmHdzYbiSgOaSSIYODevDB0KFK0/D3YMjEE5oBpf94MxGDOfq/tVEVOjiOR
+rwW7YaKGpxoD0q9QB+CI4+w3Dhu5Yiaiun+carXPfhxaOvoYq26heLipZ/cztgRK
+16bqoAn/Kl2/yY3kfN2YRBgHFaLwkKFAKD39QxbxrCTB6YuGLhGOI+BLv47WlECi
+TnSM//k80jQVEjuvoXZaFQO0/A8O7vIXF2TarVKO2I2HPlCt4q09ub6rmmqn2MGj
+2gwYR1lv1vQZMVevJOe+4gwGKPCicIbp+JX2CN8n9lorS/PlYkUSNZehNhEaBKUK
+yl5WFY00oGtjYKwRwStN9m3JwNPAQES9EYipGi4YGdsrTa+MtsIZQdnbaMVA9wlU
+sNMyoTBjaGr79Gu4cPLISy3mNy6LRivlEeE3pxcziUj3k/6dLEUFgTfgmH3dGJ2o
+c1fqF7RPJ0hvzqh6pG9lx5nkUtpG+s8FC7hDDnuqVXCS+4rPe13sEFRlM6l1YAiC
+hXeApBhvpqB71ydiVR/yHua1H9b49+1eVeWzfF6XPtUSSCkwH7W1ZWx+8yUBi6zz
+GUgmGNJ4m0GglCDPXsP3w7WNJoPAU15LNsi5z59bjGou3OkI5czPTKF7Q73znwAR
+AQABiQI1BBgBCAApBQJgS7xrCRBuKiPYBrbm3QIbDAUJHhM4AAQLBwkDBRUICgID
+BBYAAQIAACVcEACY7aIw03LMedYRsWogFn6IkpdbqRVEYP5Zjglky8MFIOQv81j7
+Zg99BB4V0lyvSMSlFmom4BE+Sq6EO3uuqC7WR+7GL3p92AyIF9EJIOAg9FFH8eRn
+jk1jA12Zdx40V6okWpy3C/OY6D6du17G6AJ1NExfSWtbxXknFAbsv2azQpJ0ATdK
+xEPun0PGlOhsg+Bu33k7tQ2P6/4dJT8c2e8QBy/kedj3mGhrb9Ymy0VdOn12P7kA
+oVl9TvvQV64f9YSToQzDjHTSP8dxiEV7a8SMD4cm/7sTLF1a7LW8lD405jxqll8a
+dtj4+yY/rfSN/rDVoTDBkc6habYL0G97j70o02nZYJtukkIQvSYdYARE0OUdwb+y
+SZWuTxT340LDJHUwmDpFyk6L6MTaCwlFPoi4+0FDpjdOngEMjMHe92vWT1gGhk6B
+uOKbA/wFozjv87y8T6bCJ+dA1/TqhUT7UJBKJozXpOpcYapI59ZmTVu5V7WwFJvK
+JlWm8DSDpOI75JRRy3DTX4UmYg/nRX5pfLPsxq2JQW/QnjPLPJ/y+5Y++b92wWrP
+AirPev6SluPhLJ2mswaK3THlhOZulKO/VIEJ6g50m5Vj3hdYf6sR603yK9rP+3iu
+IagTQt2SGfW3Ap0RO3Yt+w29BpZ1CZ5Ml4gAYkXz0hiiMnVRhlcLIOHoFw==
+=h3+3
+-----END PGP PUBLIC KEY BLOCK-----
+pub rsa4096 2018-12-08 [SC]
+ 208DD36ED5BB3745A16743A4C7C6FBB5B91C1155
+uid [ultimate] Scott Rigby
+sig 3 C7C6FBB5B91C1155 2018-12-08 [self-signature]
+sig 134FC1555856DA4F 2018-12-13 [User ID not found]
+sig 62F49E747D911B60 2018-12-17 [User ID not found]
+sig F54982D216088EE1 2019-01-05 [User ID not found]
+sub rsa4096 2018-12-08 [E]
+sig C7C6FBB5B91C1155 2018-12-08 [self-signature]
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFwMUAcBEADplQ+msULZ4kt01bXDvZ66MSVe5Fi1cPqAa/5/ZtaHZWSKrcN6
+K0cadpozJp74HSZzORLYV/50EGwXU+OG1dFe73FbsTCgQyLCbh/OjT+Exq553g2D
+/IB4/6/vCs9XXiYdKot3P2NsHI6RqeGqgW2IkFVsMXO2Lq1XKFTWQniO7PhHW8nG
+Trub7HrxR6i9KHtVtxLs+XoXY7Jj0gB7WyRkYjHLXti4VtvcBq0WK3pSgEIy5MwR
+WDepmle8n8EJZrh3T323YM41MXKGT00wCSKMbSHJO7QssiOda9XluC175HDfihm3
+q5OKV2ZYIbChsQxuJz1Y97hwZ5KkLn//W2pxTdOElOcynFpQNx7D4b6UTP2DCCRc
+n41SiDIyHg25cUXXAkJWlYRD1koGfLBipJA0DcKqlh3W+8zNfngZ0PSxwFtJwSre
+Zx5I5uHAgKO5nS4hLxGYUMv+MsSKHMYR6qkqFg1Eal6tTa68bPFTbzypDmMUKXZT
+sZtZ79WoIUU9D3O+F+Z9rxwaQ3Dv7J49FdbLPB3zqENqX7OWHZ38m5dsweTFhQi+
+4AaDLEMiqMi27SfPkF1/+JDc1SOoLVo9QgukqhFlz6qEIbud7LUfpeKBRNJsfdr6
+HE3cH8MWHInnlJ49De1oLl5bwAwScQig5jmv5DZxN5qdTg64vgoreBLgsQARAQAB
+tBxTY290dCBSaWdieSA8c2NvdHRAcjZieS5jb20+iQJOBBMBCAA4FiEEII3TbtW7
+N0WhZ0Okx8b7tbkcEVUFAlwMUAcCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AA
+CgkQx8b7tbkcEVUIPg//cK8zaAOUIFClQ31TeQnwHsJwcBMeRRegUjt4cIYSjv3F
+oNe5EQKxgpwT/1iegvRURjxHbhSE0paQ9wsi8Tr5Ygs0DJLDp4PJVZtVHJ5qB3XT
+VuCiqyb90kIG+ok1m8FHBwpeU7o2a05InYwJLWowBQTulhrS4VKzrPs2kMu4z5Am
+5sbs70f0hRLUXGmEAFUnZm+lpF48/PCMNSPrxgZ6rtNqtXq8oCyPNvyO8Ou130tv
+TSoHx5Tobz3RnSeDXpidC/Z1rQGq4Spi+a0WwC9BCArDvtOrQBoeQFgdpy5OsE/t
+QfQNEffyLRLlAZgXewOC+PeF+xrz+ku/rMBlt/h0h5UcMS3joUHTRZyQoA5dzWZh
+K8pMXYppHXJtxl0fNSG1rH1vsTu+Mjxmd9eBwaDxnBFwzBOukiwSUgW1r2DyZ7S+
+25ZW62lcz/E9+sgWV/PGR+YKGbsDdnraPdQEc95k8NQnYsX/7YO/WXJfP7Cdqd8X
+dAAVW6/dGUp2i4QogEYk+GeJz3rJJK24Wzgf2FJ32FuSH8uOQUH93h5QZbJAD+pD
+R2qZAZjCHYAgvSuqnmDlefvG95TwIVzy3OFWbLwp6YyBdyZrdpTafER4zk17f0HF
+xS3z3LG0LsEWHShxccjzjoAUeppAU0Qojag3kKLoCwveambIdjXIxliBr/S2VSWJ
+AjMEEAEKAB0WIQTlYbNM52c/3RO87D71SYLSFgiO4QUCXC/89gAKCRD1SYLSFgiO
+4YfMEACS7/90VvyhbcQYB/2N6dYYuVd4tMRpM7jgWO6LqDDh3C9S2NI2bzwxGFBR
+nqV9N6fD+yLug/dtAPq5D4i7AXzRqPA8XQ2ky/1EJOv5EmOl6NYnUZafEBMDMai6
+F6XOji2JR7b6xlRC7GwzUdMR1rn8eyCxuJobgB+wMzfcSAnaDsH1qU7a5ohEewtQ
+IgEcLLmiLapCqNXm0l5oIYQQypbRrogw4ePw8KcLDreRtPCpPdLIThqdfkXLQ69Z
+ZwGd1+Pg6xgu7MJggnNq2RYortW/Jx9WUFs0Jj9c+Yz5pPeQmc1jjF4uHvxfi0gt
+wFqL+bu+HfxI/Hiudkzzp3v+Llmjk6RypJowLLk5cxMxv7XMfK3cLLDO5uw0pPiq
+l1f4u4P3YeRLv5YFh8SRrk/PadEqFr10mIOmreASq9LivJkhGK9eqQ0X7zQfHmDq
+S3nw62ousqlmEld39MIAMn02Ak9i9CD2M3G2O5gCAcvnFlWblx5CN9Pjc6kOb52W
+eDMYisUmKnIkChzvAlfh8PhvQfLUpKN1AmzUOcXJokpu1Yx7OGaoDnfpXSHS8fe8
+pu0jMsEhlqsNxNS6y5N0tWjxjYg7D1Qpeq3O4oft4HiO3ZfMPI4V1tatfeohnjic
+UJkpVsS4RZybu8aqNGc8i/ggagiWc50oydK8Lp906XfOcEert4kCMwQQAQoAHRYh
+BKuiUpWY9mJsQg0zW2L0nnR9kRtgBQJcF85IAAoJEGL0nnR9kRtg0OMQAKqpxGtA
+uaMknrZxnxu+y4FXXrX/W2TLlF7Ns71upXhitwSWk0pVJi+OZUvIGj+8yCj2MEg6
+o5qBJPyo8TuwIh1YfxBYigY5Hmt/uVVbKBM/VXyKDxzGrSts+r73cxf3BpPfyANB
+a90LjKHvFv0czu5sfiRMHU9GCOehnBukdZ9PhOOcRuUlHoHlSf21x9kxa1tUFPVJ
+eeoVOOnONDK6Dzmi6GoGRTq3X/HZ2JjhcdYSn37z/KxmZ/SNiwat60gw5zJHTh0a
+dM54hwsdsp48/avpF8BlTgXsoH5dVdbaOTyNGXBbQaoL09FY2x2eXaFCP3RbMWK8
+TpWh0Ijs/3JLFJ20jrvZqsmxmwIX25TmBb4UoR3HSEHFasYoIxv/me6V4oB7D1SJ
+F3q5scPnRzV3I5wCRljKJcNHQbNb/c9Xnt5UVnLHRtkZW/NUw93H5Bq15dkq4U6i
+prC0EgjfiWy2Esd9TiGb5kRuN0/duUY/d7dewp1tJGDRZACwWwHyYkPiPA6gzQfN
+83yk29evZeX1rSBslnXSLzuwhVJc28KNZCeEcC2o1JninqfqoYnypgSFOS5BK6ZG
+5YJD0gkKFX+ImC8OSKsIJ6QyrzyOBb7UwRcK5qlvYZGYgrt+B+mFDpxWPkzgpfMe
+CvE0nUCgKDNg0Yvhr5w9JhK+49Qn6TuTADMIiQIzBBABCAAdFiEE2o9xo8issez5
+FC4eE0/BVVhW2k8FAlwSzMYACgkQE0/BVVhW2k8jEQ//cV4+ke8eHhwwxCPZd+lA
+mvgzalwSiZZ8H0EgAB2cK0LXEFe07XGKxe2tDkf5FDIQcNm7sIk0OcLhJzYX0p9P
+A7ZzO2tZu8QuZlUqt4VnDL7B3xeW2Sh3STEmw80wubkgauRRysflAHIw3edchnIX
+9Hq55MLBBAplQFkpA/Y7arg3Bn8v/8YQlULc30xRO8EoyxD+zyl+Ic+xFtFUxNc/
+2dkqkdjq0Ohq89wTGTy0jaSI8INhZTGqR0cEYQPKZD+PXUUym/TaPKJKXagqxmu2
+XXBv6QPp46a58viBxMj6+fl1JJH3DxNF9YY++7Xp8CckA3TKDA9hxOJK6wbrTzDB
+DB+tjcwR4ff8QLv/CV3psyk2fX9CGCBdr0k0SCMQSFcHM8pKagkySjG+EJ1Tcflf
+UDY2LD33n2BBIdCaQTu6u+Zeqq2e6R3UXm/raXuGrxUxzvOQBIhb7XaC6nhfDu8k
+07yN/Tjwp+rgHt9ouH4pfFbGpvaIomBJq6pkTOk9ywDtHlSatqoVkbrbKpNzmwf8
+z7pt+ICtKqAAWQTPFPD83h6elP26GKlsyXyhT7HNmKUHaXInEbaD+IoCJ+wY/O3i
+gHV2Dn4QllSBSBhYlhl6utmP1zqwJJ0rI39mPS+nMXOhGB+bJ8EzAF/3N5J6y3TG
+FnMEJD8qgdpDEgztjHUSAy25Ag0EXAxQBwEQALqYikkk0Ur4gn9PjxtjW4OxS5J4
+e/u0UyOsv8znFM7CG9Ndha9rQs/7c7NEf3e+K6a7XqhzDKtyGAFVFlZArxbe6X8e
+UV1OidOaH46z2vmtWOJYHIupXHlXS9LeXNO+pJjCNEAzmHbGjpkjGtNz6Opl4Uae
+LoMFubRViXhvD8pBF72dGUlp8m+U4yeXJ03/q0sR94AdTA+1OzGd2+1s7PvL5XAx
+BwXqx9gccMYhrNRPyBo/yRA+Wf4ewwluIVBMi9cpR1sNF4ITIYCH4i3mf4NJvg7P
+0sPBY0s9k2jvHGLpINbFk6PkMtaRpqmgw695szTz16Gp0j41hRnEh7KnGneEp+SU
+6A8A9UGnjM9upR/d1591I5gT2U+6Q05B8RtJQUmd3HBeHEBgftjgBR0tstH8qeac
+Xc4V81OGlf3tdYP/LVggIlv8V5cdSZ4Bd3BXYWj2TIc2RmwWA2LWf4SA6JYvhEfp
+OxOzzphlgPtZF0kneEgV/b/D/KQqEk7MyZl3gN+LNk+zX7VJ2RDeUiUnoxZDFJGi
+jsbZd7yoDLkYvGiFkcQXORs2zbucweVXXK1Gyskj0c3Ih4syYYmKS0WJHMEozJUl
+b81oa7kSc2XFArcAnPz2c1yErfzcCAlg/HImkZmAgVqAfuRyxZ426F7CYucHAOcE
+N7bpIrOkqFp3uUb3ABEBAAGJAjYEGAEIACAWIQQgjdNu1bs3RaFnQ6THxvu1uRwR
+VQUCXAxQBwIbDAAKCRDHxvu1uRwRVbFaD/0e57rP3H+1rUoGhRO0oeIveQqIdd9V
+LKXUYuwzoK3HLg3BYUDEN03RS0KyNMYlHpnjyFl5L2JuXqGiJd/eu2iRXCwUMRb7
+SPvH7gypa1NUK5te85+Y8JhXOMjwZkly3zS2nRTyvHxMn9EV7NPlT/oEVu/woPrM
+o7XzmPChuvnk8pLWBW04wg5G5atDbu5+QVZlecNCrtRYJg/Cd8alKpJSeZX7y3cy
+fe2P20Gv0UOipKWaAFL55zFLbmu7HWVumYAKs6T+X/pZqmcfMaVwodIBeRJxRIvl
+PkrBxljahaFGOdgJ6FVnmO34uoYcpd019NEr9gbPoaFWmw37h3Tnc6U5sLAouaV4
+AERWmwBPIVTizYt1h8Qj4qyBhJ+QgZMjPlRqHWPZogHfMXDQV4gw3jgvVWTMVp1Z
+gDQgrFNbw02CqPwgtFn15VNwAv/4vbyToRhc3pG54e3xwdAFM8R2uM9lHJKuHafW
+7aFUk7aA20k8SG2BsZalb6tZLGxgcZOwMdO3lnLMPu1I5oOLl4cVoUIRZxtgmrbQ
+ROaGdXGIgO7fJBXXogMxjUGhMola+v6ioFQpbOnJRAr2AUVBCrrEgHoodAufGTDu
+nk38BkgHg3LHjCbCNEVkSK2TMT69A58iwpY9WUQlphsiz4WBpafSPbv/jSlsm7uK
+TNWtbFGBRpJyEg==
+=w141
+-----END PGP PUBLIC KEY BLOCK-----
+pub ed25519 2024-07-09 [SC]
+ 7FEC81FACC7FFB2A010ADD13C2D40F4D8196E874
+uid [ultimate] Robert Sirchia (I like turtles.)
+sig 3 C2D40F4D8196E874 2024-07-09 [self-signature]
+sub cv25519 2024-07-09 [E]
+sig C2D40F4D8196E874 2024-07-09 [self-signature]
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mDMEZo2C6xYJKwYBBAHaRw8BAQdA8kCWaI+FlCabcTw8EVeiMkokyWDalgl/Inbn
+ACcGN1e0N1JvYmVydCBTaXJjaGlhIChJIGxpa2UgdHVydGxlcy4pIDxyc2lyY2hp
+YUBvdXRsb29rLmNvbT6IkwQTFgoAOxYhBH/sgfrMf/sqAQrdE8LUD02Bluh0BQJm
+jYLrAhsDBQsJCAcCAiICBhUKCQgLAgQWAgMBAh4HAheAAAoJEMLUD02Bluh0dyYA
+/i7RB6m3MXNA8ei7GD8uQVpLfCRgEFsqSS/AzAOu8NGhAQCbw1kWL3AUll7KKtiQ
+UE96nhCk+HnkQeVkWYS+MZ1tALg4BGaNgusSCisGAQQBl1UBBQEBB0CCA6Au4krL
+YinQq9aAs29fFeRu/ye3PqQuz5jZ2r1ScAMBCAeIeAQYFgoAIBYhBH/sgfrMf/sq
+AQrdE8LUD02Bluh0BQJmjYLrAhsMAAoJEMLUD02Bluh0KH4BAMSwEIGkoQl10LN3
+K6V08VpFmniENmCDHshXYq0gGiTDAP9FsXl2UtmFU5xuYxH4fRKIxgmxJRAFMWI8
+u3Rdu/s+DQ==
+=smBO
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/helm/LICENSE b/helm/LICENSE
new file mode 100644
index 000000000..21c57fae2
--- /dev/null
+++ b/helm/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The Kubernetes Authors All Rights Reserved
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/helm/Makefile b/helm/Makefile
new file mode 100644
index 000000000..a18b83f0d
--- /dev/null
+++ b/helm/Makefile
@@ -0,0 +1,244 @@
+BINDIR := $(CURDIR)/bin
+INSTALL_PATH ?= /usr/local/bin
+DIST_DIRS := find * -type d -exec
+TARGETS := darwin/amd64 darwin/arm64 linux/amd64 linux/386 linux/arm linux/arm64 linux/loong64 linux/ppc64le linux/s390x linux/riscv64 windows/amd64 windows/arm64
+TARGET_OBJS ?= darwin-amd64.tar.gz darwin-amd64.tar.gz.sha256 darwin-amd64.tar.gz.sha256sum darwin-arm64.tar.gz darwin-arm64.tar.gz.sha256 darwin-arm64.tar.gz.sha256sum linux-amd64.tar.gz linux-amd64.tar.gz.sha256 linux-amd64.tar.gz.sha256sum linux-386.tar.gz linux-386.tar.gz.sha256 linux-386.tar.gz.sha256sum linux-arm.tar.gz linux-arm.tar.gz.sha256 linux-arm.tar.gz.sha256sum linux-arm64.tar.gz linux-arm64.tar.gz.sha256 linux-arm64.tar.gz.sha256sum linux-loong64.tar.gz linux-loong64.tar.gz.sha256 linux-loong64.tar.gz.sha256sum linux-ppc64le.tar.gz linux-ppc64le.tar.gz.sha256 linux-ppc64le.tar.gz.sha256sum linux-s390x.tar.gz linux-s390x.tar.gz.sha256 linux-s390x.tar.gz.sha256sum linux-riscv64.tar.gz linux-riscv64.tar.gz.sha256 linux-riscv64.tar.gz.sha256sum windows-amd64.zip windows-amd64.zip.sha256 windows-amd64.zip.sha256sum windows-arm64.zip windows-arm64.zip.sha256 windows-arm64.zip.sha256sum
+BINNAME ?= helm
+
+GOBIN = $(shell go env GOBIN)
+ifeq ($(GOBIN),)
+GOBIN = $(shell go env GOPATH)/bin
+endif
+GOX = $(GOBIN)/gox
+GOIMPORTS = $(GOBIN)/goimports
+ARCH = $(shell go env GOARCH)
+
+ACCEPTANCE_DIR := ../acceptance-testing
+# To specify the subset of acceptance tests to run. '.' means all tests
+ACCEPTANCE_RUN_TESTS = .
+
+# go option
+PKG := ./...
+TAGS :=
+TESTS := .
+TESTFLAGS := -shuffle=on -count=1
+LDFLAGS := -w -s
+GOFLAGS :=
+CGO_ENABLED ?= 0
+
+# Rebuild the binary if any of these files change
+SRC := $(shell find . -type f -name '*.go' -print) go.mod go.sum
+
+# Required for globs to work correctly
+SHELL = /usr/bin/env bash
+
+GIT_COMMIT = $(shell git rev-parse HEAD)
+GIT_SHA = $(shell git rev-parse --short HEAD)
+GIT_TAG = $(shell git describe --tags --abbrev=0 --exact-match 2>/dev/null)
+GIT_DIRTY = $(shell test -n "`git status --porcelain`" && echo "dirty" || echo "clean")
+
+ifdef VERSION
+ BINARY_VERSION = $(VERSION)
+endif
+BINARY_VERSION ?= ${GIT_TAG}
+
+# Only set Version if building a tag or VERSION is set
+ifneq ($(BINARY_VERSION),)
+ LDFLAGS += -X helm.sh/helm/v4/internal/version.version=${BINARY_VERSION}
+endif
+
+VERSION_METADATA = unreleased
+# Clear the "unreleased" string in BuildMetadata
+ifneq ($(GIT_TAG),)
+ VERSION_METADATA =
+endif
+
+LDFLAGS += -X helm.sh/helm/v4/internal/version.metadata=${VERSION_METADATA}
+LDFLAGS += -X helm.sh/helm/v4/internal/version.gitCommit=${GIT_COMMIT}
+LDFLAGS += -X helm.sh/helm/v4/internal/version.gitTreeState=${GIT_DIRTY}
+LDFLAGS += $(EXT_LDFLAGS)
+
+.PHONY: all
+all: build
+
+# ------------------------------------------------------------------------------
+# build
+
+.PHONY: build
+build: $(BINDIR)/$(BINNAME) tidy
+
+$(BINDIR)/$(BINNAME): $(SRC)
+ CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) -trimpath -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)'/$(BINNAME) ./cmd/helm
+
+# ------------------------------------------------------------------------------
+# install
+
+.PHONY: install
+install: build
+ @install "$(BINDIR)/$(BINNAME)" "$(INSTALL_PATH)/$(BINNAME)"
+
+# ------------------------------------------------------------------------------
+# test
+
+.PHONY: test
+test: build
+ifeq ($(ARCH),s390x)
+test: TESTFLAGS += -v
+else
+test: TESTFLAGS += -race -v
+endif
+test: test-style
+test: test-unit
+
+.PHONY: test-unit
+test-unit:
+ @echo
+ @echo "==> Running unit tests <=="
+ go test $(GOFLAGS) -run $(TESTS) $(PKG) $(TESTFLAGS)
+ @echo
+ @echo "==> Running unit test(s) with ldflags <=="
+# Test to check the deprecation warnings on Kubernetes templates created by `helm create` against the current Kubernetes
+# version. Note: The version details are set in var LDFLAGS. To avoid the ldflags impact on other unit tests that are
+# based on older versions, this is run separately. When run without the ldflags in the unit test (above) or coverage
+# test, it still passes with a false-positive result as the resources shouldn’t be deprecated in the older Kubernetes
+# version if it only starts failing with the latest.
+ go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./pkg/chart/v2/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)'
+ go test $(GOFLAGS) -run ^TestHelmCreateChart_CheckDeprecatedWarnings$$ ./internal/chart/v3/lint/ $(TESTFLAGS) -ldflags '$(LDFLAGS)'
+
+
+# To run the coverage for a specific package use: make test-coverage PKG=./pkg/action
+.PHONY: test-coverage
+test-coverage:
+ @echo
+ @echo "==> Running unit tests with coverage: $(PKG) <=="
+ @ ./scripts/coverage.sh $(PKG)
+
+.PHONY: test-style
+test-style:
+ @EXPECTED_VERSION=$$(grep GOLANGCI_LINT_VERSION .github/env | cut -d= -f2); \
+ ACTUAL_VERSION=$$(golangci-lint --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1); \
+ if [ "v$$ACTUAL_VERSION" != "$$EXPECTED_VERSION" ]; then \
+ echo "Warning: golangci-lint version is v$$ACTUAL_VERSION (expected $$EXPECTED_VERSION from CI)"; \
+ echo "To install the correct version, run:"; \
+ echo " curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b \$$(go env GOPATH)/bin $$EXPECTED_VERSION"; \
+ fi
+ golangci-lint run ./...
+ @scripts/validate-license.sh
+
+.PHONY: test-source-headers
+test-source-headers:
+ @scripts/validate-license.sh
+
+.PHONY: test-acceptance
+test-acceptance: TARGETS = linux/amd64
+test-acceptance: build build-cross
+ @if [ -d "${ACCEPTANCE_DIR}" ]; then \
+ cd ${ACCEPTANCE_DIR} && \
+ ROBOT_RUN_TESTS=$(ACCEPTANCE_RUN_TESTS) ROBOT_HELM_PATH='$(BINDIR)' make acceptance; \
+ else \
+ echo "You must clone the acceptance_testing repo under $(ACCEPTANCE_DIR)"; \
+ echo "You can find the acceptance_testing repo at https://github.com/helm/acceptance-testing"; \
+ fi
+
+.PHONY: test-acceptance-completion
+test-acceptance-completion: ACCEPTANCE_RUN_TESTS = shells.robot
+test-acceptance-completion: test-acceptance
+
+.PHONY: format
+format: $(GOIMPORTS)
+ go list -f '{{.Dir}}' ./... | xargs $(GOIMPORTS) -w -local helm.sh/helm
+
+# Generate golden files used in unit tests
+.PHONY: gen-test-golden
+gen-test-golden:
+gen-test-golden: PKG = ./pkg/cmd ./pkg/action
+gen-test-golden: TESTFLAGS = -update
+gen-test-golden: test-unit
+
+# ------------------------------------------------------------------------------
+# dependencies
+
+# If go install is run from inside the project directory it will add the
+# dependencies to the go.mod file. To avoid that we change to a directory
+# without a go.mod file when downloading the following dependencies
+
+$(GOX):
+ (cd /; go install github.com/mitchellh/gox@v1.0.2-0.20220701044238-9f712387e2d2)
+
+$(GOIMPORTS):
+ (cd /; go install golang.org/x/tools/cmd/goimports@latest)
+
+# ------------------------------------------------------------------------------
+# release
+
+.PHONY: build-cross
+build-cross: LDFLAGS += -extldflags "-static"
+build-cross: $(GOX)
+ GOFLAGS="-trimpath" CGO_ENABLED=0 $(GOX) -parallel=3 -output="_dist/{{.OS}}-{{.Arch}}/$(BINNAME)" -osarch='$(TARGETS)' $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' ./cmd/helm
+
+.PHONY: dist
+dist:
+ ( \
+ cd _dist && \
+ $(DIST_DIRS) cp ../LICENSE {} \; && \
+ $(DIST_DIRS) cp ../README.md {} \; && \
+ $(DIST_DIRS) tar -zcf helm-${VERSION}-{}.tar.gz {} \; && \
+ $(DIST_DIRS) zip -r helm-${VERSION}-{}.zip {} \; \
+ )
+
+.PHONY: fetch-dist
+fetch-dist:
+ mkdir -p _dist
+ cd _dist && \
+ for obj in ${TARGET_OBJS} ; do \
+ curl -sSL -o helm-${VERSION}-$${obj} https://get.helm.sh/helm-${VERSION}-$${obj} ; \
+ done
+
+.PHONY: sign
+sign:
+ for f in $$(ls _dist/*.{gz,zip,sha256,sha256sum} 2>/dev/null) ; do \
+ gpg --armor --detach-sign $${f} ; \
+ done
+
+# The contents of the .sha256sum file are compatible with tools like
+# shasum. For example, using the following command will verify
+# the file helm-3.1.0-rc.1-darwin-amd64.tar.gz:
+# shasum -a 256 -c helm-3.1.0-rc.1-darwin-amd64.tar.gz.sha256sum
+# The .sha256 files hold only the hash and are not compatible with
+# verification tools like shasum or sha256sum. This method and file can be
+# removed in Helm v4.
+.PHONY: checksum
+checksum:
+ for f in $$(ls _dist/*.{gz,zip} 2>/dev/null) ; do \
+ shasum -a 256 "$${f}" | sed 's/_dist\///' > "$${f}.sha256sum" ; \
+ shasum -a 256 "$${f}" | awk '{print $$1}' > "$${f}.sha256" ; \
+ done
+
+# ------------------------------------------------------------------------------
+
+.PHONY: clean
+clean:
+ @rm -rf '$(BINDIR)' ./_dist
+
+.PHONY: release-notes
+release-notes:
+ @if [ ! -d "./_dist" ]; then \
+ echo "please run 'make fetch-dist' first" && \
+ exit 1; \
+ fi
+ @if [ -z "${PREVIOUS_RELEASE}" ]; then \
+ echo "please set PREVIOUS_RELEASE environment variable" && \
+ exit 1; \
+ fi
+ @./scripts/release-notes.sh ${PREVIOUS_RELEASE} ${VERSION}
+
+.PHONY: info
+info:
+ @echo "Version: ${VERSION}"
+ @echo "Git Tag: ${GIT_TAG}"
+ @echo "Git Commit: ${GIT_COMMIT}"
+ @echo "Git Tree State: ${GIT_DIRTY}"
+
+.PHONY: tidy
+tidy:
+ go mod tidy
diff --git a/helm/OWNERS b/helm/OWNERS
new file mode 100644
index 000000000..13827661a
--- /dev/null
+++ b/helm/OWNERS
@@ -0,0 +1,32 @@
+maintainers:
+ - banjoh
+ - gjenkins8
+ - joejulian
+ - marckhouzam
+ - mattfarina
+ - robertsirc
+ - sabre1041
+ - scottrigby
+ - technosophos
+ - TerryHowe
+triage:
+ - yxxhero
+ - zonggen
+ - z4ce
+emeritus:
+ - adamreese
+ - bacongobbler
+ - fibonacci1729
+ - hickeyma
+ - jascott1
+ - jdolitsky
+ - michelleN
+ - migmartri
+ - nebril
+ - prydonius
+ - rimusz
+ - seh
+ - SlickNik
+ - thomastaylor312
+ - vaikas-google
+ - viglesiasce
diff --git a/helm/README.md b/helm/README.md
new file mode 100644
index 000000000..37bc8abaa
--- /dev/null
+++ b/helm/README.md
@@ -0,0 +1,87 @@
+# Helm
+
+[](https://github.com/helm/helm/actions?workflow=release)
+[](https://goreportcard.com/report/helm.sh/helm/v4)
+[](https://pkg.go.dev/helm.sh/helm/v4)
+[](https://bestpractices.coreinfrastructure.org/projects/3131)
+[](https://scorecard.dev/viewer/?uri=github.com/helm/helm)
+[](https://insights.linuxfoundation.org/project/helm)
+
+Helm is a tool for managing Charts. Charts are packages of pre-configured Kubernetes resources.
+
+Use Helm to:
+
+- Find and use [popular software packaged as Helm Charts](https://artifacthub.io/packages/search?kind=0) to run in Kubernetes
+- Share your own applications as Helm Charts
+- Create reproducible builds of your Kubernetes applications
+- Intelligently manage your Kubernetes manifest files
+- Manage releases of Helm packages
+
+## Helm in a Handbasket
+
+Helm is a tool that streamlines installing and managing Kubernetes applications.
+Think of it like apt/yum/homebrew for Kubernetes.
+
+- Helm renders your templates and communicates with the Kubernetes API
+- Helm runs on your laptop, CI/CD, or wherever you want it to run.
+- Charts are Helm packages that contain at least two things:
+ - A description of the package (`Chart.yaml`)
+ - One or more templates, which contain Kubernetes manifest files
+- Charts can be stored on disk, or fetched from remote chart repositories
+ (like Debian or RedHat packages)
+
+## Helm Development and Stable Versions
+
+Helm v4 is currently under development on the `main` branch. This is unstable and the APIs within the Go SDK and at the command line are changing.
+Helm v3 (current stable) is maintained on the `dev-v3` branch. APIs there follow semantic versioning.
+
+## Install
+
+Binary downloads of the Helm client can be found on [the Releases page](https://github.com/helm/helm/releases/latest).
+
+Unpack the `helm` binary and add it to your PATH and you are good to go!
+
+If you want to use a package manager:
+
+- [Homebrew](https://brew.sh/) users can use `brew install helm`.
+- [Chocolatey](https://chocolatey.org/) users can use `choco install kubernetes-helm`.
+- [Winget](https://learn.microsoft.com/en-us/windows/package-manager/) users can use `winget install Helm.Helm`.
+- [Scoop](https://scoop.sh/) users can use `scoop install helm`.
+- [Snapcraft](https://snapcraft.io/) users can use `snap install helm --classic`.
+- [Flox](https://flox.dev) users can use `flox install kubernetes-helm`.
+- [Mise-en-place](https://mise.jdx.dev/) users can use `mise use -g helm@latest`
+
+To rapidly get Helm up and running, start with the [Quick Start Guide](https://helm.sh/docs/intro/quickstart/).
+
+See the [installation guide](https://helm.sh/docs/intro/install/) for more options,
+including installing pre-releases.
+
+## Docs
+
+Get started with the [Quick Start guide](https://helm.sh/docs/intro/quickstart/) or plunge into the [complete documentation](https://helm.sh/docs).
+
+## Roadmap
+
+The [Helm roadmap uses GitHub milestones](https://github.com/helm/helm/milestones) to track the progress of the project.
+
+The development of Helm v4 is currently happening on the `main` branch while the development of Helm v3, the stable branch, is happening on the `dev-v3` branch. Changes should be made to the `main` branch prior to being added to the `dev-v3` branch so that all changes are carried along to Helm v4.
+
+## Community, discussion, contribution, and support
+
+You can reach the Helm community and developers via the following channels:
+
+- [Kubernetes Slack](https://kubernetes.slack.com):
+ - [#helm-users](https://kubernetes.slack.com/messages/helm-users)
+ - [#helm-dev](https://kubernetes.slack.com/messages/helm-dev)
+ - [#charts](https://kubernetes.slack.com/messages/charts)
+- Mailing List:
+ - [Helm Mailing List](https://lists.cncf.io/g/cncf-helm)
+- Developer Call: Thursdays at 9:30-10:00 Pacific ([meeting details](https://github.com/helm/community/blob/master/communication.md#meetings))
+
+### Contribution
+
+If you're interested in contributing, please refer to the [Contributing Guide](CONTRIBUTING.md) **before submitting a pull request**.
+
+### Code of conduct
+
+Participation in the Helm community is governed by the [Code of Conduct](code-of-conduct.md).
diff --git a/helm/SECURITY.md b/helm/SECURITY.md
new file mode 100644
index 000000000..c84a6f866
--- /dev/null
+++ b/helm/SECURITY.md
@@ -0,0 +1,3 @@
+# Helm Security Reporting and Policy
+
+The Helm project has [a common process and policy that can be found here](https://github.com/helm/community/blob/master/SECURITY.md).
\ No newline at end of file
diff --git a/helm/cmd/helm/helm.go b/helm/cmd/helm/helm.go
new file mode 100644
index 000000000..66d342500
--- /dev/null
+++ b/helm/cmd/helm/helm.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main // import "helm.sh/helm/v4/cmd/helm"
+
+import (
+ "log/slog"
+ "os"
+
+ // Import to initialize client auth plugins.
+ _ "k8s.io/client-go/plugin/pkg/client/auth"
+
+ helmcmd "helm.sh/helm/v4/pkg/cmd"
+ "helm.sh/helm/v4/pkg/kube"
+)
+
+func main() {
+ // Setting the name of the app for managedFields in the Kubernetes client.
+ // It is set here to the full name of "helm" so that renaming of helm to
+ // another name (e.g., helm2 or helm3) does not change the name of the
+ // manager as picked up by the automated name detection.
+ kube.ManagedFieldsManager = "helm"
+
+ cmd, err := helmcmd.NewRootCmd(os.Stdout, os.Args[1:], helmcmd.SetupLogging)
+ if err != nil {
+ slog.Warn("command failed", slog.Any("error", err))
+ os.Exit(1)
+ }
+
+ if err := cmd.Execute(); err != nil {
+ if cerr, ok := err.(helmcmd.CommandError); ok {
+ os.Exit(cerr.ExitCode)
+ }
+ os.Exit(1)
+ }
+}
diff --git a/helm/cmd/helm/helm_test.go b/helm/cmd/helm/helm_test.go
new file mode 100644
index 000000000..0458e8037
--- /dev/null
+++ b/helm/cmd/helm/helm_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCliPluginExitCode(t *testing.T) {
+ if os.Getenv("RUN_MAIN_FOR_TESTING") == "1" {
+ os.Args = []string{"helm", "exitwith", "43"}
+
+ // We DO call helm's main() here. So this looks like a normal `helm` process.
+ main()
+
+ // As main calls os.Exit, we never reach this line.
+ // But the test called this block of code catches and verifies the exit code.
+ return
+ }
+
+ // Currently, plugins assume a Linux subsystem. Skip the execution
+ // tests until this is fixed
+ if runtime.GOOS != "windows" {
+ // Do a second run of this specific test(TestPluginExitCode) with RUN_MAIN_FOR_TESTING=1 set,
+ // So that the second run is able to run main() and this first run can verify the exit status returned by that.
+ //
+ // This technique originates from https://talks.golang.org/2014/testing.slide#23.
+ cmd := exec.Command(os.Args[0], "-test.run=TestCliPluginExitCode")
+ cmd.Env = append(
+ os.Environ(),
+ "RUN_MAIN_FOR_TESTING=1",
+ // See pkg/cli/environment.go for which envvars can be used for configuring these passes
+ // and also see plugin_test.go for how a plugin env can be set up.
+ // This mimics the "exitwith" test case in TestLoadPlugins using envvars
+ "HELM_PLUGINS=../../pkg/cmd/testdata/helmhome/helm/plugins",
+ )
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err := cmd.Run()
+
+ exiterr, ok := err.(*exec.ExitError)
+ if !ok {
+ t.Fatalf("Unexpected error type returned by os.Exit: %T", err)
+ }
+
+ assert.Empty(t, stdout.String())
+
+ expectedStderr := "Error: plugin \"exitwith\" exited with error\n"
+ if stderr.String() != expectedStderr {
+ t.Errorf("Expected %q written to stderr: Got %q", expectedStderr, stderr.String())
+ }
+
+ if exiterr.ExitCode() != 43 {
+ t.Errorf("Expected exit code 43: Got %d", exiterr.ExitCode())
+ }
+ }
+}
diff --git a/helm/code-of-conduct.md b/helm/code-of-conduct.md
new file mode 100644
index 000000000..91ccaf035
--- /dev/null
+++ b/helm/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Community Code of Conduct
+
+Helm follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/helm/go.mod b/helm/go.mod
new file mode 100644
index 000000000..6e2d9c15d
--- /dev/null
+++ b/helm/go.mod
@@ -0,0 +1,181 @@
+module helm.sh/helm/v4
+
+go 1.25.0
+
+require (
+ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
+ github.com/BurntSushi/toml v1.6.0
+ github.com/DATA-DOG/go-sqlmock v1.5.2
+ github.com/Masterminds/semver/v3 v3.4.0
+ github.com/Masterminds/sprig/v3 v3.3.0
+ github.com/Masterminds/squirrel v1.5.4
+ github.com/Masterminds/vcs v1.13.3
+ github.com/ProtonMail/go-crypto v1.3.0
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
+ github.com/cyphar/filepath-securejoin v0.6.1
+ github.com/distribution/distribution/v3 v3.0.0
+ github.com/evanphx/json-patch/v5 v5.9.11
+ github.com/extism/go-sdk v1.7.1
+ github.com/fatih/color v1.18.0
+ github.com/fluxcd/cli-utils v0.37.0-flux.1
+ github.com/foxcpp/go-mockdns v1.2.0
+ github.com/gobwas/glob v0.2.3
+ github.com/gofrs/flock v0.13.0
+ github.com/gosuri/uitable v0.0.4
+ github.com/jmoiron/sqlx v1.4.0
+ github.com/lib/pq v1.10.9
+ github.com/mattn/go-shellwords v1.0.12
+ github.com/moby/term v0.5.2
+ github.com/opencontainers/go-digest v1.0.0
+ github.com/opencontainers/image-spec v1.1.1
+ github.com/rubenv/sql-migrate v1.8.1
+ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2
+ github.com/spf13/cobra v1.10.2
+ github.com/spf13/pflag v1.0.10
+ github.com/stretchr/testify v1.11.1
+ github.com/tetratelabs/wazero v1.11.0
+ go.yaml.in/yaml/v3 v3.0.4
+ golang.org/x/crypto v0.47.0
+ golang.org/x/term v0.39.0
+ golang.org/x/text v0.33.0
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/api v0.35.0
+ k8s.io/apiextensions-apiserver v0.35.0
+ k8s.io/apimachinery v0.35.0
+ k8s.io/apiserver v0.35.0
+ k8s.io/cli-runtime v0.35.0
+ k8s.io/client-go v0.35.0
+ k8s.io/klog/v2 v2.130.1
+ k8s.io/kubectl v0.35.0
+ oras.land/oras-go/v2 v2.6.0
+ sigs.k8s.io/controller-runtime v0.22.4
+ sigs.k8s.io/kustomize/kyaml v0.21.0
+ sigs.k8s.io/yaml v1.6.0
+)
+
+require (
+ dario.cat/mergo v1.0.1 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+ github.com/MakeNowJust/heredoc v1.0.0 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/chai2010/gettext-go v1.0.2 // indirect
+ github.com/cloudflare/circl v1.6.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/docker-credential-helpers v0.8.2 // indirect
+ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
+ github.com/docker/go-metrics v0.0.1 // indirect
+ github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/go-errors/errors v1.5.1 // indirect
+ github.com/go-gorp/gorp/v3 v3.1.0 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-openapi/jsonpointer v0.21.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/handlers v1.5.2 // indirect
+ github.com/gorilla/mux v1.8.1 // indirect
+ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+ github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
+ github.com/huandu/xstrings v1.5.0 // indirect
+ github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
+ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
+ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
+ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.9 // indirect
+ github.com/miekg/dns v1.1.57 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
+ github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
+ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/onsi/gomega v1.38.2 // indirect
+ github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_golang v1.23.2 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
+ github.com/prometheus/procfs v0.17.0 // indirect
+ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
+ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
+ github.com/redis/go-redis/v9 v9.7.3 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/spf13/cast v1.7.0 // indirect
+ github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ github.com/xlab/treeprint v1.2.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
+ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+ go.opentelemetry.io/otel v1.37.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect
+ go.opentelemetry.io/otel/log v0.8.0 // indirect
+ go.opentelemetry.io/otel/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.36.0 // indirect
+ go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/net v0.48.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sync v0.19.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/time v0.12.0 // indirect
+ golang.org/x/tools v0.40.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
+ google.golang.org/grpc v1.72.2 // indirect
+ google.golang.org/protobuf v1.36.8 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ k8s.io/component-base v0.35.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
+ sigs.k8s.io/kustomize/api v0.20.1 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+)
diff --git a/helm/go.sum b/helm/go.sum
new file mode 100644
index 000000000..b1e843f1b
--- /dev/null
+++ b/helm/go.sum
@@ -0,0 +1,518 @@
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
+github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
+github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
+github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
+github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
+github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
+github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/Masterminds/vcs v1.13.3 h1:IIA2aBdXvfbIM+yl/eTnL4hb1XwdpvuQLglAix1gweE=
+github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
+github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
+github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
+github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a h1:UwSIFv5g5lIvbGgtf3tVwC7Ky9rmMFBp0RMs+6f6YqE=
+github.com/dylibso/observe-sdk/go v0.0.0-20240819160327-2d926c5d788a/go.mod h1:C8DzXehI4zAbrdlbtOByKX6pfivJTBiV9Jjqv56Yd9Q=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
+github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
+github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
+github.com/extism/go-sdk v1.7.1 h1:lWJos6uY+tRFdlIHR+SJjwFDApY7OypS/2nMhiVQ9Sw=
+github.com/extism/go-sdk v1.7.1/go.mod h1:IT+Xdg5AZM9hVtpFUA+uZCJMge/hbvshl8bwzLtFyKA=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fluxcd/cli-utils v0.37.0-flux.1 h1:k/VvPNT3tGa/l2N+qzHduaQr3GVbgoWS6nw7tGZz16w=
+github.com/fluxcd/cli-utils v0.37.0-flux.1/go.mod h1:aND5wX3LuTFtB7eUT7vsWr8mmxRVSPR2Wkvbn0SqPfw=
+github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0=
+github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
+github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
+github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
+github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
+github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
+github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
+github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
+github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
+github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca h1:T54Ema1DU8ngI+aef9ZhAhNGQhcRTrWxVeG07F+c/Rw=
+github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
+github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
+github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
+github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho=
+github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U=
+github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc=
+github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
+github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
+github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
+github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/rubenv/sql-migrate v1.8.1 h1:EPNwCvjAowHI3TnZ+4fQu3a915OpnQoPAjTXCGOy2U0=
+github.com/rubenv/sql-migrate v1.8.1/go.mod h1:BTIKBORjzyxZDS6dzoiw6eAFYJ1iNlGAtjn4LGeVjS8=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
+github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834 h1:ZF+QBjOI+tILZjBaFj3HgFonKXUcwgJ4djLb6i42S3Q=
+github.com/tetratelabs/wabin v0.0.0-20230304001439-f6f874872834/go.mod h1:m9ymHTgNSEjuxvw8E7WWe4Pl4hZQHXONY8wE6dMLaRk=
+github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA=
+github.com/tetratelabs/wazero v1.11.0/go.mod h1:eV28rsN8Q+xwjogd7f4/Pp4xFxO7uOGbLcD/LzB1wiU=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
+github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
+go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
+go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
+go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
+go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
+go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
+go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
+go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
+go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
+golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
+golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
+google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
+k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
+k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4=
+k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4=
+k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds=
+k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE=
+k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
+k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94=
+k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc=
+k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
+oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
+sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
+sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
+sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
+sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ=
+sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/helm/internal/chart/v3/chart.go b/helm/internal/chart/v3/chart.go
new file mode 100644
index 000000000..48f006e79
--- /dev/null
+++ b/helm/internal/chart/v3/chart.go
@@ -0,0 +1,179 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// APIVersionV3 is the API version number for version 3.
+const APIVersionV3 = "v3"
+
+// aliasNameFormat defines the characters that are legal in an alias name.
+var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+// Chart is a helm package that contains metadata, a default config, zero or more
+// optionally parameterizable templates, and zero or more charts (dependencies).
+type Chart struct {
+ // Raw contains the raw contents of the files originally contained in the chart archive.
+ //
+ // This should not be used except in special cases like `helm show values`,
+ // where we want to display the raw values, comments and all.
+ Raw []*common.File `json:"-"`
+ // Metadata is the contents of the Chartfile.
+ Metadata *Metadata `json:"metadata"`
+ // Lock is the contents of Chart.lock.
+ Lock *Lock `json:"lock"`
+ // Templates for this chart.
+ Templates []*common.File `json:"templates"`
+ // Values are default config for this chart.
+ Values map[string]interface{} `json:"values"`
+ // Schema is an optional JSON schema for imposing structure on Values
+ Schema []byte `json:"schema"`
+ // SchemaModTime the schema was last modified
+ SchemaModTime time.Time `json:"schemamodtime,omitempty"`
+ // Files are miscellaneous files in a chart archive,
+ // e.g. README, LICENSE, etc.
+ Files []*common.File `json:"files"`
+ // ModTime the chart metadata was last modified
+ ModTime time.Time `json:"modtime,omitzero"`
+
+ parent *Chart
+ dependencies []*Chart
+}
+
+type CRD struct {
+ // Name is the File.Name for the crd file
+ Name string
+ // Filename is the File obj Name including (sub-)chart.ChartFullPath
+ Filename string
+ // File is the File obj for the crd
+ File *common.File
+}
+
+// SetDependencies replaces the chart dependencies.
+func (ch *Chart) SetDependencies(charts ...*Chart) {
+ ch.dependencies = nil
+ ch.AddDependency(charts...)
+}
+
+// Name returns the name of the chart.
+func (ch *Chart) Name() string {
+ if ch.Metadata == nil {
+ return ""
+ }
+ return ch.Metadata.Name
+}
+
+// AddDependency determines if the chart is a subchart.
+func (ch *Chart) AddDependency(charts ...*Chart) {
+ for i, x := range charts {
+ charts[i].parent = ch
+ ch.dependencies = append(ch.dependencies, x)
+ }
+}
+
+// Root finds the root chart.
+func (ch *Chart) Root() *Chart {
+ if ch.IsRoot() {
+ return ch
+ }
+ return ch.Parent().Root()
+}
+
+// Dependencies are the charts that this chart depends on.
+func (ch *Chart) Dependencies() []*Chart { return ch.dependencies }
+
+// IsRoot determines if the chart is the root chart.
+func (ch *Chart) IsRoot() bool { return ch.parent == nil }
+
+// Parent returns a subchart's parent chart.
+func (ch *Chart) Parent() *Chart { return ch.parent }
+
+// ChartPath returns the full path to this chart in dot notation.
+func (ch *Chart) ChartPath() string {
+ if !ch.IsRoot() {
+ return ch.Parent().ChartPath() + "." + ch.Name()
+ }
+ return ch.Name()
+}
+
+// ChartFullPath returns the full path to this chart.
+// Note that the path may not correspond to the path where the file can be found on the file system if the path
+// points to an aliased subchart.
+func (ch *Chart) ChartFullPath() string {
+ if !ch.IsRoot() {
+ return ch.Parent().ChartFullPath() + "/charts/" + ch.Name()
+ }
+ return ch.Name()
+}
+
+// Validate validates the metadata.
+func (ch *Chart) Validate() error {
+ return ch.Metadata.Validate()
+}
+
+// AppVersion returns the appversion of the chart.
+func (ch *Chart) AppVersion() string {
+ if ch.Metadata == nil {
+ return ""
+ }
+ return ch.Metadata.AppVersion
+}
+
+// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart.
+// Deprecated: use CRDObjects()
+func (ch *Chart) CRDs() []*common.File {
+ files := []*common.File{}
+ // Find all resources in the crds/ directory
+ for _, f := range ch.Files {
+ if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
+ files = append(files, f)
+ }
+ }
+ // Get CRDs from dependencies, too.
+ for _, dep := range ch.Dependencies() {
+ files = append(files, dep.CRDs()...)
+ }
+ return files
+}
+
+// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts
+func (ch *Chart) CRDObjects() []CRD {
+ crds := []CRD{}
+ // Find all resources in the crds/ directory
+ for _, f := range ch.Files {
+ if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
+ mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f}
+ crds = append(crds, mycrd)
+ }
+ }
+ // Get CRDs from dependencies, too.
+ for _, dep := range ch.Dependencies() {
+ crds = append(crds, dep.CRDObjects()...)
+ }
+ return crds
+}
+
+func hasManifestExtension(fname string) bool {
+ ext := filepath.Ext(fname)
+ return strings.EqualFold(ext, ".yaml") || strings.EqualFold(ext, ".yml") || strings.EqualFold(ext, ".json")
+}
diff --git a/helm/internal/chart/v3/chart_test.go b/helm/internal/chart/v3/chart_test.go
new file mode 100644
index 000000000..07cbf4b39
--- /dev/null
+++ b/helm/internal/chart/v3/chart_test.go
@@ -0,0 +1,229 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v3
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func TestCRDs(t *testing.T) {
+ modTime := time.Now()
+ chrt := Chart{
+ Files: []*common.File{
+ {
+ Name: "crds/foo.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "bar.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crdsfoo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/README.md",
+ ModTime: modTime,
+ Data: []byte("# hello"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ crds := chrt.CRDs()
+ is.Equal(2, len(crds))
+ is.Equal("crds/foo.yaml", crds[0].Name)
+ is.Equal("crds/foo/bar/baz.yaml", crds[1].Name)
+}
+
+func TestSaveChartNoRawData(t *testing.T) {
+ chrt := Chart{
+ Raw: []*common.File{
+ {
+ Name: "fhqwhgads.yaml",
+ ModTime: time.Now(),
+ Data: []byte("Everybody to the Limit"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ data, err := json.Marshal(chrt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res := &Chart{}
+ if err := json.Unmarshal(data, res); err != nil {
+ t.Fatal(err)
+ }
+
+ is.Equal([]*common.File(nil), res.Raw)
+}
+
+func TestMetadata(t *testing.T) {
+ chrt := Chart{
+ Metadata: &Metadata{
+ Name: "foo.yaml",
+ AppVersion: "1.0.0",
+ APIVersion: "v3",
+ Version: "1.0.0",
+ Type: "application",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo.yaml", chrt.Name())
+ is.Equal("1.0.0", chrt.AppVersion())
+ is.Equal(nil, chrt.Validate())
+}
+
+func TestIsRoot(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal(false, chrt1.IsRoot())
+ is.Equal(true, chrt2.IsRoot())
+}
+
+func TestChartPath(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo.", chrt1.ChartPath())
+ is.Equal("foo", chrt2.ChartPath())
+}
+
+func TestChartFullPath(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo/charts/", chrt1.ChartFullPath())
+ is.Equal("foo", chrt2.ChartFullPath())
+}
+
+func TestCRDObjects(t *testing.T) {
+ modTime := time.Now()
+ chrt := Chart{
+ Files: []*common.File{
+ {
+ Name: "crds/foo.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "bar.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crdsfoo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/README.md",
+ ModTime: modTime,
+ Data: []byte("# hello"),
+ },
+ },
+ }
+
+ expected := []CRD{
+ {
+ Name: "crds/foo.yaml",
+ Filename: "crds/foo.yaml",
+ File: &common.File{
+ Name: "crds/foo.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ Filename: "crds/foo/bar/baz.yaml",
+ File: &common.File{
+ Name: "crds/foo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ crds := chrt.CRDObjects()
+ is.Equal(expected, crds)
+}
diff --git a/helm/internal/chart/v3/dependency.go b/helm/internal/chart/v3/dependency.go
new file mode 100644
index 000000000..2d956b548
--- /dev/null
+++ b/helm/internal/chart/v3/dependency.go
@@ -0,0 +1,82 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import "time"
+
+// Dependency describes a chart upon which another chart depends.
+//
+// Dependencies can be used to express developer intent, or to capture the state
+// of a chart.
+type Dependency struct {
+ // Name is the name of the dependency.
+ //
+ // This must mach the name in the dependency's Chart.yaml.
+ Name string `json:"name" yaml:"name"`
+ // Version is the version (range) of this chart.
+ //
+ // A lock file will always produce a single version, while a dependency
+ // may contain a semantic version range.
+ Version string `json:"version,omitempty" yaml:"version,omitempty"`
+ // The URL to the repository.
+ //
+ // Appending `index.yaml` to this string should result in a URL that can be
+ // used to fetch the repository index.
+ Repository string `json:"repository" yaml:"repository"`
+ // A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
+ Condition string `json:"condition,omitempty" yaml:"condition,omitempty"`
+ // Tags can be used to group charts for enabling/disabling together
+ Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+ // Enabled bool determines if chart should be loaded
+ Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
+ // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
+ // string or pair of child/parent sublist items.
+ ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"`
+ // Alias usable alias to be used for the chart
+ Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
+}
+
+// Validate checks for common problems with the dependency datastructure in
+// the chart. This check must be done at load time before the dependency's charts are
+// loaded.
+func (d *Dependency) Validate() error {
+ if d == nil {
+ return ValidationError("dependencies must not contain empty or null nodes")
+ }
+ d.Name = sanitizeString(d.Name)
+ d.Version = sanitizeString(d.Version)
+ d.Repository = sanitizeString(d.Repository)
+ d.Condition = sanitizeString(d.Condition)
+ for i := range d.Tags {
+ d.Tags[i] = sanitizeString(d.Tags[i])
+ }
+ if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) {
+ return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name)
+ }
+ return nil
+}
+
+// Lock is a lock file for dependencies.
+//
+// It represents the state that the dependencies should be in.
+type Lock struct {
+ // Generated is the date the lock file was last generated.
+ Generated time.Time `json:"generated"`
+ // Digest is a hash of the dependencies in Chart.yaml.
+ Digest string `json:"digest"`
+ // Dependencies is the list of dependencies that this lock file has locked.
+ Dependencies []*Dependency `json:"dependencies"`
+}
diff --git a/helm/internal/chart/v3/dependency_test.go b/helm/internal/chart/v3/dependency_test.go
new file mode 100644
index 000000000..fcea19aea
--- /dev/null
+++ b/helm/internal/chart/v3/dependency_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v3
+
+import (
+ "testing"
+)
+
+func TestValidateDependency(t *testing.T) {
+ dep := &Dependency{
+ Name: "example",
+ }
+ for value, shouldFail := range map[string]bool{
+ "abcdefghijklmenopQRSTUVWXYZ-0123456780_": false,
+ "-okay": false,
+ "_okay": false,
+ "- bad": true,
+ " bad": true,
+ "bad\nvalue": true,
+ "bad ": true,
+ "bad$": true,
+ } {
+ dep.Alias = value
+ res := dep.Validate()
+ if res != nil && !shouldFail {
+ t.Errorf("Failed on case %q", dep.Alias)
+ } else if res == nil && shouldFail {
+ t.Errorf("Expected failure for %q", dep.Alias)
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/doc.go b/helm/internal/chart/v3/doc.go
new file mode 100644
index 000000000..e003833a0
--- /dev/null
+++ b/helm/internal/chart/v3/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package v3 provides chart handling for apiVersion v3 charts
+
+This package and its sub-packages provide handling for apiVersion v3 charts.
+*/
+package v3
diff --git a/helm/internal/chart/v3/errors.go b/helm/internal/chart/v3/errors.go
new file mode 100644
index 000000000..059e43f07
--- /dev/null
+++ b/helm/internal/chart/v3/errors.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import "fmt"
+
+// ValidationError represents a data validation error.
+type ValidationError string
+
+func (v ValidationError) Error() string {
+ return "validation: " + string(v)
+}
+
+// ValidationErrorf takes a message and formatting options and creates a ValidationError
+func ValidationErrorf(msg string, args ...interface{}) ValidationError {
+ return ValidationError(fmt.Sprintf(msg, args...))
+}
diff --git a/helm/internal/chart/v3/fuzz_test.go b/helm/internal/chart/v3/fuzz_test.go
new file mode 100644
index 000000000..982c26489
--- /dev/null
+++ b/helm/internal/chart/v3/fuzz_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+ "testing"
+
+ fuzz "github.com/AdaLogics/go-fuzz-headers"
+)
+
+func FuzzMetadataValidate(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ fdp := fuzz.NewConsumer(data)
+ // Add random values to the metadata
+ md := &Metadata{}
+ err := fdp.GenerateStruct(md)
+ if err != nil {
+ t.Skip()
+ }
+ md.Validate()
+ })
+}
+
+func FuzzDependencyValidate(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ f := fuzz.NewConsumer(data)
+ // Add random values to the dependenci
+ d := &Dependency{}
+ err := f.GenerateStruct(d)
+ if err != nil {
+ t.Skip()
+ }
+ d.Validate()
+ })
+}
diff --git a/helm/internal/chart/v3/lint/lint.go b/helm/internal/chart/v3/lint/lint.go
new file mode 100644
index 000000000..0cd949065
--- /dev/null
+++ b/helm/internal/chart/v3/lint/lint.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint // import "helm.sh/helm/v4/internal/chart/v3/lint"
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+type linterOptions struct {
+ KubeVersion *common.KubeVersion
+ SkipSchemaValidation bool
+}
+
+type LinterOption func(lo *linterOptions)
+
+func WithKubeVersion(kubeVersion *common.KubeVersion) LinterOption {
+ return func(lo *linterOptions) {
+ lo.KubeVersion = kubeVersion
+ }
+}
+
+func WithSkipSchemaValidation(skipSchemaValidation bool) LinterOption {
+ return func(lo *linterOptions) {
+ lo.SkipSchemaValidation = skipSchemaValidation
+ }
+}
+
+func RunAll(baseDir string, values map[string]interface{}, namespace string, options ...LinterOption) support.Linter {
+
+ chartDir, _ := filepath.Abs(baseDir)
+
+ lo := linterOptions{}
+ for _, option := range options {
+ option(&lo)
+ }
+
+ result := support.Linter{
+ ChartDir: chartDir,
+ }
+
+ rules.Chartfile(&result)
+ rules.ValuesWithOverrides(&result, values, lo.SkipSchemaValidation)
+ rules.TemplatesWithSkipSchemaValidation(&result, values, namespace, lo.KubeVersion, lo.SkipSchemaValidation)
+ rules.Dependencies(&result)
+ rules.Crds(&result)
+
+ return result
+}
diff --git a/helm/internal/chart/v3/lint/lint_test.go b/helm/internal/chart/v3/lint/lint_test.go
new file mode 100644
index 000000000..221de8572
--- /dev/null
+++ b/helm/internal/chart/v3/lint/lint_test.go
@@ -0,0 +1,243 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+const namespace = "testNamespace"
+
+const badChartDir = "rules/testdata/badchartfile"
+const badValuesFileDir = "rules/testdata/badvaluesfile"
+const badYamlFileDir = "rules/testdata/albatross"
+const badCrdFileDir = "rules/testdata/badcrdfile"
+const goodChartDir = "rules/testdata/goodone"
+const subChartValuesDir = "rules/testdata/withsubchart"
+const malformedTemplate = "rules/testdata/malformed-template"
+const invalidChartFileDir = "rules/testdata/invalidchartfile"
+
+func TestBadChartV3(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badChartDir, values, namespace).Messages
+ if len(m) != 8 {
+ t.Errorf("Number of errors %v", len(m))
+ t.Errorf("All didn't fail with expected errors, got %#v", m)
+ }
+ // There should be one INFO, one WARNING, and 2 ERROR messages, check for them
+ var i, w, e, e2, e3, e4, e5, e6 bool
+ for _, msg := range m {
+ if msg.Severity == support.InfoSev {
+ if strings.Contains(msg.Err.Error(), "icon is recommended") {
+ i = true
+ }
+ }
+ if msg.Severity == support.WarningSev {
+ if strings.Contains(msg.Err.Error(), "does not exist") {
+ w = true
+ }
+ }
+ if msg.Severity == support.ErrorSev {
+ if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") {
+ e = true
+ }
+ if strings.Contains(msg.Err.Error(), "name is required") {
+ e2 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "apiVersion is required. The value must be \"v3\"") {
+ e3 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "chart type is not valid in apiVersion") {
+ e4 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "dependencies are not valid in the Chart file with apiVersion") {
+ e5 = true
+ }
+ // This comes from the dependency check, which loads dependency info from the Chart.yaml
+ if strings.Contains(msg.Err.Error(), "unable to load chart") {
+ e6 = true
+ }
+ }
+ }
+ if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w {
+ t.Errorf("Didn't find all the expected errors, got %#v", m)
+ }
+}
+
+func TestInvalidYaml(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badYamlFileDir, values, namespace).Messages
+ if len(m) != 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "deliberateSyntaxError") {
+ t.Errorf("All didn't have the error for deliberateSyntaxError")
+ }
+}
+
+func TestInvalidChartYamlV3(t *testing.T) {
+ var values map[string]any
+ m := RunAll(invalidChartFileDir, values, namespace).Messages
+ t.Log(m)
+ if len(m) != 3 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") {
+ t.Errorf("All didn't have the error for duplicate YAML keys")
+ }
+}
+
+func TestBadValuesV3(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badValuesFileDir, values, namespace).Messages
+ if len(m) < 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "unable to parse YAML") {
+ t.Errorf("All didn't have the error for invalid key format: %s", m[0].Err)
+ }
+}
+
+func TestBadCrdFileV3(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badCrdFileDir, values, namespace).Messages
+ assert.Lenf(t, m, 2, "All didn't fail with expected errors, got %#v", m)
+ assert.ErrorContains(t, m[0].Err, "apiVersion is not in 'apiextensions.k8s.io'")
+ assert.ErrorContains(t, m[1].Err, "object kind is not 'CustomResourceDefinition'")
+}
+
+func TestGoodChart(t *testing.T) {
+ var values map[string]any
+ m := RunAll(goodChartDir, values, namespace).Messages
+ if len(m) != 0 {
+ t.Error("All returned linter messages when it shouldn't have")
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ }
+}
+
+// TestHelmCreateChart tests that a `helm create` always passes a `helm lint` test.
+//
+// See https://github.com/helm/helm/issues/7923
+func TestHelmCreateChart(t *testing.T) {
+ var values map[string]any
+ dir := t.TempDir()
+
+ createdChart, err := chartutil.Create("testhelmcreatepasseslint", dir)
+ if err != nil {
+ t.Error(err)
+ // Fatal is bad because of the defer.
+ return
+ }
+
+ // Note: we test with strict=true here, even though others have
+ // strict = false.
+ m := RunAll(createdChart, values, namespace, WithSkipSchemaValidation(true)).Messages
+ if ll := len(m); ll != 1 {
+ t.Errorf("All should have had exactly 1 error. Got %d", ll)
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg.Error())
+ }
+ } else if msg := m[0].Err.Error(); !strings.Contains(msg, "icon is recommended") {
+ t.Errorf("Unexpected lint error: %s", msg)
+ }
+}
+
+// TestHelmCreateChart_CheckDeprecatedWarnings checks if any default template created by `helm create` throws
+// deprecated warnings in the linter check against the current Kubernetes version (provided using ldflags).
+//
+// See https://github.com/helm/helm/issues/11495
+//
+// Resources like hpa and ingress, which are disabled by default in values.yaml are enabled here using the equivalent
+// of the `--set` flag.
+func TestHelmCreateChart_CheckDeprecatedWarnings(t *testing.T) {
+ createdChart, err := chartutil.Create("checkdeprecatedwarnings", t.TempDir())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Add values to enable hpa, and ingress which are disabled by default.
+ // This is the equivalent of:
+ // helm lint checkdeprecatedwarnings --set 'autoscaling.enabled=true,ingress.enabled=true'
+ updatedValues := map[string]any{
+ "autoscaling": map[string]any{
+ "enabled": true,
+ },
+ "ingress": map[string]any{
+ "enabled": true,
+ },
+ }
+
+ linterRunDetails := RunAll(createdChart, updatedValues, namespace, WithSkipSchemaValidation(true))
+ for _, msg := range linterRunDetails.Messages {
+ if strings.HasPrefix(msg.Error(), "[WARNING]") &&
+ strings.Contains(msg.Error(), "deprecated") {
+ // When there is a deprecation warning for an object created
+ // by `helm create` for the current Kubernetes version, fail.
+ t.Errorf("Unexpected deprecation warning for %q: %s", msg.Path, msg.Error())
+ }
+ }
+}
+
+// lint ignores import-values
+// See https://github.com/helm/helm/issues/9658
+func TestSubChartValuesChart(t *testing.T) {
+ var values map[string]any
+ m := RunAll(subChartValuesDir, values, namespace).Messages
+ if len(m) != 0 {
+ t.Error("All returned linter messages when it shouldn't have")
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ }
+}
+
+// lint stuck with malformed template object
+// See https://github.com/helm/helm/issues/11391
+func TestMalformedTemplate(t *testing.T) {
+ var values map[string]any
+ c := time.After(3 * time.Second)
+ ch := make(chan int, 1)
+ var m []support.Message
+ go func() {
+ m = RunAll(malformedTemplate, values, namespace).Messages
+ ch <- 1
+ }()
+ select {
+ case <-c:
+ t.Fatalf("lint malformed template timeout")
+ case <-ch:
+ if len(m) != 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "invalid character '{'") {
+ t.Errorf("All didn't have the error for invalid character '{'")
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/lint/rules/chartfile.go b/helm/internal/chart/v3/lint/rules/chartfile.go
new file mode 100644
index 000000000..fc246ba80
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/chartfile.go
@@ -0,0 +1,225 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/asaskevich/govalidator"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+// Chartfile runs a set of linter rules related to Chart.yaml file
+func Chartfile(linter *support.Linter) {
+ chartFileName := "Chart.yaml"
+ chartPath := filepath.Join(linter.ChartDir, chartFileName)
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath))
+
+ chartFile, err := chartutil.LoadChartfile(chartPath)
+ validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err))
+
+ // Guard clause. Following linter rules require a parsable ChartFile
+ if !validChartFile {
+ return
+ }
+
+ _, err = chartutil.StrictLoadChartfile(chartPath)
+ linter.RunLinterRule(support.WarningSev, chartFileName, validateChartYamlStrictFormat(err))
+
+ // type check for Chart.yaml . ignoring error as any parse
+ // errors would already be caught in the above load function
+ chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath)
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile))
+
+ // Chart metadata
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile))
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile))
+ linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile))
+}
+
+func validateChartVersionType(data map[string]interface{}) error {
+ return isStringValue(data, "version")
+}
+
+func validateChartAppVersionType(data map[string]interface{}) error {
+ return isStringValue(data, "appVersion")
+}
+
+func isStringValue(data map[string]interface{}, key string) error {
+ value, ok := data[key]
+ if !ok {
+ return nil
+ }
+ valueType := fmt.Sprintf("%T", value)
+ if valueType != "string" {
+ return fmt.Errorf("%s should be of type string but it's of type %s", key, valueType)
+ }
+ return nil
+}
+
+func validateChartYamlNotDirectory(chartPath string) error {
+ fi, err := os.Stat(chartPath)
+
+ if err == nil && fi.IsDir() {
+ return errors.New("should be a file, not a directory")
+ }
+ return nil
+}
+
+func validateChartYamlFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("unable to parse YAML\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartYamlStrictFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("failed to strictly parse chart metadata file\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartName(cf *chart.Metadata) error {
+ if cf.Name == "" {
+ return errors.New("name is required")
+ }
+ name := filepath.Base(cf.Name)
+ if name != cf.Name {
+ return fmt.Errorf("chart name %q is invalid", cf.Name)
+ }
+ return nil
+}
+
+func validateChartAPIVersion(cf *chart.Metadata) error {
+ if cf.APIVersion == "" {
+ return errors.New("apiVersion is required. The value must be \"v3\"")
+ }
+
+ if cf.APIVersion != chart.APIVersionV3 {
+ return fmt.Errorf("apiVersion '%s' is not valid. The value must be \"v3\"", cf.APIVersion)
+ }
+
+ return nil
+}
+
+func validateChartVersion(cf *chart.Metadata) error {
+ if cf.Version == "" {
+ return errors.New("version is required")
+ }
+
+ version, err := semver.StrictNewVersion(cf.Version)
+ if err != nil {
+ return fmt.Errorf("version '%s' is not a valid SemVerV2", cf.Version)
+ }
+
+ c, err := semver.NewConstraint(">0.0.0-0")
+ if err != nil {
+ return err
+ }
+ valid, msg := c.Validate(version)
+
+ if !valid && len(msg) > 0 {
+ return fmt.Errorf("version %v", msg[0])
+ }
+
+ return nil
+}
+
+func validateChartMaintainer(cf *chart.Metadata) error {
+ for _, maintainer := range cf.Maintainers {
+ if maintainer == nil {
+ return errors.New("a maintainer entry is empty")
+ }
+ if maintainer.Name == "" {
+ return errors.New("each maintainer requires a name")
+ } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) {
+ return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
+ } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) {
+ return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
+ }
+ }
+ return nil
+}
+
+func validateChartSources(cf *chart.Metadata) error {
+ for _, source := range cf.Sources {
+ if source == "" || !govalidator.IsRequestURL(source) {
+ return fmt.Errorf("invalid source URL '%s'", source)
+ }
+ }
+ return nil
+}
+
+func validateChartIconPresence(cf *chart.Metadata) error {
+ if cf.Icon == "" {
+ return errors.New("icon is recommended")
+ }
+ return nil
+}
+
+func validateChartIconURL(cf *chart.Metadata) error {
+ if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) {
+ return fmt.Errorf("invalid icon URL '%s'", cf.Icon)
+ }
+ return nil
+}
+
+func validateChartDependencies(cf *chart.Metadata) error {
+ if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV3 {
+ return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV3)
+ }
+ return nil
+}
+
+func validateChartType(cf *chart.Metadata) error {
+ if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV3 {
+ return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV3)
+ }
+ return nil
+}
+
+// loadChartFileForTypeCheck loads the Chart.yaml
+// in a generic form of a map[string]interface{}, so that the type
+// of the values can be checked
+func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := make(map[string]interface{})
+ err = yaml.Unmarshal(b, &y)
+ return y, err
+}
diff --git a/helm/internal/chart/v3/lint/rules/chartfile_test.go b/helm/internal/chart/v3/lint/rules/chartfile_test.go
new file mode 100644
index 000000000..57893e151
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/chartfile_test.go
@@ -0,0 +1,278 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+const (
+ badChartNameDir = "testdata/badchartname"
+ badChartDir = "testdata/badchartfile"
+ anotherBadChartDir = "testdata/anotherbadchartfile"
+)
+
+var (
+ badChartNamePath = filepath.Join(badChartNameDir, "Chart.yaml")
+ badChartFilePath = filepath.Join(badChartDir, "Chart.yaml")
+ nonExistingChartFilePath = filepath.Join(os.TempDir(), "Chart.yaml")
+)
+
+var badChart, _ = chartutil.LoadChartfile(badChartFilePath)
+var badChartName, _ = chartutil.LoadChartfile(badChartNamePath)
+
+// Validation functions Test
+func TestValidateChartYamlNotDirectory(t *testing.T) {
+ _ = os.Mkdir(nonExistingChartFilePath, os.ModePerm)
+ defer os.Remove(nonExistingChartFilePath)
+
+ err := validateChartYamlNotDirectory(nonExistingChartFilePath)
+ if err == nil {
+ t.Errorf("validateChartYamlNotDirectory to return a linter error, got no error")
+ }
+}
+
+func TestValidateChartYamlFormat(t *testing.T) {
+ err := validateChartYamlFormat(errors.New("Read error"))
+ if err == nil {
+ t.Errorf("validateChartYamlFormat to return a linter error, got no error")
+ }
+
+ err = validateChartYamlFormat(nil)
+ if err != nil {
+ t.Errorf("validateChartYamlFormat to return no error, got a linter error")
+ }
+}
+
+func TestValidateChartName(t *testing.T) {
+ err := validateChartName(badChart)
+ if err == nil {
+ t.Errorf("validateChartName to return a linter error, got no error")
+ }
+
+ err = validateChartName(badChartName)
+ if err == nil {
+ t.Error("expected validateChartName to return a linter error for an invalid name, got no error")
+ }
+}
+
+func TestValidateChartVersion(t *testing.T) {
+ var failTest = []struct {
+ Version string
+ ErrorMsg string
+ }{
+ {"", "version is required"},
+ {"1.2.3.4", "version '1.2.3.4' is not a valid SemVerV2"},
+ {"waps", "'waps' is not a valid SemVerV2"},
+ {"-3", "'-3' is not a valid SemVerV2"},
+ {"1.1", "'1.1' is not a valid SemVerV2"},
+ {"1", "'1' is not a valid SemVerV2"},
+ }
+
+ var successTest = []string{"0.0.1", "0.0.1+build", "0.0.1-beta"}
+
+ for _, test := range failTest {
+ badChart.Version = test.Version
+ err := validateChartVersion(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartVersion(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
+ }
+ }
+
+ for _, version := range successTest {
+ badChart.Version = version
+ err := validateChartVersion(badChart)
+ if err != nil {
+ t.Errorf("validateChartVersion(%s) to return no error, got a linter error", version)
+ }
+ }
+}
+
+func TestValidateChartMaintainer(t *testing.T) {
+ var failTest = []struct {
+ Name string
+ Email string
+ ErrorMsg string
+ }{
+ {"", "", "each maintainer requires a name"},
+ {"", "test@test.com", "each maintainer requires a name"},
+ {"John Snow", "wrongFormatEmail.com", "invalid email"},
+ }
+
+ var successTest = []struct {
+ Name string
+ Email string
+ }{
+ {"John Snow", ""},
+ {"John Snow", "john@winterfell.com"},
+ }
+
+ for _, test := range failTest {
+ badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}}
+ err := validateChartMaintainer(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartMaintainer(%s, %s) to return \"%s\", got no error", test.Name, test.Email, test.ErrorMsg)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}}
+ err := validateChartMaintainer(badChart)
+ if err != nil {
+ t.Errorf("validateChartMaintainer(%s, %s) to return no error, got %s", test.Name, test.Email, err.Error())
+ }
+ }
+
+ // Testing for an empty maintainer
+ badChart.Maintainers = []*chart.Maintainer{nil}
+ err := validateChartMaintainer(badChart)
+ if err == nil {
+ t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected")
+ }
+ if err.Error() != "a maintainer entry is empty" {
+ t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error())
+ }
+}
+
+func TestValidateChartSources(t *testing.T) {
+ var failTest = []string{"", "RiverRun", "john@winterfell", "riverrun.io"}
+ var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish"}
+ for _, test := range failTest {
+ badChart.Sources = []string{test}
+ err := validateChartSources(badChart)
+ if err == nil || !strings.Contains(err.Error(), "invalid source URL") {
+ t.Errorf("validateChartSources(%s) to return \"invalid source URL\", got no error", test)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Sources = []string{test}
+ err := validateChartSources(badChart)
+ if err != nil {
+ t.Errorf("validateChartSources(%s) to return no error, got %s", test, err.Error())
+ }
+ }
+}
+
+func TestValidateChartIconPresence(t *testing.T) {
+ t.Run("Icon absent", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err == nil {
+ t.Errorf("validateChartIconPresence to return a linter error, got no error")
+ } else if !strings.Contains(err.Error(), "icon is recommended") {
+ t.Errorf("expected %q, got %q", "icon is recommended", err.Error())
+ }
+ })
+ t.Run("Icon present", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "http://example.org/icon.png",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err != nil {
+ t.Errorf("Unexpected error: %q", err.Error())
+ }
+ })
+}
+
+func TestValidateChartIconURL(t *testing.T) {
+ var failTest = []string{"RiverRun", "john@winterfell", "riverrun.io"}
+ var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish.png"}
+ for _, test := range failTest {
+ badChart.Icon = test
+ err := validateChartIconURL(badChart)
+ if err == nil || !strings.Contains(err.Error(), "invalid icon URL") {
+ t.Errorf("validateChartIconURL(%s) to return \"invalid icon URL\", got no error", test)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Icon = test
+ err := validateChartSources(badChart)
+ if err != nil {
+ t.Errorf("validateChartIconURL(%s) to return no error, got %s", test, err.Error())
+ }
+ }
+}
+
+func TestV3Chartfile(t *testing.T) {
+ t.Run("Chart.yaml basic validity issues", func(t *testing.T) {
+ linter := support.Linter{ChartDir: badChartDir}
+ Chartfile(&linter)
+ msgs := linter.Messages
+ expectedNumberOfErrorMessages := 6
+
+ if len(msgs) != expectedNumberOfErrorMessages {
+ t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs))
+ return
+ }
+
+ if !strings.Contains(msgs[0].Err.Error(), "name is required") {
+ t.Errorf("Unexpected message 0: %s", msgs[0].Err)
+ }
+
+ if !strings.Contains(msgs[1].Err.Error(), "apiVersion is required. The value must be \"v3\"") {
+ t.Errorf("Unexpected message 1: %s", msgs[1].Err)
+ }
+
+ if !strings.Contains(msgs[2].Err.Error(), "version '0.0.0.0' is not a valid SemVer") {
+ t.Errorf("Unexpected message 2: %s", msgs[2].Err)
+ }
+
+ if !strings.Contains(msgs[3].Err.Error(), "icon is recommended") {
+ t.Errorf("Unexpected message 3: %s", msgs[3].Err)
+ }
+ })
+
+ t.Run("Chart.yaml validity issues due to type mismatch", func(t *testing.T) {
+ linter := support.Linter{ChartDir: anotherBadChartDir}
+ Chartfile(&linter)
+ msgs := linter.Messages
+ expectedNumberOfErrorMessages := 3
+
+ if len(msgs) != expectedNumberOfErrorMessages {
+ t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs))
+ return
+ }
+
+ if !strings.Contains(msgs[0].Err.Error(), "version should be of type string") {
+ t.Errorf("Unexpected message 0: %s", msgs[0].Err)
+ }
+
+ if !strings.Contains(msgs[1].Err.Error(), "version '7.2445e+06' is not a valid SemVer") {
+ t.Errorf("Unexpected message 1: %s", msgs[1].Err)
+ }
+
+ if !strings.Contains(msgs[2].Err.Error(), "appVersion should be of type string") {
+ t.Errorf("Unexpected message 2: %s", msgs[2].Err)
+ }
+ })
+}
diff --git a/helm/internal/chart/v3/lint/rules/crds.go b/helm/internal/chart/v3/lint/rules/crds.go
new file mode 100644
index 000000000..deedeb0f2
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/crds.go
@@ -0,0 +1,115 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+// Crds lints the CRDs in the Linter.
+func Crds(linter *support.Linter) {
+ fpath := "crds/"
+ crdsPath := filepath.Join(linter.ChartDir, fpath)
+
+ // crds directory is optional
+ if _, err := os.Stat(crdsPath); errors.Is(err, fs.ErrNotExist) {
+ return
+ }
+
+ crdsDirValid := linter.RunLinterRule(support.ErrorSev, fpath, validateCrdsDir(crdsPath))
+ if !crdsDirValid {
+ return
+ }
+
+ // Load chart and parse CRDs
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ /* Iterate over all the CRDs to check:
+ 1. It is a YAML file and not a template
+ 2. The API version is apiextensions.k8s.io
+ 3. The kind is CustomResourceDefinition
+ */
+ for _, crd := range chart.CRDObjects() {
+ fileName := crd.Name
+ fpath = fileName
+
+ decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(crd.File.Data), 4096)
+ for {
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ // If YAML parsing fails here, it will always fail in the next block as well, so we should return here.
+ // This also confirms the YAML is not a template, since templates can't be decoded into a K8sYamlStruct.
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+ return
+ }
+
+ if yamlStruct != nil {
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdAPIVersion(yamlStruct))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdKind(yamlStruct))
+ }
+ }
+ }
+}
+
+// Validation functions
+func validateCrdsDir(crdsPath string) error {
+ fi, err := os.Stat(crdsPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateCrdAPIVersion(obj *k8sYamlStruct) error {
+ if !strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") {
+ return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'")
+ }
+ return nil
+}
+
+func validateCrdKind(obj *k8sYamlStruct) error {
+ if obj.Kind != "CustomResourceDefinition" {
+ return fmt.Errorf("object kind is not 'CustomResourceDefinition'")
+ }
+ return nil
+}
diff --git a/helm/internal/chart/v3/lint/rules/crds_test.go b/helm/internal/chart/v3/lint/rules/crds_test.go
new file mode 100644
index 000000000..e435b8ea3
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/crds_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+)
+
+const invalidCrdsDir = "./testdata/invalidcrdsdir"
+
+func TestInvalidCrdsDir(t *testing.T) {
+ linter := support.Linter{ChartDir: invalidCrdsDir}
+ Crds(&linter)
+ res := linter.Messages
+
+ assert.Len(t, res, 1)
+ assert.ErrorContains(t, res[0].Err, "not a directory")
+}
+
+// multi-document YAML with empty documents would panic
+func TestCrdWithEmptyDocument(t *testing.T) {
+ chartDir := t.TempDir()
+
+ os.WriteFile(filepath.Join(chartDir, "Chart.yaml"), []byte(
+ `apiVersion: v1
+name: test
+version: 0.1.0
+`), 0644)
+
+ // CRD with comments before --- (creates empty document)
+ crdsDir := filepath.Join(chartDir, "crds")
+ os.Mkdir(crdsDir, 0755)
+ os.WriteFile(filepath.Join(crdsDir, "test.yaml"), []byte(
+ `# Comments create empty document
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: test.example.io
+`), 0644)
+
+ linter := support.Linter{ChartDir: chartDir}
+ Crds(&linter)
+
+ assert.Len(t, linter.Messages, 0)
+}
diff --git a/helm/internal/chart/v3/lint/rules/dependencies.go b/helm/internal/chart/v3/lint/rules/dependencies.go
new file mode 100644
index 000000000..f45153728
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/dependencies.go
@@ -0,0 +1,101 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import (
+ "fmt"
+ "strings"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+// Dependencies runs lints against a chart's dependencies
+//
+// See https://github.com/helm/helm/issues/7910
+func Dependencies(linter *support.Linter) {
+ c, err := loader.LoadDir(linter.ChartDir)
+ if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c))
+ linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c))
+ linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c))
+}
+
+func validateChartFormat(chartError error) error {
+ if chartError != nil {
+ return fmt.Errorf("unable to load chart\n\t%w", chartError)
+ }
+ return nil
+}
+
+func validateDependencyInChartsDir(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Dependencies() {
+ dependencies[dep.Metadata.Name] = struct{}{}
+ }
+ for _, dep := range c.Metadata.Dependencies {
+ if _, ok := dependencies[dep.Name]; !ok {
+ missing = append(missing, dep.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependencyInMetadata(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Metadata.Dependencies {
+ dependencies[dep.Name] = struct{}{}
+ }
+ for _, dep := range c.Dependencies() {
+ if _, ok := dependencies[dep.Metadata.Name]; !ok {
+ missing = append(missing, dep.Metadata.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependenciesUnique(c *chart.Chart) (err error) {
+ dependencies := map[string]*chart.Dependency{}
+ shadowing := []string{}
+
+ for _, dep := range c.Metadata.Dependencies {
+ key := dep.Name
+ if dep.Alias != "" {
+ key = dep.Alias
+ }
+ if dependencies[key] != nil {
+ shadowing = append(shadowing, key)
+ }
+ dependencies[key] = dep
+ }
+ if len(shadowing) > 0 {
+ err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ","))
+ }
+ return err
+}
diff --git a/helm/internal/chart/v3/lint/rules/dependencies_test.go b/helm/internal/chart/v3/lint/rules/dependencies_test.go
new file mode 100644
index 000000000..b80e4b8a9
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/dependencies_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package rules
+
+import (
+ "path/filepath"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+)
+
+func chartWithBadDependencies() chart.Chart {
+ badChartDeps := chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "sub2",
+ },
+ {
+ Name: "sub3",
+ },
+ },
+ },
+ }
+
+ badChartDeps.SetDependencies(
+ &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "sub1",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ },
+ },
+ &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "sub2",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ },
+ },
+ )
+ return badChartDeps
+}
+
+func TestValidateDependencyInChartsDir(t *testing.T) {
+ c := chartWithBadDependencies()
+
+ if err := validateDependencyInChartsDir(&c); err == nil {
+ t.Error("chart should have been flagged for missing deps in chart directory")
+ }
+}
+
+func TestValidateDependencyInMetadata(t *testing.T) {
+ c := chartWithBadDependencies()
+
+ if err := validateDependencyInMetadata(&c); err == nil {
+ t.Errorf("chart should have been flagged for missing deps in chart metadata")
+ }
+}
+
+func TestValidateDependenciesUnique(t *testing.T) {
+ tests := []struct {
+ chart chart.Chart
+ }{
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ },
+ {
+ Name: "foo",
+ },
+ },
+ },
+ }},
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ Alias: "bar",
+ },
+ {
+ Name: "bar",
+ },
+ },
+ },
+ }},
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ Alias: "baz",
+ },
+ {
+ Name: "bar",
+ Alias: "baz",
+ },
+ },
+ },
+ }},
+ }
+
+ for _, tt := range tests {
+ if err := validateDependenciesUnique(&tt.chart); err == nil {
+ t.Errorf("chart should have been flagged for dependency shadowing")
+ }
+ }
+}
+
+func TestDependencies(t *testing.T) {
+ tmp := t.TempDir()
+
+ c := chartWithBadDependencies()
+ err := chartutil.SaveDir(&c, tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ linter := support.Linter{ChartDir: filepath.Join(tmp, c.Metadata.Name)}
+
+ Dependencies(&linter)
+ if l := len(linter.Messages); l != 2 {
+ t.Errorf("expected 2 linter errors for bad chart dependencies. Got %d.", l)
+ for i, msg := range linter.Messages {
+ t.Logf("Message: %d, Error: %#v", i, msg)
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/lint/rules/deprecations.go b/helm/internal/chart/v3/lint/rules/deprecations.go
new file mode 100644
index 000000000..a607a5fb4
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/deprecations.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import (
+ "fmt"
+ "strconv"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/endpoints/deprecation"
+ kscheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// deprecatedAPIError indicates than an API is deprecated in Kubernetes
+type deprecatedAPIError struct {
+ Deprecated string
+ Message string
+}
+
+func (e deprecatedAPIError) Error() string {
+ msg := e.Message
+ return msg
+}
+
+func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVersion) error {
+ // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation
+ if resource.APIVersion == "" {
+ return nil
+ }
+ if resource.Kind == "" {
+ return nil
+ }
+
+ if kubeVersion == nil {
+ kubeVersion = &common.DefaultCapabilities.KubeVersion
+ }
+
+ kubeVersionMajor, err := strconv.Atoi(kubeVersion.Major)
+ if err != nil {
+ return err
+ }
+ kubeVersionMinor, err := strconv.Atoi(kubeVersion.Minor)
+ if err != nil {
+ return err
+ }
+
+ runtimeObject, err := resourceToRuntimeObject(resource)
+ if err != nil {
+ // do not error for non-kubernetes resources
+ if runtime.IsNotRegisteredError(err) {
+ return nil
+ }
+ return err
+ }
+
+ if !deprecation.IsDeprecated(runtimeObject, kubeVersionMajor, kubeVersionMinor) {
+ return nil
+ }
+ gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind)
+ return deprecatedAPIError{
+ Deprecated: gvk,
+ Message: deprecation.WarningMessage(runtimeObject),
+ }
+}
+
+func resourceToRuntimeObject(resource *k8sYamlStruct) (runtime.Object, error) {
+ scheme := runtime.NewScheme()
+ kscheme.AddToScheme(scheme)
+
+ gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind)
+ out, err := scheme.New(gvk)
+ if err != nil {
+ return nil, err
+ }
+ out.GetObjectKind().SetGroupVersionKind(gvk)
+ return out, nil
+}
diff --git a/helm/internal/chart/v3/lint/rules/deprecations_test.go b/helm/internal/chart/v3/lint/rules/deprecations_test.go
new file mode 100644
index 000000000..35e541e5c
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/deprecations_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/internal/chart/v3/lint/rules"
+
+import "testing"
+
+func TestValidateNoDeprecations(t *testing.T) {
+ deprecated := &k8sYamlStruct{
+ APIVersion: "extensions/v1beta1",
+ Kind: "Deployment",
+ }
+ err := validateNoDeprecations(deprecated, nil)
+ if err == nil {
+ t.Fatal("Expected deprecated extension to be flagged")
+ }
+ depErr := err.(deprecatedAPIError)
+ if depErr.Message == "" {
+ t.Fatalf("Expected error message to be non-blank: %v", err)
+ }
+
+ if err := validateNoDeprecations(&k8sYamlStruct{
+ APIVersion: "v1",
+ Kind: "Pod",
+ }, nil); err != nil {
+ t.Errorf("Expected a v1 Pod to not be deprecated")
+ }
+}
diff --git a/helm/internal/chart/v3/lint/rules/template.go b/helm/internal/chart/v3/lint/rules/template.go
new file mode 100644
index 000000000..38e602b7e
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/template.go
@@ -0,0 +1,353 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/validation"
+ apipath "k8s.io/apimachinery/pkg/api/validation/path"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/engine"
+)
+
+// Templates lints the templates in the Linter.
+func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) {
+ TemplatesWithKubeVersion(linter, values, namespace, nil)
+}
+
+// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version.
+func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion) {
+ TemplatesWithSkipSchemaValidation(linter, values, namespace, kubeVersion, false)
+}
+
+// TemplatesWithSkipSchemaValidation lints the templates in the Linter, allowing to specify the kubernetes version and if schema validation is enabled or not.
+func TemplatesWithSkipSchemaValidation(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) {
+ fpath := "templates/"
+ templatesPath := filepath.Join(linter.ChartDir, fpath)
+
+ // Templates directory is optional for now
+ templatesDirExists := linter.RunLinterRule(support.WarningSev, fpath, templatesDirExists(templatesPath))
+ if !templatesDirExists {
+ return
+ }
+
+ validTemplatesDir := linter.RunLinterRule(support.ErrorSev, fpath, validateTemplatesDir(templatesPath))
+ if !validTemplatesDir {
+ return
+ }
+
+ // Load chart and parse templates
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ options := common.ReleaseOptions{
+ Name: "test-release",
+ Namespace: namespace,
+ }
+
+ caps := common.DefaultCapabilities.Copy()
+ if kubeVersion != nil {
+ caps.KubeVersion = *kubeVersion
+ }
+
+ // lint ignores import-values
+ // See https://github.com/helm/helm/issues/9658
+ if err := chartutil.ProcessDependencies(chart, values); err != nil {
+ return
+ }
+
+ cvals, err := util.CoalesceValues(chart, values)
+ if err != nil {
+ return
+ }
+
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, skipSchemaValidation)
+ if err != nil {
+ linter.RunLinterRule(support.ErrorSev, fpath, err)
+ return
+ }
+ var e engine.Engine
+ e.LintMode = true
+ renderedContentMap, err := e.Render(chart, valuesToRender)
+
+ renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !renderOk {
+ return
+ }
+
+ /* Iterate over all the templates to check:
+ - It is a .yaml file
+ - All the values in the template file is defined
+ - {{}} include | quote
+ - Generated content is a valid Yaml file
+ - Metadata.Namespace is not set
+ */
+ for _, template := range chart.Templates {
+ fileName := template.Name
+ fpath = fileName
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
+
+ // We only apply the following lint rules to yaml files
+ if !isYamlFileExtension(fileName) {
+ continue
+ }
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463
+ // Check that all the templates have a matching value
+ // linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate))
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037
+ // linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate)))
+
+ renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)]
+ if strings.TrimSpace(renderedContent) != "" {
+ linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent))
+
+ decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096)
+
+ // Lint all resources if the file contains multiple documents separated by ---
+ for {
+ // Even though k8sYamlStruct only defines a few fields, an error in any other
+ // key will be raised as well
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ // If YAML linting fails here, it will always fail in the next block as well, so we should return here.
+ // fix https://github.com/helm/helm/issues/11391
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+ return
+ }
+ if yamlStruct != nil {
+ // NOTE: set to warnings to allow users to support out-of-date kubernetes
+ // Refs https://github.com/helm/helm/issues/8596
+ linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct))
+ linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct, kubeVersion))
+
+ linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent))
+ }
+ }
+ }
+ }
+}
+
+// validateTopIndentLevel checks that the content does not start with an indent level > 0.
+//
+// This error can occur when a template accidentally inserts space. It can cause
+// unpredictable errors depending on whether the text is normalized before being passed
+// into the YAML parser. So we trap it here.
+//
+// See https://github.com/helm/helm/issues/8467
+func validateTopIndentLevel(content string) error {
+ // Read lines until we get to a non-empty one
+ scanner := bufio.NewScanner(bytes.NewBufferString(content))
+ for scanner.Scan() {
+ line := scanner.Text()
+ // If line is empty, skip
+ if strings.TrimSpace(line) == "" {
+ continue
+ }
+ // If it starts with one or more spaces, this is an error
+ if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+ return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line)
+ }
+ // Any other condition passes.
+ return nil
+ }
+ return scanner.Err()
+}
+
+// Validation functions
+func templatesDirExists(templatesPath string) error {
+ _, err := os.Stat(templatesPath)
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("directory does not exist")
+ }
+ return nil
+}
+
+func validateTemplatesDir(templatesPath string) error {
+ fi, err := os.Stat(templatesPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateAllowedExtension(fileName string) error {
+ ext := filepath.Ext(fileName)
+ validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"}
+
+ if slices.Contains(validExtensions, ext) {
+ return nil
+ }
+
+ return fmt.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
+}
+
+func validateYamlContent(err error) error {
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+ return nil
+}
+
+// validateMetadataName uses the correct validation function for the object
+// Kind, or if not set, defaults to the standard definition of a subdomain in
+// DNS (RFC 1123), used by most resources.
+func validateMetadataName(obj *k8sYamlStruct) error {
+ fn := validateMetadataNameFunc(obj)
+ allErrs := field.ErrorList{}
+ for _, msg := range fn(obj.Metadata.Name, false) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg))
+ }
+ if len(allErrs) > 0 {
+ return fmt.Errorf("object name does not conform to Kubernetes naming requirements: %q: %w", obj.Metadata.Name, allErrs.ToAggregate())
+ }
+ return nil
+}
+
+// validateMetadataNameFunc will return a name validation function for the
+// object kind, if defined below.
+//
+// Rules should match those set in the various api validations:
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39
+// ...
+//
+// Implementing here to avoid importing k/k.
+//
+// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object
+// kinds that don't have special requirements, so is the most likely to work if
+// new kinds are added.
+func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc {
+ switch strings.ToLower(obj.Kind) {
+ case "pod", "node", "secret", "endpoints", "resourcequota", // core
+ "controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
+ "autoscaler", // autoscaler
+ "cronjob", "job", // batch
+ "lease", // coordination
+ "endpointslice", // discovery
+ "networkpolicy", "ingress", // networking
+ "podsecuritypolicy", // policy
+ "priorityclass", // scheduling
+ "podpreset", // settings
+ "storageclass", "volumeattachment", "csinode": // storage
+ return validation.NameIsDNSSubdomain
+ case "service":
+ return validation.NameIsDNS1035Label
+ case "namespace":
+ return validation.ValidateNamespaceName
+ case "serviceaccount":
+ return validation.ValidateServiceAccountName
+ case "certificatesigningrequest":
+ // No validation.
+ // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140
+ return func(_ string, _ bool) []string { return nil }
+ case "role", "clusterrole", "rolebinding", "clusterrolebinding":
+ // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34
+ return func(name string, _ bool) []string {
+ return apipath.IsValidPathSegmentName(name)
+ }
+ default:
+ return validation.NameIsDNSSubdomain
+ }
+}
+
+// validateMatchSelector ensures that template specs have a selector declared.
+// See https://github.com/helm/helm/issues/1990
+func validateMatchSelector(yamlStruct *k8sYamlStruct, manifest string) error {
+ switch yamlStruct.Kind {
+ case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
+ // verify that matchLabels or matchExpressions is present
+ if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") {
+ return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
+ }
+ }
+ return nil
+}
+
+func validateListAnnotations(yamlStruct *k8sYamlStruct, manifest string) error {
+ if yamlStruct.Kind == "List" {
+ m := struct {
+ Items []struct {
+ Metadata struct {
+ Annotations map[string]string
+ }
+ }
+ }{}
+
+ if err := yaml.Unmarshal([]byte(manifest), &m); err != nil {
+ return validateYamlContent(err)
+ }
+
+ for _, i := range m.Items {
+ if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok {
+ return errors.New("annotation 'helm.sh/resource-policy' within List objects are ignored")
+ }
+ }
+ }
+ return nil
+}
+
+func isYamlFileExtension(fileName string) bool {
+ ext := strings.ToLower(filepath.Ext(fileName))
+ return ext == ".yaml" || ext == ".yml"
+}
+
+// k8sYamlStruct stubs a Kubernetes YAML file.
+type k8sYamlStruct struct {
+ APIVersion string `json:"apiVersion"`
+ Kind string
+ Metadata k8sYamlMetadata
+}
+
+type k8sYamlMetadata struct {
+ Namespace string
+ Name string
+}
diff --git a/helm/internal/chart/v3/lint/rules/template_test.go b/helm/internal/chart/v3/lint/rules/template_test.go
new file mode 100644
index 000000000..0ffc92002
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/template_test.go
@@ -0,0 +1,467 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ chartutil "helm.sh/helm/v4/internal/chart/v3/util"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+const templateTestBasedir = "./testdata/albatross"
+
+func TestValidateAllowedExtension(t *testing.T) {
+ var failTest = []string{"/foo", "/test.toml"}
+ for _, test := range failTest {
+ err := validateAllowedExtension(test)
+ if err == nil || !strings.Contains(err.Error(), "Valid extensions are .yaml, .yml, .tpl, or .txt") {
+ t.Errorf("validateAllowedExtension('%s') to return \"Valid extensions are .yaml, .yml, .tpl, or .txt\", got no error", test)
+ }
+ }
+ var successTest = []string{"/foo.yaml", "foo.yaml", "foo.tpl", "/foo/bar/baz.yaml", "NOTES.txt"}
+ for _, test := range successTest {
+ err := validateAllowedExtension(test)
+ if err != nil {
+ t.Errorf("validateAllowedExtension('%s') to return no error but got \"%s\"", test, err.Error())
+ }
+ }
+}
+
+var values = map[string]interface{}{"nameOverride": "", "httpPort": 80}
+
+const namespace = "testNamespace"
+const strict = false
+
+func TestTemplateParsing(t *testing.T) {
+ linter := support.Linter{ChartDir: templateTestBasedir}
+ Templates(&linter, values, namespace, strict)
+ res := linter.Messages
+
+ if len(res) != 1 {
+ t.Fatalf("Expected one error, got %d, %v", len(res), res)
+ }
+
+ if !strings.Contains(res[0].Err.Error(), "deliberateSyntaxError") {
+ t.Errorf("Unexpected error: %s", res[0])
+ }
+}
+
+var wrongTemplatePath = filepath.Join(templateTestBasedir, "templates", "fail.yaml")
+var ignoredTemplatePath = filepath.Join(templateTestBasedir, "fail.yaml.ignored")
+
+// Test a template with all the existing features:
+// namespaces, partial templates
+func TestTemplateIntegrationHappyPath(t *testing.T) {
+ // Rename file so it gets ignored by the linter
+ os.Rename(wrongTemplatePath, ignoredTemplatePath)
+ defer os.Rename(ignoredTemplatePath, wrongTemplatePath)
+
+ linter := support.Linter{ChartDir: templateTestBasedir}
+ Templates(&linter, values, namespace, strict)
+ res := linter.Messages
+
+ if len(res) != 0 {
+ t.Fatalf("Expected no error, got %d, %v", len(res), res)
+ }
+}
+
+func TestMultiTemplateFail(t *testing.T) {
+ linter := support.Linter{ChartDir: "./testdata/multi-template-fail"}
+ Templates(&linter, values, namespace, strict)
+ res := linter.Messages
+
+ if len(res) != 1 {
+ t.Fatalf("Expected 1 error, got %d, %v", len(res), res)
+ }
+
+ if !strings.Contains(res[0].Err.Error(), "object name does not conform to Kubernetes naming requirements") {
+ t.Errorf("Unexpected error: %s", res[0].Err)
+ }
+}
+
+func TestValidateMetadataName(t *testing.T) {
+ tests := []struct {
+ obj *k8sYamlStruct
+ wantErr bool
+ }{
+ // Most kinds use IsDNS1123Subdomain.
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "operator:sa"}}, true},
+
+ // Service uses IsDNS1035Label.
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "123baz"}}, true},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+
+ // Namespace uses IsDNS1123Label.
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo-bar"}}, false},
+
+ // CertificateSigningRequest has no validation.
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: ""}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, false},
+
+ // RBAC uses path validation.
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "RoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+
+ // Unknown Kind
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+
+ // No kind
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ }
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%s/%s", tt.obj.Kind, tt.obj.Metadata.Name), func(t *testing.T) {
+ if err := validateMetadataName(tt.obj); (err != nil) != tt.wantErr {
+ t.Errorf("validateMetadataName() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestDeprecatedAPIFails(t *testing.T) {
+ modTime := time.Now()
+ mychart := chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "failapi",
+ Version: "0.1.0",
+ Icon: "satisfy-the-linting-gods.gif",
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/baddeployment.yaml",
+ ModTime: modTime,
+ Data: []byte("apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: baddep\nspec: {selector: {matchLabels: {foo: bar}}}"),
+ },
+ {
+ Name: "templates/goodsecret.yaml",
+ ModTime: modTime,
+ Data: []byte("apiVersion: v1\nkind: Secret\nmetadata:\n name: goodsecret"),
+ },
+ },
+ }
+ tmpdir := t.TempDir()
+
+ if err := chartutil.SaveDir(&mychart, tmpdir); err != nil {
+ t.Fatal(err)
+ }
+
+ linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())}
+ Templates(&linter, values, namespace, strict)
+ if l := len(linter.Messages); l != 1 {
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ t.Fatalf("Expected 1 lint error, got %d", l)
+ }
+
+ err := linter.Messages[0].Err.(deprecatedAPIError)
+ if err.Deprecated != "apps/v1beta1 Deployment" {
+ t.Errorf("Surprised to learn that %q is deprecated", err.Deprecated)
+ }
+}
+
+const manifest = `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myval1: {{default "val" .Values.mymap.key1 }}
+ myval2: {{default "val" .Values.mymap.key2 }}
+`
+
+// TestStrictTemplateParsingMapError is a regression test.
+//
+// The template engine should not produce an error when a map in values.yaml does
+// not contain all possible keys.
+//
+// See https://github.com/helm/helm/issues/7483
+func TestStrictTemplateParsingMapError(t *testing.T) {
+
+ ch := chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "regression7483",
+ APIVersion: "v2",
+ Version: "0.1.0",
+ },
+ Values: map[string]interface{}{
+ "mymap": map[string]string{
+ "key1": "val1",
+ },
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/configmap.yaml",
+ ModTime: time.Now(),
+ Data: []byte(manifest),
+ },
+ },
+ }
+ dir := t.TempDir()
+ if err := chartutil.SaveDir(&ch, dir); err != nil {
+ t.Fatal(err)
+ }
+ linter := &support.Linter{
+ ChartDir: filepath.Join(dir, ch.Metadata.Name),
+ }
+ Templates(linter, ch.Values, namespace, strict)
+ if len(linter.Messages) != 0 {
+ t.Errorf("expected zero messages, got %d", len(linter.Messages))
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %q", i, msg)
+ }
+ }
+}
+
+func TestValidateMatchSelector(t *testing.T) {
+ md := &k8sYamlStruct{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ Metadata: k8sYamlMetadata{
+ Name: "mydeployment",
+ },
+ }
+ manifest := `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err != nil {
+ t.Error(err)
+ }
+ manifest = `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchExpressions:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err != nil {
+ t.Error(err)
+ }
+ manifest = `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err == nil {
+ t.Error("expected Deployment with no selector to fail")
+ }
+}
+
+func TestValidateTopIndentLevel(t *testing.T) {
+ for doc, shouldFail := range map[string]bool{
+ // Should not fail
+ "\n\n\n\t\n \t\n": false,
+ "apiVersion:foo\n bar:baz": false,
+ "\n\n\napiVersion:foo\n\n\n": false,
+ // Should fail
+ " apiVersion:foo": true,
+ "\n\n apiVersion:foo\n\n": true,
+ } {
+ if err := validateTopIndentLevel(doc); (err == nil) == shouldFail {
+ t.Errorf("Expected %t for %q", shouldFail, doc)
+ }
+ }
+
+}
+
+// TestEmptyWithCommentsManifests checks the lint is not failing against empty manifests that contains only comments
+// See https://github.com/helm/helm/issues/8621
+func TestEmptyWithCommentsManifests(t *testing.T) {
+ mychart := chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "emptymanifests",
+ Version: "0.1.0",
+ Icon: "satisfy-the-linting-gods.gif",
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/empty-with-comments.yaml",
+ ModTime: time.Now(),
+ Data: []byte("#@formatter:off\n"),
+ },
+ },
+ }
+ tmpdir := t.TempDir()
+
+ if err := chartutil.SaveDir(&mychart, tmpdir); err != nil {
+ t.Fatal(err)
+ }
+
+ linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())}
+ Templates(&linter, values, namespace, strict)
+ if l := len(linter.Messages); l > 0 {
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ t.Fatalf("Expected 0 lint errors, got %d", l)
+ }
+}
+func TestValidateListAnnotations(t *testing.T) {
+ md := &k8sYamlStruct{
+ APIVersion: "v1",
+ Kind: "List",
+ Metadata: k8sYamlMetadata{
+ Name: "list",
+ },
+ }
+ manifest := `
+apiVersion: v1
+kind: List
+items:
+ - apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ annotations:
+ helm.sh/resource-policy: keep
+`
+
+ if err := validateListAnnotations(md, manifest); err == nil {
+ t.Fatal("expected list with nested keep annotations to fail")
+ }
+
+ manifest = `
+apiVersion: v1
+kind: List
+metadata:
+ annotations:
+ helm.sh/resource-policy: keep
+items:
+ - apiVersion: v1
+ kind: ConfigMap
+`
+
+ if err := validateListAnnotations(md, manifest); err != nil {
+ t.Fatalf("List objects keep annotations should pass. got: %s", err)
+ }
+}
+
+func TestIsYamlFileExtension(t *testing.T) {
+ tests := []struct {
+ filename string
+ expected bool
+ }{
+ {"test.yaml", true},
+ {"test.yml", true},
+ {"test.txt", false},
+ {"test", false},
+ }
+
+ for _, test := range tests {
+ result := isYamlFileExtension(test.filename)
+ if result != test.expected {
+ t.Errorf("isYamlFileExtension(%s) = %v; want %v", test.filename, result, test.expected)
+ }
+ }
+
+}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml
new file mode 100644
index 000000000..5e1ed515c
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/albatross/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: albatross
+description: testing chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl b/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl
new file mode 100644
index 000000000..24f76db73
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{define "name"}}{{default "nginx" .Values.nameOverride | trunc 63 | trimSuffix "-" }}{{end}}
+
+{{/*
+Create a default fully qualified app name.
+
+We truncate at 63 chars because some Kubernetes name fields are limited to this
+(by the DNS naming spec).
+*/}}
+{{define "fullname"}}
+{{- $name := default "nginx" .Values.nameOverride -}}
+{{printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{end}}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml b/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml
new file mode 100644
index 000000000..a11e0e90e
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/fail.yaml
@@ -0,0 +1 @@
+{{ deliberateSyntaxError }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml b/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml
new file mode 100644
index 000000000..16bb27d55
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/albatross/templates/svc.yaml
@@ -0,0 +1,19 @@
+# This is a service gateway to the replica set created by the deployment.
+# Take a look at the deployment.yaml for general notes about this chart.
+apiVersion: v1
+kind: Service
+metadata:
+ name: "{{ .Values.name }}"
+ labels:
+ app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+ kubeVersion: {{ .Capabilities.KubeVersion.Major }}
+spec:
+ ports:
+ - port: {{default 80 .Values.httpPort | quote}}
+ targetPort: 80
+ protocol: TCP
+ name: http
+ selector:
+ app.kubernetes.io/name: {{template "fullname" .}}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/albatross/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/albatross/values.yaml
new file mode 100644
index 000000000..74cc6a0dc
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/albatross/values.yaml
@@ -0,0 +1 @@
+name: "mariner"
diff --git a/helm/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml
new file mode 100644
index 000000000..8a598473b
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/anotherbadchartfile/Chart.yaml
@@ -0,0 +1,15 @@
+name: "some-chart"
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 72445e2
+home: ""
+type: application
+appVersion: 72225e2
+icon: "https://some-url.com/icon.jpeg"
+dependencies:
+ - name: mariadb
+ version: 5.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - database
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml
new file mode 100644
index 000000000..3564ede3e
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badchartfile/Chart.yaml
@@ -0,0 +1,11 @@
+description: A Helm chart for Kubernetes
+version: 0.0.0.0
+home: ""
+type: application
+dependencies:
+- name: mariadb
+ version: 5.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - database
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml
new file mode 100644
index 000000000..9f367033b
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badchartfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badchartfile.
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml
new file mode 100644
index 000000000..41f452354
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badchartname/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: "../badchartname"
+type: application
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badchartname/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/badchartname/values.yaml
new file mode 100644
index 000000000..9f367033b
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badchartname/values.yaml
@@ -0,0 +1 @@
+# Default values for badchartfile.
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml
new file mode 100644
index 000000000..3bf007393
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: badcrdfile
+type: application
+icon: http://riverrun.io
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
new file mode 100644
index 000000000..468916053
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
@@ -0,0 +1,2 @@
+apiVersion: bad.k8s.io/v1beta1
+kind: CustomResourceDefinition
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
new file mode 100644
index 000000000..523b97f85
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
@@ -0,0 +1,2 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: NotACustomResourceDefinition
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/templates/.gitkeep b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/templates/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml
new file mode 100644
index 000000000..2fffc7715
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badcrdfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badcrdfile.
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml
new file mode 100644
index 000000000..aace27e21
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+name: badvaluesfile
+description: A Helm chart for Kubernetes
+version: 0.0.1
+home: ""
+icon: http://riverrun.io
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml b/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
new file mode 100644
index 000000000..6c2ceb8db
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{.name | default "foo" | title}}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml
new file mode 100644
index 000000000..b5a10271c
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/badvaluesfile/values.yaml
@@ -0,0 +1,2 @@
+# Invalid value for badvaluesfile for testing lint fails with invalid yaml format
+name= "value"
diff --git a/helm/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml
new file mode 100644
index 000000000..bf8f5e309
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/goodone/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: goodone
+description: good testing chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/helm/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml b/helm/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml
new file mode 100644
index 000000000..1d7350f1d
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/goodone/crds/test-crd.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: tests.test.io
+spec:
+ group: test.io
+ names:
+ kind: Test
+ listKind: TestList
+ plural: tests
+ singular: test
+ scope: Namespaced
+ versions:
+ - name : v1alpha2
+ served: true
+ storage: true
+ - name : v1alpha1
+ served: true
+ storage: false
diff --git a/helm/internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml b/helm/internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml
new file mode 100644
index 000000000..cd46f62c7
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/goodone/templates/goodone.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.name | default "foo" | lower }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/goodone/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/goodone/values.yaml
new file mode 100644
index 000000000..92c3d9bb9
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/goodone/values.yaml
@@ -0,0 +1 @@
+name: "goodone-here"
diff --git a/helm/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml
new file mode 100644
index 000000000..0fd58d1d4
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/invalidchartfile/Chart.yaml
@@ -0,0 +1,6 @@
+name: some-chart
+apiVersion: v2
+apiVersion: v1
+description: A Helm chart for Kubernetes
+version: 1.3.0
+icon: http://example.com
diff --git a/helm/internal/chart/v3/lint/rules/testdata/invalidchartfile/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/invalidchartfile/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml
new file mode 100644
index 000000000..0f6d1ee98
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: invalidcrdsdir
+type: application
+icon: http://riverrun.io
diff --git a/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/crds b/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/crds
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml
new file mode 100644
index 000000000..6b1611a64
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/invalidcrdsdir/values.yaml
@@ -0,0 +1 @@
+# Default values for invalidcrdsdir.
diff --git a/helm/internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/helm/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml
new file mode 100644
index 000000000..d46b98cb5
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/Chart.yaml
@@ -0,0 +1,25 @@
+apiVersion: v3
+name: test
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+icon: https://riverrun.io
\ No newline at end of file
diff --git a/helm/internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml
new file mode 100644
index 000000000..213198fda
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/templates/bad.yaml
@@ -0,0 +1 @@
+{ {- $relname := .Release.Name -}}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml
new file mode 100644
index 000000000..1cc3182ea
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/malformed-template/values.yaml
@@ -0,0 +1,82 @@
+# Default values for test.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml
new file mode 100644
index 000000000..bfb580bea
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/multi-template-fail/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v3
+name: multi-template-fail
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/helm/internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml b/helm/internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
new file mode 100644
index 000000000..835be07be
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: game-config
+data:
+ game.properties: cheat
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: -this:name-is-not_valid$
+data:
+ game.properties: empty
diff --git a/helm/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml
new file mode 100644
index 000000000..2a29c33fa
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v3
+name: v3-fail
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl
new file mode 100644
index 000000000..0b89e723b
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "v3-fail.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "v3-fail.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "v3-fail.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "v3-fail.labels" -}}
+helm.sh/chart: {{ include "v3-fail.chart" . }}
+{{ include "v3-fail.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "v3-fail.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "v3-fail.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "v3-fail.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "v3-fail.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml
new file mode 100644
index 000000000..6d651ab8e
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "v3-fail.fullname" . }}
+ labels:
+ nope: {{ .Release.Time }}
+ {{- include "v3-fail.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "v3-fail.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "v3-fail.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "v3-fail.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml
new file mode 100644
index 000000000..4790650d0
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/ingress.yaml
@@ -0,0 +1,62 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "v3-fail.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+ {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+ {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+ {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include "v3-fail.labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ "helm.sh/hook": crd-install
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+ ingressClassName: {{ .Values.ingress.className }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+ pathType: {{ .pathType }}
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ $svcPort }}
+ {{- else }}
+ serviceName: {{ $fullName }}
+ servicePort: {{ $svcPort }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml
new file mode 100644
index 000000000..79a0f40b0
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "v3-fail.fullname" . }}
+ annotations:
+ helm.sh/hook: crd-install
+ labels:
+ {{- include "v3-fail.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "v3-fail.selectorLabels" . | nindent 4 }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml
new file mode 100644
index 000000000..01d99b4e6
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/v3-fail/values.yaml
@@ -0,0 +1,66 @@
+# Default values for v3-fail.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml
new file mode 100644
index 000000000..fa15eabaf
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/Chart.yaml
@@ -0,0 +1,16 @@
+apiVersion: v3
+name: withsubchart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
+icon: http://riverrun.io
+
+dependencies:
+ - name: subchart
+ version: 0.1.16
+ repository: "file://../subchart"
+ import-values:
+ - child: subchart
+ parent: subchart
+
diff --git a/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
new file mode 100644
index 000000000..35b13e70d
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+name: subchart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
diff --git a/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
new file mode 100644
index 000000000..6cb6cc2af
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.subchart.name | lower }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
new file mode 100644
index 000000000..422a359d5
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
@@ -0,0 +1,2 @@
+subchart:
+ name: subchart
\ No newline at end of file
diff --git a/helm/internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml
new file mode 100644
index 000000000..6cb6cc2af
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/templates/mainchart.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.subchart.name | lower }}
diff --git a/helm/internal/chart/v3/lint/rules/testdata/withsubchart/values.yaml b/helm/internal/chart/v3/lint/rules/testdata/withsubchart/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/lint/rules/values.go b/helm/internal/chart/v3/lint/rules/values.go
new file mode 100644
index 000000000..0af9765dd
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/values.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/chart/v3/lint/support"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+)
+
+// ValuesWithOverrides tests the values.yaml file.
+//
+// If a schema is present in the chart, values are tested against that. Otherwise,
+// they are only tested for well-formedness.
+//
+// If additional values are supplied, they are coalesced into the values in values.yaml.
+func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}, skipSchemaValidation bool) {
+ file := "values.yaml"
+ vf := filepath.Join(linter.ChartDir, file)
+ fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf))
+
+ if !fileExists {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, valueOverrides, skipSchemaValidation))
+}
+
+func validateValuesFileExistence(valuesPath string) error {
+ _, err := os.Stat(valuesPath)
+ if err != nil {
+ return fmt.Errorf("file does not exist")
+ }
+ return nil
+}
+
+func validateValuesFile(valuesPath string, overrides map[string]interface{}, skipSchemaValidation bool) error {
+ values, err := common.ReadValuesFile(valuesPath)
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+
+ // Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top
+ // level values against the top-level expectations. Subchart values are not linted.
+ // We could change that. For now, though, we retain that strategy, and thus can
+ // coalesce tables (like reuse-values does) instead of doing the full chart
+ // CoalesceValues
+ coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
+ coalescedValues = util.CoalesceTables(coalescedValues, values)
+
+ ext := filepath.Ext(valuesPath)
+ schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json"
+ schema, err := os.ReadFile(schemaPath)
+ if len(schema) == 0 {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if !skipSchemaValidation {
+ return util.ValidateAgainstSingleSchema(coalescedValues, schema)
+ }
+
+ return nil
+}
diff --git a/helm/internal/chart/v3/lint/rules/values_test.go b/helm/internal/chart/v3/lint/rules/values_test.go
new file mode 100644
index 000000000..288b77436
--- /dev/null
+++ b/helm/internal/chart/v3/lint/rules/values_test.go
@@ -0,0 +1,183 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+var nonExistingValuesFilePath = filepath.Join("/fake/dir", "values.yaml")
+
+const testSchema = `
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "helm values test schema",
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "username",
+ "password"
+ ],
+ "properties": {
+ "username": {
+ "description": "Your username",
+ "type": "string"
+ },
+ "password": {
+ "description": "Your password",
+ "type": "string"
+ }
+ }
+}
+`
+
+func TestValidateValuesYamlNotDirectory(t *testing.T) {
+ _ = os.Mkdir(nonExistingValuesFilePath, os.ModePerm)
+ defer os.Remove(nonExistingValuesFilePath)
+
+ err := validateValuesFileExistence(nonExistingValuesFilePath)
+ if err == nil {
+ t.Errorf("validateValuesFileExistence to return a linter error, got no error")
+ }
+}
+
+func TestValidateValuesFileWellFormed(t *testing.T) {
+ badYaml := `
+ not:well[]{}formed
+ `
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml))
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err == nil {
+ t.Fatal("expected values file to fail parsing")
+ }
+}
+
+func TestValidateValuesFileSchema(t *testing.T) {
+ yaml := "username: admin\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err != nil {
+ t.Fatalf("Failed validation with %s", err)
+ }
+}
+
+func TestValidateValuesFileSchemaFailure(t *testing.T) {
+ // 1234 is an int, not a string. This should fail.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, false)
+ if err == nil {
+ t.Fatal("expected values file to fail parsing")
+ }
+
+ assert.Contains(t, err.Error(), "- at '/username': got number, want string")
+}
+
+func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T) {
+ // 1234 is an int, not a string. This should fail normally but pass with skipSchemaValidation.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, true)
+ if err != nil {
+ t.Fatal("expected values file to pass parsing because of skipSchemaValidation")
+ }
+}
+
+func TestValidateValuesFileSchemaOverrides(t *testing.T) {
+ yaml := "username: admin"
+ overrides := map[string]interface{}{
+ "password": "swordfish",
+ }
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, overrides, false); err != nil {
+ t.Fatalf("Failed validation with %s", err)
+ }
+}
+
+func TestValidateValuesFile(t *testing.T) {
+ tests := []struct {
+ name string
+ yaml string
+ overrides map[string]interface{}
+ errorMessage string
+ }{
+ {
+ name: "value added",
+ yaml: "username: admin",
+ overrides: map[string]interface{}{"password": "swordfish"},
+ },
+ {
+ name: "value not overridden",
+ yaml: "username: admin\npassword:",
+ overrides: map[string]interface{}{"username": "anotherUser"},
+ errorMessage: "- at '/password': got null, want string",
+ },
+ {
+ name: "value overridden",
+ yaml: "username: admin\npassword:",
+ overrides: map[string]interface{}{"username": "anotherUser", "password": "swordfish"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(tt.yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, tt.overrides, false)
+
+ switch {
+ case err != nil && tt.errorMessage == "":
+ t.Errorf("Failed validation with %s", err)
+ case err == nil && tt.errorMessage != "":
+ t.Error("expected values file to fail parsing")
+ case err != nil && tt.errorMessage != "":
+ assert.Contains(t, err.Error(), tt.errorMessage, "Failed with unexpected error")
+ }
+ })
+ }
+}
+
+func createTestingSchema(t *testing.T, dir string) string {
+ t.Helper()
+ schemafile := filepath.Join(dir, "values.schema.json")
+ if err := os.WriteFile(schemafile, []byte(testSchema), 0700); err != nil {
+ t.Fatalf("Failed to write schema to tmpdir: %s", err)
+ }
+ return schemafile
+}
diff --git a/helm/internal/chart/v3/lint/support/doc.go b/helm/internal/chart/v3/lint/support/doc.go
new file mode 100644
index 000000000..2d54a9b7d
--- /dev/null
+++ b/helm/internal/chart/v3/lint/support/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package support contains tools for linting charts.
+
+Linting is the process of testing charts for errors or warnings regarding
+formatting, compilation, or standards compliance.
+*/
+package support // import "helm.sh/helm/v4/internal/chart/v3/lint/support"
diff --git a/helm/internal/chart/v3/lint/support/message.go b/helm/internal/chart/v3/lint/support/message.go
new file mode 100644
index 000000000..5efbc7a61
--- /dev/null
+++ b/helm/internal/chart/v3/lint/support/message.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import "fmt"
+
+// Severity indicates the severity of a Message.
+const (
+ // UnknownSev indicates that the severity of the error is unknown, and should not stop processing.
+ UnknownSev = iota
+ // InfoSev indicates information, for example missing values.yaml file
+ InfoSev
+ // WarningSev indicates that something does not meet code standards, but will likely function.
+ WarningSev
+ // ErrorSev indicates that something will not likely function.
+ ErrorSev
+)
+
+// sev matches the *Sev states.
+var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"}
+
+// Linter encapsulates a linting run of a particular chart.
+type Linter struct {
+ Messages []Message
+ // The highest severity of all the failing lint rules
+ HighestSeverity int
+ ChartDir string
+}
+
+// Message describes an error encountered while linting.
+type Message struct {
+ // Severity is one of the *Sev constants
+ Severity int
+ Path string
+ Err error
+}
+
+func (m Message) Error() string {
+ return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error())
+}
+
+// NewMessage creates a new Message struct
+func NewMessage(severity int, path string, err error) Message {
+ return Message{Severity: severity, Path: path, Err: err}
+}
+
+// RunLinterRule returns true if the validation passed
+func (l *Linter) RunLinterRule(severity int, path string, err error) bool {
+ // severity is out of bound
+ if severity < 0 || severity >= len(sev) {
+ return false
+ }
+
+ if err != nil {
+ l.Messages = append(l.Messages, NewMessage(severity, path, err))
+
+ if severity > l.HighestSeverity {
+ l.HighestSeverity = severity
+ }
+ }
+ return err == nil
+}
diff --git a/helm/internal/chart/v3/lint/support/message_test.go b/helm/internal/chart/v3/lint/support/message_test.go
new file mode 100644
index 000000000..ce5b5e42e
--- /dev/null
+++ b/helm/internal/chart/v3/lint/support/message_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import (
+ "errors"
+ "testing"
+)
+
+var errLint = errors.New("lint failed")
+
+func TestRunLinterRule(t *testing.T) {
+ var tests = []struct {
+ Severity int
+ LintError error
+ ExpectedMessages int
+ ExpectedReturn bool
+ ExpectedHighestSeverity int
+ }{
+ {InfoSev, errLint, 1, false, InfoSev},
+ {WarningSev, errLint, 2, false, WarningSev},
+ {ErrorSev, errLint, 3, false, ErrorSev},
+ // No error so it returns true
+ {ErrorSev, nil, 3, true, ErrorSev},
+ // Retains highest severity
+ {InfoSev, errLint, 4, false, ErrorSev},
+ // Invalid severity values
+ {4, errLint, 4, false, ErrorSev},
+ {22, errLint, 4, false, ErrorSev},
+ {-1, errLint, 4, false, ErrorSev},
+ }
+
+ linter := Linter{}
+ for _, test := range tests {
+ isValid := linter.RunLinterRule(test.Severity, "chart", test.LintError)
+ if len(linter.Messages) != test.ExpectedMessages {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.Messages should now have %d message, we got %d", test.Severity, test.LintError, test.ExpectedMessages, len(linter.Messages))
+ }
+
+ if linter.HighestSeverity != test.ExpectedHighestSeverity {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.HighestSeverity should be %d, we got %d", test.Severity, test.LintError, test.ExpectedHighestSeverity, linter.HighestSeverity)
+ }
+
+ if isValid != test.ExpectedReturn {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), should have returned %t but returned %t", test.Severity, test.LintError, test.ExpectedReturn, isValid)
+ }
+ }
+}
+
+func TestMessage(t *testing.T) {
+ m := Message{ErrorSev, "Chart.yaml", errors.New("Foo")}
+ if m.Error() != "[ERROR] Chart.yaml: Foo" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+
+ m = Message{WarningSev, "templates/", errors.New("Bar")}
+ if m.Error() != "[WARNING] templates/: Bar" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+
+ m = Message{InfoSev, "templates/rc.yaml", errors.New("FooBar")}
+ if m.Error() != "[INFO] templates/rc.yaml: FooBar" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+}
diff --git a/helm/internal/chart/v3/loader/archive.go b/helm/internal/chart/v3/loader/archive.go
new file mode 100644
index 000000000..a9d4faf8f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/archive.go
@@ -0,0 +1,74 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+)
+
+// FileLoader loads a chart from a file
+type FileLoader string
+
+// Load loads a chart
+func (l FileLoader) Load() (*chart.Chart, error) {
+ return LoadFile(string(l))
+}
+
+// LoadFile loads from an archive file.
+func LoadFile(name string) (*chart.Chart, error) {
+ if fi, err := os.Stat(name); err != nil {
+ return nil, err
+ } else if fi.IsDir() {
+ return nil, errors.New("cannot load a directory")
+ }
+
+ raw, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ err = archive.EnsureArchive(name, raw)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := LoadArchive(raw)
+ if err != nil {
+ if errors.Is(err, gzip.ErrHeader) {
+ return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err)
+ }
+ }
+ return c, err
+}
+
+// LoadArchive loads from a reader containing a compressed tar archive.
+func LoadArchive(in io.Reader) (*chart.Chart, error) {
+ files, err := archive.LoadArchiveFiles(in)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/helm/internal/chart/v3/loader/directory.go b/helm/internal/chart/v3/loader/directory.go
new file mode 100644
index 000000000..dfe3af3b2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/directory.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/sympath"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ "helm.sh/helm/v4/pkg/ignore"
+)
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// DirLoader loads a chart from a directory
+type DirLoader string
+
+// Load loads the chart
+func (l DirLoader) Load() (*chart.Chart, error) {
+ return LoadDir(string(l))
+}
+
+// LoadDir loads from a directory.
+//
+// This loads charts only from directories.
+func LoadDir(dir string) (*chart.Chart, error) {
+ topdir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Just used for errors.
+ c := &chart.Chart{}
+
+ rules := ignore.Empty()
+ ifile := filepath.Join(topdir, ignore.HelmIgnore)
+ if _, err := os.Stat(ifile); err == nil {
+ r, err := ignore.ParseFile(ifile)
+ if err != nil {
+ return c, err
+ }
+ rules = r
+ }
+ rules.AddDefaults()
+
+ files := []*archive.BufferedFile{}
+ topdir += string(filepath.Separator)
+
+ walk := func(name string, fi os.FileInfo, err error) error {
+ n := strings.TrimPrefix(name, topdir)
+ if n == "" {
+ // No need to process top level. Avoid bug with helmignore .* matching
+ // empty names. See issue 1779.
+ return nil
+ }
+
+ // Normalize to / since it will also work on Windows
+ n = filepath.ToSlash(n)
+
+ if err != nil {
+ return err
+ }
+ if fi.IsDir() {
+ // Directory-based ignore rules should involve skipping the entire
+ // contents of that directory.
+ if rules.Ignore(n, fi) {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ // If a .helmignore file matches, skip this file.
+ if rules.Ignore(n, fi) {
+ return nil
+ }
+
+ // Irregular files include devices, sockets, and other uses of files that
+ // are not regular files. In Go they have a file mode type bit set.
+ // See https://golang.org/pkg/os/#FileMode for examples.
+ if !fi.Mode().IsRegular() {
+ return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name)
+ }
+
+ if fi.Size() > archive.MaxDecompressedFileSize {
+ return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), archive.MaxDecompressedFileSize)
+ }
+
+ data, err := os.ReadFile(name)
+ if err != nil {
+ return fmt.Errorf("error reading %s: %w", n, err)
+ }
+
+ data = bytes.TrimPrefix(data, utf8bom)
+
+ files = append(files, &archive.BufferedFile{Name: n, ModTime: fi.ModTime(), Data: data})
+ return nil
+ }
+ if err = sympath.Walk(topdir, walk); err != nil {
+ return c, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/helm/internal/chart/v3/loader/load.go b/helm/internal/chart/v3/loader/load.go
new file mode 100644
index 000000000..373c4659f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/load.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+)
+
+// ChartLoader loads a chart.
+type ChartLoader interface {
+ Load() (*chart.Chart, error)
+}
+
+// Loader returns a new ChartLoader appropriate for the given chart name
+func Loader(name string) (ChartLoader, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return DirLoader(name), nil
+ }
+ return FileLoader(name), nil
+}
+
+// Load takes a string name, tries to resolve it to a file or directory, and then loads it.
+//
+// This is the preferred way to load a chart. It will discover the chart encoding
+// and hand off to the appropriate chart reader.
+//
+// If a .helmignore file is present, the directory loader will skip loading any files
+// matching it. But .helmignore is not evaluated when reading out of an archive.
+func Load(name string) (*chart.Chart, error) {
+ l, err := Loader(name)
+ if err != nil {
+ return nil, err
+ }
+ return l.Load()
+}
+
+// LoadFiles loads from in-memory files.
+func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) {
+ c := new(chart.Chart)
+ subcharts := make(map[string][]*archive.BufferedFile)
+ var subChartsKeys []string
+
+ // do not rely on assumed ordering of files in the chart and crash
+ // if Chart.yaml was not coming early enough to initialize metadata
+ for _, f := range files {
+ c.Raw = append(c.Raw, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ if f.Name == "Chart.yaml" {
+ if c.Metadata == nil {
+ c.Metadata = new(chart.Metadata)
+ }
+ if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
+ return c, fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ // While the documentation says the APIVersion is required, in practice there
+ // are cases where that's not enforced. Since this package set is for v3 charts,
+ // when this function is used v3 is automatically added when not present.
+ if c.Metadata.APIVersion == "" {
+ c.Metadata.APIVersion = chart.APIVersionV3
+ }
+ c.ModTime = f.ModTime
+ }
+ }
+ for _, f := range files {
+ switch {
+ case f.Name == "Chart.yaml":
+ // already processed
+ continue
+ case f.Name == "Chart.lock":
+ c.Lock = new(chart.Lock)
+ if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
+ return c, fmt.Errorf("cannot load Chart.lock: %w", err)
+ }
+ case f.Name == "values.yaml":
+ values, err := LoadValues(bytes.NewReader(f.Data))
+ if err != nil {
+ return c, fmt.Errorf("cannot load values.yaml: %w", err)
+ }
+ c.Values = values
+ case f.Name == "values.schema.json":
+ c.Schema = f.Data
+ c.SchemaModTime = f.ModTime
+
+ case strings.HasPrefix(f.Name, "templates/"):
+ c.Templates = append(c.Templates, &common.File{Name: f.Name, Data: f.Data, ModTime: f.ModTime})
+ case strings.HasPrefix(f.Name, "charts/"):
+ if filepath.Ext(f.Name) == ".prov" {
+ c.Files = append(c.Files, &common.File{Name: f.Name, Data: f.Data, ModTime: f.ModTime})
+ continue
+ }
+
+ fname := strings.TrimPrefix(f.Name, "charts/")
+ cname := strings.SplitN(fname, "/", 2)[0]
+ if slices.Index(subChartsKeys, cname) == -1 {
+ subChartsKeys = append(subChartsKeys, cname)
+ }
+ subcharts[cname] = append(subcharts[cname], &archive.BufferedFile{Name: fname, ModTime: f.ModTime, Data: f.Data})
+ default:
+ c.Files = append(c.Files, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ }
+ }
+
+ if c.Metadata == nil {
+ return c, errors.New("Chart.yaml file is missing") //nolint:staticcheck
+ }
+
+ if err := c.Validate(); err != nil {
+ return c, err
+ }
+
+ for n, files := range subcharts {
+ var sc *chart.Chart
+ var err error
+ switch {
+ case strings.IndexAny(n, "_.") == 0:
+ continue
+ case filepath.Ext(n) == ".tgz":
+ file := files[0]
+ if file.Name != n {
+ return c, fmt.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name)
+ }
+ // Untar the chart and add to c.Dependencies
+ sc, err = LoadArchive(bytes.NewBuffer(file.Data))
+ default:
+ // We have to trim the prefix off of every file, and ignore any file
+ // that is in charts/, but isn't actually a chart.
+ buff := make([]*archive.BufferedFile, 0, len(files))
+ for _, f := range files {
+ parts := strings.SplitN(f.Name, "/", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ f.Name = parts[1]
+ buff = append(buff, f)
+ }
+ sc, err = LoadFiles(buff)
+ }
+
+ if err != nil {
+ return c, fmt.Errorf("error unpacking subchart %s in %s: %w", n, c.Name(), err)
+ }
+ c.AddDependency(sc)
+ }
+
+ return c, nil
+}
+
+// LoadValues loads values from a reader.
+//
+// The reader is expected to contain one or more YAML documents, the values of which are merged.
+// And the values can be either a chart's default values or user-supplied values.
+func LoadValues(data io.Reader) (map[string]interface{}, error) {
+ values := map[string]interface{}{}
+ reader := utilyaml.NewYAMLReader(bufio.NewReader(data))
+ for {
+ currentMap := map[string]interface{}{}
+ raw, err := reader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return nil, fmt.Errorf("error reading yaml document: %w", err)
+ }
+ if err := yaml.Unmarshal(raw, ¤tMap); err != nil {
+ return nil, fmt.Errorf("cannot unmarshal yaml document: %w", err)
+ }
+ values = MergeMaps(values, currentMap)
+ }
+ return values, nil
+}
+
+// MergeMaps merges two maps. If a key exists in both maps, the value from b will be used.
+// If the value is a map, the maps will be merged recursively.
+func MergeMaps(a, b map[string]interface{}) map[string]interface{} {
+ out := make(map[string]interface{}, len(a))
+ maps.Copy(out, a)
+ for k, v := range b {
+ if v, ok := v.(map[string]interface{}); ok {
+ if bv, ok := out[k]; ok {
+ if bv, ok := bv.(map[string]interface{}); ok {
+ out[k] = MergeMaps(bv, v)
+ continue
+ }
+ }
+ }
+ out[k] = v
+ }
+ return out
+}
diff --git a/helm/internal/chart/v3/loader/load_test.go b/helm/internal/chart/v3/loader/load_test.go
new file mode 100644
index 000000000..12403f9c2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/load_test.go
@@ -0,0 +1,726 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+)
+
+func TestLoadDir(t *testing.T) {
+ l, err := Loader("testdata/frobnitz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadDirWithDevNull(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("test only works on unix systems with /dev/null present")
+ }
+
+ l, err := Loader("testdata/frobnitz_with_dev_null")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ if _, err := l.Load(); err == nil {
+ t.Errorf("packages with an irregular file (/dev/null) should not load")
+ }
+}
+
+func TestLoadDirWithSymlink(t *testing.T) {
+ sym := filepath.Join("..", "LICENSE")
+ link := filepath.Join("testdata", "frobnitz_with_symlink", "LICENSE")
+
+ if err := os.Symlink(sym, link); err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(link)
+
+ l, err := Loader("testdata/frobnitz_with_symlink")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestBomTestData(t *testing.T) {
+ testFiles := []string{"frobnitz_with_bom/.helmignore", "frobnitz_with_bom/templates/template.tpl", "frobnitz_with_bom/Chart.yaml"}
+ for _, file := range testFiles {
+ data, err := os.ReadFile("testdata/" + file)
+ if err != nil || !bytes.HasPrefix(data, utf8bom) {
+ t.Errorf("Test file has no BOM or is invalid: testdata/%s", file)
+ }
+ }
+
+ archive, err := os.ReadFile("testdata/frobnitz_with_bom.tgz")
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ unzipped, err := gzip.NewReader(bytes.NewReader(archive))
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ defer unzipped.Close()
+ for _, testFile := range testFiles {
+ data := make([]byte, 3)
+ err := unzipped.Reset(bytes.NewReader(archive))
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ tr := tar.NewReader(unzipped)
+ for {
+ file, err := tr.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ if file != nil && strings.EqualFold(file.Name, testFile) {
+ _, err := tr.Read(data)
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ } else {
+ break
+ }
+ }
+ }
+ if !bytes.Equal(data, utf8bom) {
+ t.Fatalf("Test file has no BOM or is invalid: frobnitz_with_bom.tgz/%s", testFile)
+ }
+ }
+}
+
+func TestLoadDirWithUTFBOM(t *testing.T) {
+ l, err := Loader("testdata/frobnitz_with_bom")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+ verifyBomStripped(t, c.Files)
+}
+
+func TestLoadArchiveWithUTFBOM(t *testing.T) {
+ l, err := Loader("testdata/frobnitz_with_bom.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+ verifyBomStripped(t, c.Files)
+}
+
+func TestLoadFile(t *testing.T) {
+ l, err := Loader("testdata/frobnitz-1.2.3.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+}
+
+func TestLoadFiles(t *testing.T) {
+ modTime := time.Now()
+ goodFiles := []*archive.BufferedFile{
+ {
+ Name: "Chart.yaml",
+ ModTime: modTime,
+ Data: []byte(`apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+`),
+ },
+ {
+ Name: "values.yaml",
+ ModTime: modTime,
+ Data: []byte("var: some values"),
+ },
+ {
+ Name: "values.schema.json",
+ ModTime: modTime,
+ Data: []byte("type: Values"),
+ },
+ {
+ Name: "templates/deployment.yaml",
+ ModTime: modTime,
+ Data: []byte("some deployment"),
+ },
+ {
+ Name: "templates/service.yaml",
+ ModTime: modTime,
+ Data: []byte("some service"),
+ },
+ }
+
+ c, err := LoadFiles(goodFiles)
+ if err != nil {
+ t.Errorf("Expected good files to be loaded, got %v", err)
+ }
+
+ if c.Name() != "frobnitz" {
+ t.Errorf("Expected chart name to be 'frobnitz', got %s", c.Name())
+ }
+
+ if c.Values["var"] != "some values" {
+ t.Error("Expected chart values to be populated with default values")
+ }
+
+ if len(c.Raw) != 5 {
+ t.Errorf("Expected %d files, got %d", 5, len(c.Raw))
+ }
+
+ if !bytes.Equal(c.Schema, []byte("type: Values")) {
+ t.Error("Expected chart schema to be populated with default values")
+ }
+
+ if len(c.Templates) != 2 {
+ t.Errorf("Expected number of templates == 2, got %d", len(c.Templates))
+ }
+
+ if _, err = LoadFiles([]*archive.BufferedFile{}); err == nil {
+ t.Fatal("Expected err to be non-nil")
+ }
+ if err.Error() != "Chart.yaml file is missing" {
+ t.Errorf("Expected chart metadata missing error, got '%s'", err.Error())
+ }
+}
+
+// Test the order of file loading. The Chart.yaml file needs to come first for
+// later comparison checks. See https://github.com/helm/helm/pull/8948
+func TestLoadFilesOrder(t *testing.T) {
+ modTime := time.Now()
+ goodFiles := []*archive.BufferedFile{
+ {
+ Name: "requirements.yaml",
+ ModTime: modTime,
+ Data: []byte("dependencies:"),
+ },
+ {
+ Name: "values.yaml",
+ ModTime: modTime,
+ Data: []byte("var: some values"),
+ },
+
+ {
+ Name: "templates/deployment.yaml",
+ ModTime: modTime,
+ Data: []byte("some deployment"),
+ },
+ {
+ Name: "templates/service.yaml",
+ ModTime: modTime,
+ Data: []byte("some service"),
+ },
+ {
+ Name: "Chart.yaml",
+ ModTime: modTime,
+ Data: []byte(`apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+`),
+ },
+ }
+
+ // Capture stderr to make sure message about Chart.yaml handle dependencies
+ // is not present
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("Unable to create pipe: %s", err)
+ }
+ stderr := log.Writer()
+ log.SetOutput(w)
+ defer func() {
+ log.SetOutput(stderr)
+ }()
+
+ _, err = LoadFiles(goodFiles)
+ if err != nil {
+ t.Errorf("Expected good files to be loaded, got %v", err)
+ }
+ w.Close()
+
+ var text bytes.Buffer
+ io.Copy(&text, r)
+ if text.String() != "" {
+ t.Errorf("Expected no message to Stderr, got %s", text.String())
+ }
+
+}
+
+// Packaging the chart on a Windows machine will produce an
+// archive that has \\ as delimiters. Test that we support these archives
+func TestLoadFileBackslash(t *testing.T) {
+ c, err := Load("testdata/frobnitz_backslash-1.2.3.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyChartFileAndTemplate(t, c, "frobnitz_backslash")
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+}
+
+func TestLoadV3WithReqs(t *testing.T) {
+ l, err := Loader("testdata/frobnitz.v3.reqs")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadInvalidArchive(t *testing.T) {
+ tmpdir := t.TempDir()
+
+ writeTar := func(filename, internalPath string, body []byte) {
+ dest, err := os.Create(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ zipper := gzip.NewWriter(dest)
+ tw := tar.NewWriter(zipper)
+
+ h := &tar.Header{
+ Name: internalPath,
+ Mode: 0755,
+ Size: int64(len(body)),
+ ModTime: time.Now(),
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tw.Write(body); err != nil {
+ t.Fatal(err)
+ }
+ tw.Close()
+ zipper.Close()
+ dest.Close()
+ }
+
+ for _, tt := range []struct {
+ chartname string
+ internal string
+ expectError string
+ }{
+ {"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-name.tgz", "./.", "chart illegally contains content outside the base directory"},
+ {"illegal-name2.tgz", "/./.", "chart illegally contains content outside the base directory"},
+ {"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains content outside the base directory"},
+ {"illegal-name4.tgz", "/missing-leading-slash", "Chart.yaml file is missing"},
+ {"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"},
+
+ // Under special circumstances, this can get normalized to things that look like absolute Windows paths
+ {"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"},
+ {"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"},
+ {"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"},
+ } {
+ illegalChart := filepath.Join(tmpdir, tt.chartname)
+ writeTar(illegalChart, tt.internal, []byte("hello: world"))
+ _, err := Load(illegalChart)
+ if err == nil {
+ t.Fatal("expected error when unpacking illegal files")
+ }
+ if !strings.Contains(err.Error(), tt.expectError) {
+ t.Errorf("Expected error to contain %q, got %q for %s", tt.expectError, err.Error(), tt.chartname)
+ }
+ }
+
+ // Make sure that absolute path gets interpreted as relative
+ illegalChart := filepath.Join(tmpdir, "abs-path.tgz")
+ writeTar(illegalChart, "/Chart.yaml", []byte("hello: world"))
+ _, err := Load(illegalChart)
+ if err.Error() != "validation: chart.metadata.name is required" {
+ t.Error(err)
+ }
+
+ // And just to validate that the above was not spurious
+ illegalChart = filepath.Join(tmpdir, "abs-path2.tgz")
+ writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world"))
+ _, err = Load(illegalChart)
+ if err.Error() != "Chart.yaml file is missing" {
+ t.Errorf("Unexpected error message: %s", err)
+ }
+
+ // Finally, test that drive letter gets stripped off on Windows
+ illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz")
+ writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world"))
+ _, err = Load(illegalChart)
+ if err.Error() != "validation: chart.metadata.name is required" {
+ t.Error(err)
+ }
+}
+
+func TestLoadValues(t *testing.T) {
+ testCases := map[string]struct {
+ data []byte
+ expctedValues map[string]interface{}
+ }{
+ "It should load values correctly": {
+ data: []byte(`
+foo:
+ image: foo:v1
+bar:
+ version: v2
+`),
+ expctedValues: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "image": "foo:v1",
+ },
+ "bar": map[string]interface{}{
+ "version": "v2",
+ },
+ },
+ },
+ "It should load values correctly with multiple documents in one file": {
+ data: []byte(`
+foo:
+ image: foo:v1
+bar:
+ version: v2
+---
+foo:
+ image: foo:v2
+`),
+ expctedValues: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "image": "foo:v2",
+ },
+ "bar": map[string]interface{}{
+ "version": "v2",
+ },
+ },
+ },
+ }
+ for testName, testCase := range testCases {
+ t.Run(testName, func(tt *testing.T) {
+ values, err := LoadValues(bytes.NewReader(testCase.data))
+ if err != nil {
+ tt.Fatal(err)
+ }
+ if !reflect.DeepEqual(values, testCase.expctedValues) {
+ tt.Errorf("Expected values: %v, got %v", testCase.expctedValues, values)
+ }
+ })
+ }
+}
+
+func TestMergeValuesV3(t *testing.T) {
+ nestedMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "stuff",
+ },
+ }
+ anotherNestedMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "things",
+ "awesome": "stuff",
+ },
+ }
+ flatMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": "stuff",
+ }
+ anotherFlatMap := map[string]interface{}{
+ "testing": "fun",
+ }
+
+ testMap := MergeMaps(flatMap, nestedMap)
+ equal := reflect.DeepEqual(testMap, nestedMap)
+ if !equal {
+ t.Errorf("Expected a nested map to overwrite a flat value. Expected: %v, got %v", nestedMap, testMap)
+ }
+
+ testMap = MergeMaps(nestedMap, flatMap)
+ equal = reflect.DeepEqual(testMap, flatMap)
+ if !equal {
+ t.Errorf("Expected a flat value to overwrite a map. Expected: %v, got %v", flatMap, testMap)
+ }
+
+ testMap = MergeMaps(nestedMap, anotherNestedMap)
+ equal = reflect.DeepEqual(testMap, anotherNestedMap)
+ if !equal {
+ t.Errorf("Expected a nested map to overwrite another nested map. Expected: %v, got %v", anotherNestedMap, testMap)
+ }
+
+ testMap = MergeMaps(anotherFlatMap, anotherNestedMap)
+ expectedMap := map[string]interface{}{
+ "testing": "fun",
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "things",
+ "awesome": "stuff",
+ },
+ }
+ equal = reflect.DeepEqual(testMap, expectedMap)
+ if !equal {
+ t.Errorf("Expected a map with different keys to merge properly with another map. Expected: %v, got %v", expectedMap, testMap)
+ }
+}
+
+func verifyChart(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if c.Name() == "" {
+ t.Fatalf("No chart metadata found on %v", c)
+ }
+ t.Logf("Verifying chart %s", c.Name())
+ if len(c.Templates) != 1 {
+ t.Errorf("Expected 1 template, got %d", len(c.Templates))
+ }
+
+ numfiles := 6
+ if len(c.Files) != numfiles {
+ t.Errorf("Expected %d extra files, got %d", numfiles, len(c.Files))
+ for _, n := range c.Files {
+ t.Logf("\t%s", n.Name)
+ }
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d (%v)", len(c.Dependencies()), c.Dependencies())
+ for _, d := range c.Dependencies() {
+ t.Logf("\tSubchart: %s\n", d.Name())
+ }
+ }
+
+ expect := map[string]map[string]string{
+ "alpine": {
+ "version": "0.1.0",
+ },
+ "mariner": {
+ "version": "4.3.2",
+ },
+ }
+
+ for _, dep := range c.Dependencies() {
+ if dep.Metadata == nil {
+ t.Fatalf("expected metadata on dependency: %v", dep)
+ }
+ exp, ok := expect[dep.Name()]
+ if !ok {
+ t.Fatalf("Unknown dependency %s", dep.Name())
+ }
+ if exp["version"] != dep.Metadata.Version {
+ t.Errorf("Expected %s version %s, got %s", dep.Name(), exp["version"], dep.Metadata.Version)
+ }
+ }
+
+}
+
+func verifyDependencies(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
+ }
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+ for i, tt := range tests {
+ d := c.Metadata.Dependencies[i]
+ if d.Name != tt.Name {
+ t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
+ }
+ if d.Version != tt.Version {
+ t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
+ }
+ if d.Repository != tt.Repository {
+ t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
+ }
+ }
+}
+
+func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
+ }
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+ for i, tt := range tests {
+ d := c.Metadata.Dependencies[i]
+ if d.Name != tt.Name {
+ t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
+ }
+ if d.Version != tt.Version {
+ t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
+ }
+ if d.Repository != tt.Repository {
+ t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
+ }
+ }
+}
+
+func verifyFrobnitz(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ verifyChartFileAndTemplate(t, c, "frobnitz")
+}
+
+func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
+ t.Helper()
+ if c.Metadata == nil {
+ t.Fatal("Metadata is nil")
+ }
+ if c.Name() != name {
+ t.Errorf("Expected %s, got %s", name, c.Name())
+ }
+ if len(c.Templates) != 1 {
+ t.Fatalf("Expected 1 template, got %d", len(c.Templates))
+ }
+ if c.Templates[0].Name != "templates/template.tpl" {
+ t.Errorf("Unexpected template: %s", c.Templates[0].Name)
+ }
+ if len(c.Templates[0].Data) == 0 {
+ t.Error("No template data.")
+ }
+ if len(c.Files) != 6 {
+ t.Fatalf("Expected 6 Files, got %d", len(c.Files))
+ }
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("Expected 2 Dependency, got %d", len(c.Dependencies()))
+ }
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Fatalf("Expected 2 Dependencies.Dependency, got %d", len(c.Metadata.Dependencies))
+ }
+ if len(c.Lock.Dependencies) != 2 {
+ t.Fatalf("Expected 2 Lock.Dependency, got %d", len(c.Lock.Dependencies))
+ }
+
+ for _, dep := range c.Dependencies() {
+ switch dep.Name() {
+ case "mariner":
+ case "alpine":
+ if len(dep.Templates) != 1 {
+ t.Fatalf("Expected 1 template, got %d", len(dep.Templates))
+ }
+ if dep.Templates[0].Name != "templates/alpine-pod.yaml" {
+ t.Errorf("Unexpected template: %s", dep.Templates[0].Name)
+ }
+ if len(dep.Templates[0].Data) == 0 {
+ t.Error("No template data.")
+ }
+ if len(dep.Files) != 1 {
+ t.Fatalf("Expected 1 Files, got %d", len(dep.Files))
+ }
+ if len(dep.Dependencies()) != 2 {
+ t.Fatalf("Expected 2 Dependency, got %d", len(dep.Dependencies()))
+ }
+ default:
+ t.Errorf("Unexpected dependency %s", dep.Name())
+ }
+ }
+}
+
+func verifyBomStripped(t *testing.T, files []*common.File) {
+ t.Helper()
+ for _, file := range files {
+ if bytes.HasPrefix(file.Data, utf8bom) {
+ t.Errorf("Byte Order Mark still present in processed file %s", file.Name)
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/loader/testdata/LICENSE b/helm/internal/chart/v3/loader/testdata/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/albatross/Chart.yaml b/helm/internal/chart/v3/loader/testdata/albatross/Chart.yaml
new file mode 100644
index 000000000..eeef737ff
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/albatross/Chart.yaml
@@ -0,0 +1,4 @@
+name: albatross
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/albatross/values.yaml b/helm/internal/chart/v3/loader/testdata/albatross/values.yaml
new file mode 100644
index 000000000..3121cd7ce
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/albatross/values.yaml
@@ -0,0 +1,4 @@
+albatross: "true"
+
+global:
+ author: Coleridge
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz-1.2.3.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..de28e4120
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz-1.2.3.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/.helmignore b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/INSTALL.txt b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/LICENSE b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/_ignore_me b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/docs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/icon.svg b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/ignore/me.txt b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/templates/template.tpl b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz.v3.reqs/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/.helmignore b/helm/internal/chart/v3/loader/testdata/frobnitz/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/Chart.lock b/helm/internal/chart/v3/loader/testdata/frobnitz/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/INSTALL.txt b/helm/internal/chart/v3/loader/testdata/frobnitz/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/LICENSE b/helm/internal/chart/v3/loader/testdata/frobnitz/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/_ignore_me b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/docs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/icon.svg b/helm/internal/chart/v3/loader/testdata/frobnitz/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/ignore/me.txt b/helm/internal/chart/v3/loader/testdata/frobnitz/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/templates/template.tpl b/helm/internal/chart/v3/loader/testdata/frobnitz/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash-1.2.3.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash-1.2.3.tgz
new file mode 100644
index 000000000..dfbe88a73
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash-1.2.3.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/.helmignore b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/.helmignore
new file mode 100755
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.lock b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.lock
new file mode 100755
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.yaml
new file mode 100755
index 000000000..6a952e333
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz_backslash
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/INSTALL.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/INSTALL.txt
new file mode 100755
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/LICENSE b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/LICENSE
new file mode 100755
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/README.md
new file mode 100755
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/_ignore_me b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/_ignore_me
new file mode 100755
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml
new file mode 100755
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/README.md
new file mode 100755
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml
new file mode 100755
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml
new file mode 100755
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100755
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml
new file mode 100755
index 000000000..0ac5ca6a8
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service | quote }}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml
new file mode 100755
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz
new file mode 100755
index 000000000..5c6bc4dcb
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/docs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/docs/README.md
new file mode 100755
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/icon.svg b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/icon.svg
new file mode 100755
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/ignore/me.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/ignore/me.txt
new file mode 100755
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/templates/template.tpl b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/templates/template.tpl
new file mode 100755
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/values.yaml
new file mode 100755
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_backslash/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom.tgz
new file mode 100644
index 000000000..7f0edc6b2
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/.helmignore b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/.helmignore
new file mode 100644
index 000000000..7a4b92da2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.lock b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.lock
new file mode 100644
index 000000000..ed43b227f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.yaml
new file mode 100644
index 000000000..924fae6fc
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/INSTALL.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/INSTALL.txt
new file mode 100644
index 000000000..77c4e724a
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/LICENSE b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/LICENSE
new file mode 100644
index 000000000..c27b00bf2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/README.md
new file mode 100644
index 000000000..e9c40031b
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/_ignore_me b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/_ignore_me
new file mode 100644
index 000000000..a7e3a38b7
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..6fe4f411f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/README.md
new file mode 100644
index 000000000..ea7526bee
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..0732c7d7d
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..f690d53c4
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..f3e662a28
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml
new file mode 100644
index 000000000..6b7cb2596
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/docs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/docs/README.md
new file mode 100644
index 000000000..816c3e431
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/icon.svg b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/ignore/me.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/templates/template.tpl b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/templates/template.tpl
new file mode 100644
index 000000000..bb29c5491
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/values.yaml
new file mode 100644
index 000000000..c24ceadf9
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_bom/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/.helmignore b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.lock b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/INSTALL.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/LICENSE b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/docs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/icon.svg b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/ignore/me.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/null b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/null
new file mode 120000
index 000000000..dc1dc0cde
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/null
@@ -0,0 +1 @@
+/dev/null
\ No newline at end of file
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/templates/template.tpl b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_dev_null/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/.helmignore b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.lock b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/INSTALL.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/_ignore_me b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..5c6bc4dcb
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/docs/README.md b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/icon.svg b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/ignore/me.txt b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/templates/template.tpl b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/values.yaml b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/frobnitz_with_symlink/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/loader/testdata/genfrob.sh b/helm/internal/chart/v3/loader/testdata/genfrob.sh
new file mode 100755
index 000000000..eae68906b
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/genfrob.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Pack the albatross chart into the mariner chart.
+echo "Packing albatross into mariner"
+tar -zcvf mariner/charts/albatross-0.1.0.tgz albatross
+
+echo "Packing mariner into frobnitz"
+tar -zcvf frobnitz/charts/mariner-4.3.2.tgz mariner
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_backslash/charts/
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_with_bom/charts/
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_with_dev_null/charts/
+cp frobnitz/charts/mariner-4.3.2.tgz frobnitz_with_symlink/charts/
+
+# Pack the frobnitz chart.
+echo "Packing frobnitz"
+tar --exclude=ignore/* -zcvf frobnitz-1.2.3.tgz frobnitz
+tar --exclude=ignore/* -zcvf frobnitz_backslash-1.2.3.tgz frobnitz_backslash
+tar --exclude=ignore/* -zcvf frobnitz_with_bom.tgz frobnitz_with_bom
diff --git a/helm/internal/chart/v3/loader/testdata/mariner/Chart.yaml b/helm/internal/chart/v3/loader/testdata/mariner/Chart.yaml
new file mode 100644
index 000000000..4d3eea730
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/mariner/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v3
+name: mariner
+description: A Helm chart for Kubernetes
+version: 4.3.2
+home: ""
+dependencies:
+ - name: albatross
+ repository: https://example.com/mariner/charts
+ version: "0.1.0"
diff --git a/helm/internal/chart/v3/loader/testdata/mariner/charts/albatross-0.1.0.tgz b/helm/internal/chart/v3/loader/testdata/mariner/charts/albatross-0.1.0.tgz
new file mode 100644
index 000000000..ec7bfbfcf
Binary files /dev/null and b/helm/internal/chart/v3/loader/testdata/mariner/charts/albatross-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/loader/testdata/mariner/templates/placeholder.tpl b/helm/internal/chart/v3/loader/testdata/mariner/templates/placeholder.tpl
new file mode 100644
index 000000000..29c11843a
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/mariner/templates/placeholder.tpl
@@ -0,0 +1 @@
+# This is a placeholder.
diff --git a/helm/internal/chart/v3/loader/testdata/mariner/values.yaml b/helm/internal/chart/v3/loader/testdata/mariner/values.yaml
new file mode 100644
index 000000000..b0ccb0086
--- /dev/null
+++ b/helm/internal/chart/v3/loader/testdata/mariner/values.yaml
@@ -0,0 +1,7 @@
+# Default values for .
+# This is a YAML-formatted file. https://github.com/toml-lang/toml
+# Declare name/value pairs to be passed into your templates.
+# name: "value"
+
+:
+ test: true
diff --git a/helm/internal/chart/v3/metadata.go b/helm/internal/chart/v3/metadata.go
new file mode 100644
index 000000000..4629d571b
--- /dev/null
+++ b/helm/internal/chart/v3/metadata.go
@@ -0,0 +1,178 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+// Maintainer describes a Chart maintainer.
+type Maintainer struct {
+ // Name is a user name or organization name
+ Name string `json:"name,omitempty"`
+ // Email is an optional email address to contact the named maintainer
+ Email string `json:"email,omitempty"`
+ // URL is an optional URL to an address for the named maintainer
+ URL string `json:"url,omitempty"`
+}
+
+// Validate checks valid data and sanitizes string characters.
+func (m *Maintainer) Validate() error {
+ if m == nil {
+ return ValidationError("maintainers must not contain empty or null nodes")
+ }
+ m.Name = sanitizeString(m.Name)
+ m.Email = sanitizeString(m.Email)
+ m.URL = sanitizeString(m.URL)
+ return nil
+}
+
+// Metadata for a Chart file. This models the structure of a Chart.yaml file.
+type Metadata struct {
+ // The name of the chart. Required.
+ Name string `json:"name,omitempty"`
+ // The URL to a relevant project page, git repo, or contact person
+ Home string `json:"home,omitempty"`
+ // Source is the URL to the source code of this chart
+ Sources []string `json:"sources,omitempty"`
+ // A SemVer 2 conformant version string of the chart. Required.
+ Version string `json:"version,omitempty"`
+ // A one-sentence description of the chart
+ Description string `json:"description,omitempty"`
+ // A list of string keywords
+ Keywords []string `json:"keywords,omitempty"`
+ // A list of name and URL/email address combinations for the maintainer(s)
+ Maintainers []*Maintainer `json:"maintainers,omitempty"`
+ // The URL to an icon file.
+ Icon string `json:"icon,omitempty"`
+ // The API Version of this chart. Required.
+ APIVersion string `json:"apiVersion,omitempty"`
+ // The condition to check to enable chart
+ Condition string `json:"condition,omitempty"`
+ // The tags to check to enable chart
+ Tags string `json:"tags,omitempty"`
+ // The version of the application enclosed inside of this chart.
+ AppVersion string `json:"appVersion,omitempty"`
+ // Whether or not this chart is deprecated
+ Deprecated bool `json:"deprecated,omitempty"`
+ // Annotations are additional mappings uninterpreted by Helm,
+ // made available for inspection by other applications.
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
+ KubeVersion string `json:"kubeVersion,omitempty"`
+ // Dependencies are a list of dependencies for a chart.
+ Dependencies []*Dependency `json:"dependencies,omitempty"`
+ // Specifies the chart type: application or library
+ Type string `json:"type,omitempty"`
+}
+
+// Validate checks the metadata for known issues and sanitizes string
+// characters.
+func (md *Metadata) Validate() error {
+ if md == nil {
+ return ValidationError("chart.metadata is required")
+ }
+
+ md.Name = sanitizeString(md.Name)
+ md.Description = sanitizeString(md.Description)
+ md.Home = sanitizeString(md.Home)
+ md.Icon = sanitizeString(md.Icon)
+ md.Condition = sanitizeString(md.Condition)
+ md.Tags = sanitizeString(md.Tags)
+ md.AppVersion = sanitizeString(md.AppVersion)
+ md.KubeVersion = sanitizeString(md.KubeVersion)
+ for i := range md.Sources {
+ md.Sources[i] = sanitizeString(md.Sources[i])
+ }
+ for i := range md.Keywords {
+ md.Keywords[i] = sanitizeString(md.Keywords[i])
+ }
+
+ if md.APIVersion == "" {
+ return ValidationError("chart.metadata.apiVersion is required")
+ }
+ if md.Name == "" {
+ return ValidationError("chart.metadata.name is required")
+ }
+
+ if md.Name != filepath.Base(md.Name) {
+ return ValidationErrorf("chart.metadata.name %q is invalid", md.Name)
+ }
+
+ if md.Version == "" {
+ return ValidationError("chart.metadata.version is required")
+ }
+ if !isValidSemver(md.Version) {
+ return ValidationErrorf("chart.metadata.version %q is invalid", md.Version)
+ }
+ if !isValidChartType(md.Type) {
+ return ValidationError("chart.metadata.type must be application or library")
+ }
+
+ for _, m := range md.Maintainers {
+ if err := m.Validate(); err != nil {
+ return err
+ }
+ }
+
+ // Aliases need to be validated here to make sure that the alias name does
+ // not contain any illegal characters.
+ dependencies := map[string]*Dependency{}
+ for _, dependency := range md.Dependencies {
+ if err := dependency.Validate(); err != nil {
+ return err
+ }
+ key := dependency.Name
+ if dependency.Alias != "" {
+ key = dependency.Alias
+ }
+ if dependencies[key] != nil {
+ return ValidationErrorf("more than one dependency with name or alias %q", key)
+ }
+ dependencies[key] = dependency
+ }
+ return nil
+}
+
+func isValidChartType(in string) bool {
+ switch in {
+ case "", "application", "library":
+ return true
+ }
+ return false
+}
+
+func isValidSemver(v string) bool {
+ _, err := semver.NewVersion(v)
+ return err == nil
+}
+
+// sanitizeString normalize spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+ return strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return ' '
+ }
+ if unicode.IsPrint(r) {
+ return r
+ }
+ return -1
+ }, str)
+}
diff --git a/helm/internal/chart/v3/metadata_test.go b/helm/internal/chart/v3/metadata_test.go
new file mode 100644
index 000000000..596a03695
--- /dev/null
+++ b/helm/internal/chart/v3/metadata_test.go
@@ -0,0 +1,201 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v3
+
+import (
+ "testing"
+)
+
+func TestValidate(t *testing.T) {
+ tests := []struct {
+ name string
+ md *Metadata
+ err error
+ }{
+ {
+ "chart without metadata",
+ nil,
+ ValidationError("chart.metadata is required"),
+ },
+ {
+ "chart without apiVersion",
+ &Metadata{Name: "test", Version: "1.0"},
+ ValidationError("chart.metadata.apiVersion is required"),
+ },
+ {
+ "chart without name",
+ &Metadata{APIVersion: "v3", Version: "1.0"},
+ ValidationError("chart.metadata.name is required"),
+ },
+ {
+ "chart without name",
+ &Metadata{Name: "../../test", APIVersion: "v3", Version: "1.0"},
+ ValidationError("chart.metadata.name \"../../test\" is invalid"),
+ },
+ {
+ "chart without version",
+ &Metadata{Name: "test", APIVersion: "v3"},
+ ValidationError("chart.metadata.version is required"),
+ },
+ {
+ "chart with bad type",
+ &Metadata{Name: "test", APIVersion: "v3", Version: "1.0", Type: "test"},
+ ValidationError("chart.metadata.type must be application or library"),
+ },
+ {
+ "chart without dependency",
+ &Metadata{Name: "test", APIVersion: "v3", Version: "1.0", Type: "application"},
+ nil,
+ },
+ {
+ "dependency with valid alias",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "dependency", Alias: "legal-alias"},
+ },
+ },
+ nil,
+ },
+ {
+ "dependency with bad characters in alias",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "bad", Alias: "illegal alias"},
+ },
+ },
+ ValidationError("dependency \"bad\" has disallowed characters in the alias"),
+ },
+ {
+ "same dependency twice",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: ""},
+ {Name: "foo", Alias: ""},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ "two dependencies with alias from second dependency shadowing first one",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: ""},
+ {Name: "bar", Alias: "foo"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ // this case would make sense and could work in future versions of Helm, currently template rendering would
+ // result in undefined behaviour
+ "same dependency twice with different version",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: "", Version: "1.2.3"},
+ {Name: "foo", Alias: "", Version: "1.0.0"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ // this case would make sense and could work in future versions of Helm, currently template rendering would
+ // result in undefined behaviour
+ "two dependencies with same name but different repos",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Repository: "repo-0"},
+ {Name: "foo", Repository: "repo-1"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ "dependencies has nil",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ nil,
+ },
+ },
+ ValidationError("dependencies must not contain empty or null nodes"),
+ },
+ {
+ "maintainer not empty",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v3",
+ Version: "1.0",
+ Type: "application",
+ Maintainers: []*Maintainer{
+ nil,
+ },
+ },
+ ValidationError("maintainers must not contain empty or null nodes"),
+ },
+ {
+ "version invalid",
+ &Metadata{APIVersion: "3", Name: "test", Version: "1.2.3.4"},
+ ValidationError("chart.metadata.version \"1.2.3.4\" is invalid"),
+ },
+ }
+
+ for _, tt := range tests {
+ result := tt.md.Validate()
+ if result != tt.err {
+ t.Errorf("expected %q, got %q in test %q", tt.err, result, tt.name)
+ }
+ }
+}
+
+func TestValidate_sanitize(t *testing.T) {
+ md := &Metadata{APIVersion: "3", Name: "test", Version: "1.0", Description: "\adescr\u0081iption\rtest", Maintainers: []*Maintainer{{Name: "\r"}}}
+ if err := md.Validate(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if md.Description != "description test" {
+ t.Fatalf("description was not sanitized: %q", md.Description)
+ }
+ if md.Maintainers[0].Name != " " {
+ t.Fatal("maintainer name was not sanitized")
+ }
+}
diff --git a/helm/internal/chart/v3/util/chartfile.go b/helm/internal/chart/v3/util/chartfile.go
new file mode 100644
index 000000000..25271e1cf
--- /dev/null
+++ b/helm/internal/chart/v3/util/chartfile.go
@@ -0,0 +1,96 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+)
+
+// LoadChartfile loads a Chart.yaml file into a *chart.Metadata.
+func LoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.Unmarshal(b, y)
+ return y, err
+}
+
+// StrictLoadChartfile loads a Chart.yaml into a *chart.Metadata using a strict unmarshaling
+func StrictLoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.UnmarshalStrict(b, y)
+ return y, err
+}
+
+// SaveChartfile saves the given metadata as a Chart.yaml file at the given path.
+//
+// 'filename' should be the complete path and filename ('foo/Chart.yaml')
+func SaveChartfile(filename string, cf *chart.Metadata) error {
+ out, err := yaml.Marshal(cf)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(filename, out, 0644)
+}
+
+// IsChartDir validate a chart directory.
+//
+// Checks for a valid Chart.yaml.
+func IsChartDir(dirName string) (bool, error) {
+ if fi, err := os.Stat(dirName); err != nil {
+ return false, err
+ } else if !fi.IsDir() {
+ return false, fmt.Errorf("%q is not a directory", dirName)
+ }
+
+ chartYaml := filepath.Join(dirName, ChartfileName)
+ if _, err := os.Stat(chartYaml); errors.Is(err, fs.ErrNotExist) {
+ return false, fmt.Errorf("no %s exists in directory %q", ChartfileName, dirName)
+ }
+
+ chartYamlContent, err := os.ReadFile(chartYaml)
+ if err != nil {
+ return false, fmt.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
+ }
+
+ chartContent := new(chart.Metadata)
+ if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil {
+ return false, err
+ }
+ if chartContent == nil {
+ return false, fmt.Errorf("chart metadata (%s) missing", ChartfileName)
+ }
+ if chartContent.Name == "" {
+ return false, fmt.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
+ }
+
+ return true, nil
+}
diff --git a/helm/internal/chart/v3/util/chartfile_test.go b/helm/internal/chart/v3/util/chartfile_test.go
new file mode 100644
index 000000000..c3d19c381
--- /dev/null
+++ b/helm/internal/chart/v3/util/chartfile_test.go
@@ -0,0 +1,117 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+)
+
+const testfile = "testdata/chartfiletest.yaml"
+
+func TestLoadChartfile(t *testing.T) {
+ f, err := LoadChartfile(testfile)
+ if err != nil {
+ t.Errorf("Failed to open %s: %s", testfile, err)
+ return
+ }
+ verifyChartfile(t, f, "frobnitz")
+}
+
+func verifyChartfile(t *testing.T, f *chart.Metadata, name string) {
+ t.Helper()
+ if f == nil { //nolint:staticcheck
+ t.Fatal("Failed verifyChartfile because f is nil")
+ }
+
+ if f.Name != name {
+ t.Errorf("Expected %s, got %s", name, f.Name)
+ }
+
+ if f.Description != "This is a frobnitz." {
+ t.Errorf("Unexpected description %q", f.Description)
+ }
+
+ if f.Version != "1.2.3" {
+ t.Errorf("Unexpected version %q", f.Version)
+ }
+
+ if len(f.Maintainers) != 2 {
+ t.Errorf("Expected 2 maintainers, got %d", len(f.Maintainers))
+ }
+
+ if f.Maintainers[0].Name != "The Helm Team" {
+ t.Errorf("Unexpected maintainer name.")
+ }
+
+ if f.Maintainers[1].Email != "nobody@example.com" {
+ t.Errorf("Unexpected maintainer email.")
+ }
+
+ if len(f.Sources) != 1 {
+ t.Fatalf("Unexpected number of sources")
+ }
+
+ if f.Sources[0] != "https://example.com/foo/bar" {
+ t.Errorf("Expected https://example.com/foo/bar, got %s", f.Sources)
+ }
+
+ if f.Home != "http://example.com" {
+ t.Error("Unexpected home.")
+ }
+
+ if f.Icon != "https://example.com/64x64.png" {
+ t.Errorf("Unexpected icon: %q", f.Icon)
+ }
+
+ if len(f.Keywords) != 3 {
+ t.Error("Unexpected keywords")
+ }
+
+ if len(f.Annotations) != 2 {
+ t.Fatalf("Unexpected annotations")
+ }
+
+ if want, got := "extravalue", f.Annotations["extrakey"]; want != got {
+ t.Errorf("Want %q, but got %q", want, got)
+ }
+
+ if want, got := "anothervalue", f.Annotations["anotherkey"]; want != got {
+ t.Errorf("Want %q, but got %q", want, got)
+ }
+
+ kk := []string{"frobnitz", "sprocket", "dodad"}
+ for i, k := range f.Keywords {
+ if kk[i] != k {
+ t.Errorf("Expected %q, got %q", kk[i], k)
+ }
+ }
+}
+
+func TestIsChartDir(t *testing.T) {
+ validChartDir, err := IsChartDir("testdata/frobnitz")
+ if !validChartDir {
+ t.Errorf("unexpected error while reading chart-directory: (%v)", err)
+ return
+ }
+ validChartDir, err = IsChartDir("testdata")
+ if validChartDir || err == nil {
+ t.Errorf("expected error but did not get any")
+ return
+ }
+}
diff --git a/helm/internal/chart/v3/util/compatible.go b/helm/internal/chart/v3/util/compatible.go
new file mode 100644
index 000000000..d384d2d45
--- /dev/null
+++ b/helm/internal/chart/v3/util/compatible.go
@@ -0,0 +1,34 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "github.com/Masterminds/semver/v3"
+
+// IsCompatibleRange compares a version to a constraint.
+// It returns true if the version matches the constraint, and false in all other cases.
+func IsCompatibleRange(constraint, ver string) bool {
+ sv, err := semver.NewVersion(ver)
+ if err != nil {
+ return false
+ }
+
+ c, err := semver.NewConstraint(constraint)
+ if err != nil {
+ return false
+ }
+ return c.Check(sv)
+}
diff --git a/helm/internal/chart/v3/util/compatible_test.go b/helm/internal/chart/v3/util/compatible_test.go
new file mode 100644
index 000000000..e17d33e35
--- /dev/null
+++ b/helm/internal/chart/v3/util/compatible_test.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version represents the current version of the project.
+package util
+
+import "testing"
+
+func TestIsCompatibleRange(t *testing.T) {
+ tests := []struct {
+ constraint string
+ ver string
+ expected bool
+ }{
+ {"v2.0.0-alpha.4", "v2.0.0-alpha.4", true},
+ {"v2.0.0-alpha.3", "v2.0.0-alpha.4", false},
+ {"v2.0.0", "v2.0.0-alpha.4", false},
+ {"v2.0.0-alpha.4", "v2.0.0", false},
+ {"~v2.0.0", "v2.0.1", true},
+ {"v2", "v2.0.0", true},
+ {">2.0.0", "v2.1.1", true},
+ {"v2.1.*", "v2.1.1", true},
+ }
+
+ for _, tt := range tests {
+ if IsCompatibleRange(tt.constraint, tt.ver) != tt.expected {
+ t.Errorf("expected constraint %s to be %v for %s", tt.constraint, tt.expected, tt.ver)
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/util/create.go b/helm/internal/chart/v3/util/create.go
new file mode 100644
index 000000000..0dfa30995
--- /dev/null
+++ b/helm/internal/chart/v3/util/create.go
@@ -0,0 +1,834 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// chartName is a regular expression for testing the supplied name of a chart.
+// This regular expression is probably stricter than it needs to be. We can relax it
+// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be
+// problematic.
+var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$")
+
+const (
+ // ChartfileName is the default Chart file name.
+ ChartfileName = "Chart.yaml"
+ // ValuesfileName is the default values file name.
+ ValuesfileName = "values.yaml"
+ // SchemafileName is the default values schema file name.
+ SchemafileName = "values.schema.json"
+ // TemplatesDir is the relative directory name for templates.
+ TemplatesDir = "templates"
+ // ChartsDir is the relative directory name for charts dependencies.
+ ChartsDir = "charts"
+ // TemplatesTestsDir is the relative directory name for tests.
+ TemplatesTestsDir = TemplatesDir + sep + "tests"
+ // IgnorefileName is the name of the Helm ignore file.
+ IgnorefileName = ".helmignore"
+ // IngressFileName is the name of the example ingress file.
+ IngressFileName = TemplatesDir + sep + "ingress.yaml"
+ // HTTPRouteFileName is the name of the example HTTPRoute file.
+ HTTPRouteFileName = TemplatesDir + sep + "httproute.yaml"
+ // DeploymentName is the name of the example deployment file.
+ DeploymentName = TemplatesDir + sep + "deployment.yaml"
+ // ServiceName is the name of the example service file.
+ ServiceName = TemplatesDir + sep + "service.yaml"
+ // ServiceAccountName is the name of the example serviceaccount file.
+ ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml"
+ // HorizontalPodAutoscalerName is the name of the example hpa file.
+ HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml"
+ // NotesName is the name of the example NOTES.txt file.
+ NotesName = TemplatesDir + sep + "NOTES.txt"
+ // HelpersName is the name of the example helpers file.
+ HelpersName = TemplatesDir + sep + "_helpers.tpl"
+ // TestConnectionName is the name of the example test file.
+ TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml"
+)
+
+// maxChartNameLength is lower than the limits we know of with certain file systems,
+// and with certain Kubernetes fields.
+const maxChartNameLength = 250
+
+const sep = string(filepath.Separator)
+
+const defaultChartfile = `apiVersion: v3
+name: %s
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+`
+
+const defaultValues = `# Default values for %s.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+replicaCount: 1
+
+# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
+image:
+ repository: nginx
+ # This sets the pull policy for images.
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# This is to override the chart name.
+nameOverride: ""
+fullnameOverride: ""
+
+# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Automatically mount a ServiceAccount's API credentials?
+ automount: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+# This is for setting Kubernetes Annotations to a Pod.
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+# This is for setting Kubernetes Labels to a Pod.
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
+service:
+ # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: ClusterIP
+ # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
+ port: 80
+
+# This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+# -- Expose the service via gateway-api HTTPRoute
+# Requires Gateway API resources and suitable controller installed within the cluster
+# (see: https://gateway-api.sigs.k8s.io/guides/)
+httpRoute:
+ # HTTPRoute enabled.
+ enabled: false
+ # HTTPRoute annotations.
+ annotations: {}
+ # Which Gateways this Route is attached to.
+ parentRefs:
+ - name: gateway
+ sectionName: http
+ # namespace: default
+ # Hostnames matching HTTP header.
+ hostnames:
+ - chart-example.local
+ # List of rules and filters applied.
+ rules:
+ - matches:
+ - path:
+ type: PathPrefix
+ value: /headers
+ # filters:
+ # - type: RequestHeaderModifier
+ # requestHeaderModifier:
+ # set:
+ # - name: My-Overwrite-Header
+ # value: this-is-the-only-value
+ # remove:
+ # - User-Agent
+ # - matches:
+ # - path:
+ # type: PathPrefix
+ # value: /echo
+ # headers:
+ # - name: version
+ # value: v2
+
+resources: {}
+ # For publicly distributed charts, we recommend leaving 'resources' commented out.
+ # This makes resource allocation a conscious choice for the user and increases the chances
+ # charts run on a wide range of environments from low-resource clusters like Minikube to those
+ # with strict resource policies. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+livenessProbe:
+ httpGet:
+ path: /
+ port: http
+readinessProbe:
+ httpGet:
+ path: /
+ port: http
+
+# This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+# Additional volumes on the output Deployment definition.
+volumes: []
+# - name: foo
+# secret:
+# secretName: mysecret
+# optional: false
+
+# Additional volumeMounts on the output Deployment definition.
+volumeMounts: []
+# - name: foo
+# mountPath: "/etc/foo"
+# readOnly: true
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.ingress.className }}
+ ingressClassName: {{ . }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- with .pathType }}
+ pathType: {{ . }}
+ {{- end }}
+ backend:
+ service:
+ name: {{ include ".fullname" $ }}
+ port:
+ number: {{ $.Values.service.port }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultHTTPRoute = `{{- if .Values.httpRoute.enabled -}}
+{{- $fullName := include ".fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.httpRoute.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ parentRefs:
+ {{- with .Values.httpRoute.parentRefs }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.httpRoute.hostnames }}
+ hostnames:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ rules:
+ {{- range .Values.httpRoute.rules }}
+ {{- with .matches }}
+ - matches:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .filters }}
+ filters:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ backendRefs:
+ - name: {{ $fullName }}
+ port: {{ $svcPort }}
+ weight: 1
+ {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ {{- if not .Values.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include ".selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include ".labels" . | nindent 8 }}
+ {{- with .Values.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include ".serviceAccountName" . }}
+ {{- with .Values.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ {{- with .Values.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.service.port }}
+ protocol: TCP
+ {{- with .Values.livenessProbe }}
+ livenessProbe:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.readinessProbe }}
+ readinessProbe:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumeMounts }}
+ volumeMounts:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include ".selectorLabels" . | nindent 4 }}
+`
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include ".serviceAccountName" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include ".fullname" . }}
+ minReplicas: {{ .Values.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+ {{- end }}
+ {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.httpRoute.enabled }}
+{{- if .Values.httpRoute.hostnames }}
+ export APP_HOSTNAME={{ .Values.httpRoute.hostnames | first }}
+{{- else }}
+ export APP_HOSTNAME=$(kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o jsonpath="{.spec.listeners[0].hostname}")
+ {{- end }}
+{{- if and .Values.httpRoute.rules (first .Values.httpRoute.rules).matches (first (first .Values.httpRoute.rules).matches).path.value }}
+ echo "Visit http://$APP_HOSTNAME{{ (first (first .Values.httpRoute.rules).matches).path.value }} to use your application"
+
+ NOTE: Your HTTPRoute depends on the listener configuration of your gateway and your HTTPRoute rules.
+ The rules can be set for path, method, header and query parameters.
+ You can check the gateway configuration with 'kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o yaml'
+{{- end }}
+{{- else if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include ".fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define ".name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define ".fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define ".chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define ".labels" -}}
+helm.sh/chart: {{ include ".chart" . }}
+{{ include ".selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define ".selectorLabels" -}}
+app.kubernetes.io/name: {{ include ".name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define ".serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include ".fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include ".fullname" . }}-test-connection"
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: wget
+ image: busybox
+ command: ['wget']
+ args: ['{{ include ".fullname" . }}:{{ .Values.service.port }}']
+ restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward
+// compatibility.
+var Stderr io.Writer = os.Stderr
+
+// CreateFrom creates a new chart, but scaffolds it from the src chart.
+func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
+ schart, err := loader.Load(src)
+ if err != nil {
+ return fmt.Errorf("could not load %s: %w", src, err)
+ }
+
+ schart.Metadata = chartfile
+
+ var updatedTemplates []*common.File
+
+ for _, template := range schart.Templates {
+ newData := transform(string(template.Data), schart.Name())
+ updatedTemplates = append(updatedTemplates, &common.File{Name: template.Name, ModTime: template.ModTime, Data: newData})
+ }
+
+ schart.Templates = updatedTemplates
+ b, err := yaml.Marshal(schart.Values)
+ if err != nil {
+ return fmt.Errorf("reading values file: %w", err)
+ }
+
+ var m map[string]interface{}
+ if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
+ return fmt.Errorf("transforming values file: %w", err)
+ }
+ schart.Values = m
+
+ // SaveDir looks for the file values.yaml when saving rather than the values
+ // key in order to preserve the comments in the YAML. The name placeholder
+ // needs to be replaced on that file.
+ for _, f := range schart.Raw {
+ if f.Name == ValuesfileName {
+ f.Data = transform(string(f.Data), schart.Name())
+ }
+ }
+
+ return SaveDir(schart, dest)
+}
+
+// Create creates a new chart in a directory.
+//
+// Inside of dir, this will create a directory based on the name of
+// chartfile.Name. It will then write the Chart.yaml into this directory and
+// create the (empty) appropriate directories.
+//
+// The returned string will point to the newly created directory. It will be
+// an absolute path, even if the provided base directory was relative.
+//
+// If dir does not exist, this will return an error.
+// If Chart.yaml or any directories cannot be created, this will return an
+// error. In such a case, this will attempt to clean up by removing the
+// new chart directory.
+func Create(name, dir string) (string, error) {
+
+ // Sanity-check the name of a chart so user doesn't create one that causes problems.
+ if err := validateChartName(name); err != nil {
+ return "", err
+ }
+
+ path, err := filepath.Abs(dir)
+ if err != nil {
+ return path, err
+ }
+
+ if fi, err := os.Stat(path); err != nil {
+ return path, err
+ } else if !fi.IsDir() {
+ return path, fmt.Errorf("no such directory %s", path)
+ }
+
+ cdir := filepath.Join(path, name)
+ if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+ return cdir, fmt.Errorf("file %s already exists and is not a directory", cdir)
+ }
+
+ // Note: If adding a new template below (i.e., to `helm create`) which is disabled by default (similar to hpa and
+ // ingress below); or making an existing template disabled by default, add the enabling condition in
+ // `TestHelmCreateChart_CheckDeprecatedWarnings` in `pkg/lint/lint_test.go` to make it run through deprecation checks
+ // with latest Kubernetes version.
+ files := []struct {
+ path string
+ content []byte
+ }{
+ {
+ // Chart.yaml
+ path: filepath.Join(cdir, ChartfileName),
+ content: fmt.Appendf(nil, defaultChartfile, name),
+ },
+ {
+ // values.yaml
+ path: filepath.Join(cdir, ValuesfileName),
+ content: fmt.Appendf(nil, defaultValues, name),
+ },
+ {
+ // .helmignore
+ path: filepath.Join(cdir, IgnorefileName),
+ content: []byte(defaultIgnore),
+ },
+ {
+ // ingress.yaml
+ path: filepath.Join(cdir, IngressFileName),
+ content: transform(defaultIngress, name),
+ },
+ {
+ // httproute.yaml
+ path: filepath.Join(cdir, HTTPRouteFileName),
+ content: transform(defaultHTTPRoute, name),
+ },
+ {
+ // deployment.yaml
+ path: filepath.Join(cdir, DeploymentName),
+ content: transform(defaultDeployment, name),
+ },
+ {
+ // service.yaml
+ path: filepath.Join(cdir, ServiceName),
+ content: transform(defaultService, name),
+ },
+ {
+ // serviceaccount.yaml
+ path: filepath.Join(cdir, ServiceAccountName),
+ content: transform(defaultServiceAccount, name),
+ },
+ {
+ // hpa.yaml
+ path: filepath.Join(cdir, HorizontalPodAutoscalerName),
+ content: transform(defaultHorizontalPodAutoscaler, name),
+ },
+ {
+ // NOTES.txt
+ path: filepath.Join(cdir, NotesName),
+ content: transform(defaultNotes, name),
+ },
+ {
+ // _helpers.tpl
+ path: filepath.Join(cdir, HelpersName),
+ content: transform(defaultHelpers, name),
+ },
+ {
+ // test-connection.yaml
+ path: filepath.Join(cdir, TestConnectionName),
+ content: transform(defaultTestConnection, name),
+ },
+ }
+
+ for _, file := range files {
+ if _, err := os.Stat(file.path); err == nil {
+ // There is no handle to a preferred output stream here.
+ fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+ }
+ if err := writeFile(file.path, file.content); err != nil {
+ return cdir, err
+ }
+ }
+ // Need to add the ChartsDir explicitly as it does not contain any file OOTB
+ if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+ return cdir, err
+ }
+ return cdir, nil
+}
+
+// transform performs a string replacement of the specified source for
+// a given key with the replacement string
+func transform(src, replacement string) []byte {
+ return []byte(strings.ReplaceAll(src, "", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+ if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+ if name == "" || len(name) > maxChartNameLength {
+ return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+ }
+ if !chartName.MatchString(name) {
+ return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+ }
+ return nil
+}
diff --git a/helm/internal/chart/v3/util/create_test.go b/helm/internal/chart/v3/util/create_test.go
new file mode 100644
index 000000000..b3b58cc5a
--- /dev/null
+++ b/helm/internal/chart/v3/util/create_test.go
@@ -0,0 +1,172 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+)
+
+func TestCreate(t *testing.T) {
+ tdir := t.TempDir()
+
+ c, err := Create("foo", tdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+
+ mychart, err := loader.LoadDir(c)
+ if err != nil {
+ t.Fatalf("Failed to load newly created chart %q: %s", c, err)
+ }
+
+ if mychart.Name() != "foo" {
+ t.Errorf("Expected name to be 'foo', got %q", mychart.Name())
+ }
+
+ for _, f := range []string{
+ ChartfileName,
+ DeploymentName,
+ HelpersName,
+ IgnorefileName,
+ NotesName,
+ ServiceAccountName,
+ ServiceName,
+ TemplatesDir,
+ TemplatesTestsDir,
+ TestConnectionName,
+ ValuesfileName,
+ } {
+ if _, err := os.Stat(filepath.Join(dir, f)); err != nil {
+ t.Errorf("Expected %s file: %s", f, err)
+ }
+ }
+}
+
+func TestCreateFrom(t *testing.T) {
+ tdir := t.TempDir()
+
+ cf := &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "foo",
+ Version: "0.1.0",
+ }
+ srcdir := "./testdata/frobnitz/charts/mariner"
+
+ if err := CreateFrom(cf, tdir, srcdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+ c := filepath.Join(tdir, cf.Name)
+ mychart, err := loader.LoadDir(c)
+ if err != nil {
+ t.Fatalf("Failed to load newly created chart %q: %s", c, err)
+ }
+
+ if mychart.Name() != "foo" {
+ t.Errorf("Expected name to be 'foo', got %q", mychart.Name())
+ }
+
+ for _, f := range []string{
+ ChartfileName,
+ ValuesfileName,
+ filepath.Join(TemplatesDir, "placeholder.tpl"),
+ } {
+ if _, err := os.Stat(filepath.Join(dir, f)); err != nil {
+ t.Errorf("Expected %s file: %s", f, err)
+ }
+
+ // Check each file to make sure has been replaced
+ b, err := os.ReadFile(filepath.Join(dir, f))
+ if err != nil {
+ t.Errorf("Unable to read file %s: %s", f, err)
+ }
+ if bytes.Contains(b, []byte("")) {
+ t.Errorf("File %s contains ", f)
+ }
+ }
+}
+
+// TestCreate_Overwrite is a regression test for making sure that files are overwritten.
+func TestCreate_Overwrite(t *testing.T) {
+ tdir := t.TempDir()
+
+ var errlog bytes.Buffer
+
+ if _, err := Create("foo", tdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+
+ tplname := filepath.Join(dir, "templates/hpa.yaml")
+ writeFile(tplname, []byte("FOO"))
+
+ // Now re-run the create
+ Stderr = &errlog
+ if _, err := Create("foo", tdir); err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := os.ReadFile(tplname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(data) == "FOO" {
+ t.Fatal("File that should have been modified was not.")
+ }
+
+ if errlog.Len() == 0 {
+ t.Errorf("Expected warnings about overwriting files.")
+ }
+}
+
+func TestValidateChartName(t *testing.T) {
+ for name, shouldPass := range map[string]bool{
+ "": false,
+ "abcdefghijklmnopqrstuvwxyz-_.": true,
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ-_.": true,
+ "$hello": false,
+ "Hellô": false,
+ "he%%o": false,
+ "he\nllo": false,
+
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ-_.": false,
+ } {
+ if err := validateChartName(name); (err != nil) == shouldPass {
+ t.Errorf("test for %q failed", name)
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/util/dependencies.go b/helm/internal/chart/v3/util/dependencies.go
new file mode 100644
index 000000000..4ef9e6961
--- /dev/null
+++ b/helm/internal/chart/v3/util/dependencies.go
@@ -0,0 +1,381 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "log/slog"
+ "strings"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/copystructure"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+)
+
+// ProcessDependencies checks through this chart's dependencies, processing accordingly.
+func ProcessDependencies(c *chart.Chart, v common.Values) error {
+ if err := processDependencyEnabled(c, v, ""); err != nil {
+ return err
+ }
+ return processDependencyImportValues(c, true)
+}
+
+// processDependencyConditions disables charts based on condition path value in values
+func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, cpath string) {
+ if reqs == nil {
+ return
+ }
+ for _, r := range reqs {
+ for c := range strings.SplitSeq(strings.TrimSpace(r.Condition), ",") {
+ if len(c) > 0 {
+ // retrieve value
+ vv, err := cvals.PathValue(cpath + c)
+ if err == nil {
+ // if not bool, warn
+ if bv, ok := vv.(bool); ok {
+ r.Enabled = bv
+ break
+ }
+ slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
+ } else if _, ok := err.(common.ErrNoValue); !ok {
+ // this is a real error
+ slog.Warn("the method PathValue returned error", slog.Any("error", err))
+ }
+ }
+ }
+ }
+}
+
+// processDependencyTags disables charts based on tags in values
+func processDependencyTags(reqs []*chart.Dependency, cvals common.Values) {
+ if reqs == nil {
+ return
+ }
+ vt, err := cvals.Table("tags")
+ if err != nil {
+ return
+ }
+ for _, r := range reqs {
+ var hasTrue, hasFalse bool
+ for _, k := range r.Tags {
+ if b, ok := vt[k]; ok {
+ // if not bool, warn
+ if bv, ok := b.(bool); ok {
+ if bv {
+ hasTrue = true
+ } else {
+ hasFalse = true
+ }
+ } else {
+ slog.Warn("returned non-bool value", "tag", k, "chart", r.Name)
+ }
+ }
+ }
+ if !hasTrue && hasFalse {
+ r.Enabled = false
+ } else if hasTrue || !hasTrue && !hasFalse {
+ r.Enabled = true
+ }
+ }
+}
+
+// getAliasDependency finds the chart for an alias dependency and copies parts that will be modified
+func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart {
+ for _, c := range charts {
+ if c == nil {
+ continue
+ }
+ if c.Name() != dep.Name {
+ continue
+ }
+ if !IsCompatibleRange(dep.Version, c.Metadata.Version) {
+ continue
+ }
+
+ out := *c
+ out.Metadata = copyMetadata(c.Metadata)
+
+ // empty dependencies and shallow copy all dependencies, otherwise parent info may be corrupted if
+ // there is more than one dependency aliasing this chart
+ out.SetDependencies()
+ for _, dependency := range c.Dependencies() {
+ cpy := *dependency
+ out.AddDependency(&cpy)
+ }
+
+ if dep.Alias != "" {
+ out.Metadata.Name = dep.Alias
+ }
+ return &out
+ }
+ return nil
+}
+
+func copyMetadata(metadata *chart.Metadata) *chart.Metadata {
+ md := *metadata
+
+ if md.Dependencies != nil {
+ dependencies := make([]*chart.Dependency, len(md.Dependencies))
+ for i := range md.Dependencies {
+ dependency := *md.Dependencies[i]
+ dependencies[i] = &dependency
+ }
+ md.Dependencies = dependencies
+ }
+ return &md
+}
+
+// processDependencyEnabled removes disabled charts from dependencies
+func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+
+ var chartDependencies []*chart.Chart
+ // If any dependency is not a part of Chart.yaml
+ // then this should be added to chartDependencies.
+ // However, if the dependency is already specified in Chart.yaml
+ // we should not add it, as it would be processed from Chart.yaml anyway.
+
+Loop:
+ for _, existing := range c.Dependencies() {
+ for _, req := range c.Metadata.Dependencies {
+ if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) {
+ continue Loop
+ }
+ }
+ chartDependencies = append(chartDependencies, existing)
+ }
+
+ for _, req := range c.Metadata.Dependencies {
+ if req == nil {
+ continue
+ }
+ if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil {
+ chartDependencies = append(chartDependencies, chartDependency)
+ }
+ if req.Alias != "" {
+ req.Name = req.Alias
+ }
+ }
+ c.SetDependencies(chartDependencies...)
+
+ // set all to true
+ for _, lr := range c.Metadata.Dependencies {
+ lr.Enabled = true
+ }
+ cvals, err := util.CoalesceValues(c, v)
+ if err != nil {
+ return err
+ }
+ // flag dependencies as enabled/disabled
+ processDependencyTags(c.Metadata.Dependencies, cvals)
+ processDependencyConditions(c.Metadata.Dependencies, cvals, path)
+ // make a map of charts to remove
+ rm := map[string]struct{}{}
+ for _, r := range c.Metadata.Dependencies {
+ if !r.Enabled {
+ // remove disabled chart
+ rm[r.Name] = struct{}{}
+ }
+ }
+ // don't keep disabled charts in new slice
+ cd := []*chart.Chart{}
+ copy(cd, c.Dependencies()[:0])
+ for _, n := range c.Dependencies() {
+ if _, ok := rm[n.Metadata.Name]; !ok {
+ cd = append(cd, n)
+ }
+ }
+ // don't keep disabled charts in metadata
+ cdMetadata := []*chart.Dependency{}
+ copy(cdMetadata, c.Metadata.Dependencies[:0])
+ for _, n := range c.Metadata.Dependencies {
+ if _, ok := rm[n.Name]; !ok {
+ cdMetadata = append(cdMetadata, n)
+ }
+ }
+
+ // recursively call self to process sub dependencies
+ for _, t := range cd {
+ subpath := path + t.Metadata.Name + "."
+ if err := processDependencyEnabled(t, cvals, subpath); err != nil {
+ return err
+ }
+ }
+ // set the correct dependencies in metadata
+ c.Metadata.Dependencies = nil
+ c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...)
+ c.SetDependencies(cd...)
+
+ return nil
+}
+
+// pathToMap creates a nested map given a YAML path in dot notation.
+func pathToMap(path string, data map[string]interface{}) map[string]interface{} {
+ if path == "." {
+ return data
+ }
+ return set(parsePath(path), data)
+}
+
+func parsePath(key string) []string { return strings.Split(key, ".") }
+
+func set(path []string, data map[string]interface{}) map[string]interface{} {
+ if len(path) == 0 {
+ return nil
+ }
+ cur := data
+ for i := len(path) - 1; i >= 0; i-- {
+ cur = map[string]interface{}{path[i]: cur}
+ }
+ return cur
+}
+
+// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field.
+func processImportValues(c *chart.Chart, merge bool) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+ // combine chart values and empty config to get Values
+ var cvals common.Values
+ var err error
+ if merge {
+ cvals, err = util.MergeValues(c, nil)
+ } else {
+ cvals, err = util.CoalesceValues(c, nil)
+ }
+ if err != nil {
+ return err
+ }
+ b := make(map[string]interface{})
+ // import values from each dependency if specified in import-values
+ for _, r := range c.Metadata.Dependencies {
+ var outiv []interface{}
+ for _, riv := range r.ImportValues {
+ switch iv := riv.(type) {
+ case map[string]interface{}:
+ child := fmt.Sprintf("%v", iv["child"])
+ parent := fmt.Sprintf("%v", iv["parent"])
+
+ outiv = append(outiv, map[string]string{
+ "child": child,
+ "parent": parent,
+ })
+
+ // get child table
+ vv, err := cvals.Table(r.Name + "." + child)
+ if err != nil {
+ slog.Warn(
+ "ImportValues missing table from chart",
+ slog.String("chart", "chart"),
+ slog.String("name", r.Name),
+ slog.Any("error", err),
+ )
+ continue
+ }
+ // create value map from child to be merged into parent
+ if merge {
+ b = util.MergeTables(b, pathToMap(parent, vv.AsMap()))
+ } else {
+ b = util.CoalesceTables(b, pathToMap(parent, vv.AsMap()))
+ }
+ case string:
+ child := "exports." + iv
+ outiv = append(outiv, map[string]string{
+ "child": child,
+ "parent": ".",
+ })
+ vm, err := cvals.Table(r.Name + "." + child)
+ if err != nil {
+ slog.Warn("ImportValues missing table", slog.Any("error", err))
+ continue
+ }
+ if merge {
+ b = util.MergeTables(b, vm.AsMap())
+ } else {
+ b = util.CoalesceTables(b, vm.AsMap())
+ }
+ }
+ }
+ r.ImportValues = outiv
+ }
+
+ // Imported values from a child to a parent chart have a lower priority than
+ // the parents values. This enables parent charts to import a large section
+ // from a child and then override select parts. This is why b is merged into
+ // cvals in the code below and not the other way around.
+ if merge {
+ // deep copying the cvals as there are cases where pointers can end
+ // up in the cvals when they are copied onto b in ways that break things.
+ cvals = deepCopyMap(cvals)
+ c.Values = util.MergeTables(cvals, b)
+ } else {
+ // Trimming the nil values from cvals is needed for backwards compatibility.
+ // Previously, the b value had been populated with cvals along with some
+ // overrides. This caused the coalescing functionality to remove the
+ // nil/null values. This trimming is for backwards compat.
+ cvals = trimNilValues(cvals)
+ c.Values = util.CoalesceTables(cvals, b)
+ }
+
+ return nil
+}
+
+func deepCopyMap(vals map[string]interface{}) map[string]interface{} {
+ valsCopy, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals
+ }
+ return valsCopy.(map[string]interface{})
+}
+
+func trimNilValues(vals map[string]interface{}) map[string]interface{} {
+ valsCopy, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals
+ }
+ valsCopyMap := valsCopy.(map[string]interface{})
+ for key, val := range valsCopyMap {
+ if val == nil {
+ // Iterate over the values and remove nil keys
+ delete(valsCopyMap, key)
+ } else if istable(val) {
+ // Recursively call into ourselves to remove keys from inner tables
+ valsCopyMap[key] = trimNilValues(val.(map[string]interface{}))
+ }
+ }
+
+ return valsCopyMap
+}
+
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
+
+// processDependencyImportValues imports specified chart values from child to parent.
+func processDependencyImportValues(c *chart.Chart, merge bool) error {
+ for _, d := range c.Dependencies() {
+ // recurse
+ if err := processDependencyImportValues(d, merge); err != nil {
+ return err
+ }
+ }
+ return processImportValues(c, merge)
+}
diff --git a/helm/internal/chart/v3/util/dependencies_test.go b/helm/internal/chart/v3/util/dependencies_test.go
new file mode 100644
index 000000000..3c5bb96f7
--- /dev/null
+++ b/helm/internal/chart/v3/util/dependencies_test.go
@@ -0,0 +1,570 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "testing"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func loadChart(t *testing.T, path string) *chart.Chart {
+ t.Helper()
+ c, err := loader.Load(path)
+ if err != nil {
+ t.Fatalf("failed to load testdata: %s", err)
+ }
+ return c
+}
+
+func TestLoadDependency(t *testing.T) {
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+
+ check := func(deps []*chart.Dependency) {
+ if len(deps) != 2 {
+ t.Errorf("expected 2 dependencies, got %d", len(deps))
+ }
+ for i, tt := range tests {
+ if deps[i].Name != tt.Name {
+ t.Errorf("expected dependency named %q, got %q", tt.Name, deps[i].Name)
+ }
+ if deps[i].Version != tt.Version {
+ t.Errorf("expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, deps[i].Version)
+ }
+ if deps[i].Repository != tt.Repository {
+ t.Errorf("expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, deps[i].Repository)
+ }
+ }
+ }
+ c := loadChart(t, "testdata/frobnitz")
+ check(c.Metadata.Dependencies)
+ check(c.Lock.Dependencies)
+}
+
+func TestDependencyEnabled(t *testing.T) {
+ type M = map[string]interface{}
+ tests := []struct {
+ name string
+ v M
+ e []string // expected charts including duplicates in alphanumeric order
+ }{{
+ "tags with no effect",
+ M{"tags": M{"nothinguseful": false}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags disabling a group",
+ M{"tags": M{"front-end": false}},
+ []string{"parentchart"},
+ }, {
+ "tags disabling a group and enabling a different group",
+ M{"tags": M{"front-end": false, "back-end": true}},
+ []string{"parentchart", "parentchart.subchart2", "parentchart.subchart2.subchartb", "parentchart.subchart2.subchartc"},
+ }, {
+ "tags disabling only children, children still enabled since tag front-end=true in values.yaml",
+ M{"tags": M{"subcharta": false, "subchartb": false}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags disabling all parents/children with additional tag re-enabling a parent",
+ M{"tags": M{"front-end": false, "subchart1": true, "back-end": false}},
+ []string{"parentchart", "parentchart.subchart1"},
+ }, {
+ "conditions enabling the parent charts, but back-end (b, c) is still disabled via values.yaml",
+ M{"subchart1": M{"enabled": true}, "subchart2": M{"enabled": true}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb", "parentchart.subchart2"},
+ }, {
+ "conditions disabling the parent charts, effectively disabling children",
+ M{"subchart1": M{"enabled": false}, "subchart2": M{"enabled": false}},
+ []string{"parentchart"},
+ }, {
+ "conditions a child using the second condition path of child's condition",
+ M{"subchart1": M{"subcharta": M{"enabled": false}}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags enabling a parent/child group with condition disabling one child",
+ M{"subchart2": M{"subchartc": M{"enabled": false}}, "tags": M{"back-end": true}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb", "parentchart.subchart2", "parentchart.subchart2.subchartb"},
+ }, {
+ "tags will not enable a child if parent is explicitly disabled with condition",
+ M{"subchart1": M{"enabled": false}, "tags": M{"front-end": true}},
+ []string{"parentchart"},
+ }, {
+ "subcharts with alias also respect conditions",
+ M{"subchart1": M{"enabled": false}, "subchart2alias": M{"enabled": true, "subchartb": M{"enabled": true}}},
+ []string{"parentchart", "parentchart.subchart2alias", "parentchart.subchart2alias.subchartb"},
+ }}
+
+ for _, tc := range tests {
+ c := loadChart(t, "testdata/subpop")
+ t.Run(tc.name, func(t *testing.T) {
+ if err := processDependencyEnabled(c, tc.v, ""); err != nil {
+ t.Fatalf("error processing enabled dependencies %v", err)
+ }
+
+ names := extractChartNames(c)
+ if len(names) != len(tc.e) {
+ t.Fatalf("slice lengths do not match got %v, expected %v", len(names), len(tc.e))
+ }
+ for i := range names {
+ if names[i] != tc.e[i] {
+ t.Fatalf("slice values do not match got %v, expected %v", names, tc.e)
+ }
+ }
+ })
+ }
+}
+
+// extractChartNames recursively searches chart dependencies returning all charts found
+func extractChartNames(c *chart.Chart) []string {
+ var out []string
+ var fn func(c *chart.Chart)
+ fn = func(c *chart.Chart) {
+ out = append(out, c.ChartPath())
+ for _, d := range c.Dependencies() {
+ fn(d)
+ }
+ }
+ fn(c)
+ sort.Strings(out)
+ return out
+}
+
+func TestProcessDependencyImportValues(t *testing.T) {
+ c := loadChart(t, "testdata/subpop")
+
+ e := make(map[string]string)
+
+ e["imported-chart1.SC1bool"] = "true"
+ e["imported-chart1.SC1float"] = "3.14"
+ e["imported-chart1.SC1int"] = "100"
+ e["imported-chart1.SC1string"] = "dollywood"
+ e["imported-chart1.SC1extra1"] = "11"
+ e["imported-chart1.SPextra1"] = "helm rocks"
+ e["imported-chart1.SC1extra1"] = "11"
+
+ e["imported-chartA.SCAbool"] = "false"
+ e["imported-chartA.SCAfloat"] = "3.1"
+ e["imported-chartA.SCAint"] = "55"
+ e["imported-chartA.SCAstring"] = "jabba"
+ e["imported-chartA.SPextra3"] = "1.337"
+ e["imported-chartA.SC1extra2"] = "1.337"
+ e["imported-chartA.SCAnested1.SCAnested2"] = "true"
+
+ e["imported-chartA-B.SCAbool"] = "false"
+ e["imported-chartA-B.SCAfloat"] = "3.1"
+ e["imported-chartA-B.SCAint"] = "55"
+ e["imported-chartA-B.SCAstring"] = "jabba"
+
+ e["imported-chartA-B.SCBbool"] = "true"
+ e["imported-chartA-B.SCBfloat"] = "7.77"
+ e["imported-chartA-B.SCBint"] = "33"
+ e["imported-chartA-B.SCBstring"] = "boba"
+ e["imported-chartA-B.SPextra5"] = "k8s"
+ e["imported-chartA-B.SC1extra5"] = "tiller"
+
+ // These values are imported from the child chart to the parent. Parent
+ // values take precedence over imported values. This enables importing a
+ // large section from a child chart and overriding a selection from it.
+ e["overridden-chart1.SC1bool"] = "false"
+ e["overridden-chart1.SC1float"] = "3.141592"
+ e["overridden-chart1.SC1int"] = "99"
+ e["overridden-chart1.SC1string"] = "pollywog"
+ e["overridden-chart1.SPextra2"] = "42"
+
+ e["overridden-chartA.SCAbool"] = "true"
+ e["overridden-chartA.SCAfloat"] = "41.3"
+ e["overridden-chartA.SCAint"] = "808"
+ e["overridden-chartA.SCAstring"] = "jabberwocky"
+ e["overridden-chartA.SPextra4"] = "true"
+
+ // These values are imported from the child chart to the parent. Parent
+ // values take precedence over imported values. This enables importing a
+ // large section from a child chart and overriding a selection from it.
+ e["overridden-chartA-B.SCAbool"] = "true"
+ e["overridden-chartA-B.SCAfloat"] = "41.3"
+ e["overridden-chartA-B.SCAint"] = "808"
+ e["overridden-chartA-B.SCAstring"] = "jabberwocky"
+ e["overridden-chartA-B.SCBbool"] = "false"
+ e["overridden-chartA-B.SCBfloat"] = "1.99"
+ e["overridden-chartA-B.SCBint"] = "77"
+ e["overridden-chartA-B.SCBstring"] = "jango"
+ e["overridden-chartA-B.SPextra6"] = "111"
+ e["overridden-chartA-B.SCAextra1"] = "23"
+ e["overridden-chartA-B.SCBextra1"] = "13"
+ e["overridden-chartA-B.SC1extra6"] = "77"
+
+ // `exports` style
+ e["SCBexported1B"] = "1965"
+ e["SC1extra7"] = "true"
+ e["SCBexported2A"] = "blaster"
+ e["global.SC1exported2.all.SC1exported3"] = "SC1expstr"
+
+ if err := processDependencyImportValues(c, false); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cc.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+
+ switch pv := pv.(type) {
+ case float64:
+ if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
+ t.Errorf("failed to match imported float value %v with expected %v for key %q", s, vv, kk)
+ }
+ case bool:
+ if b := strconv.FormatBool(pv); b != vv {
+ t.Errorf("failed to match imported bool value %v with expected %v for key %q", b, vv, kk)
+ }
+ default:
+ if pv != vv {
+ t.Errorf("failed to match imported string value %q with expected %q for key %q", pv, vv, kk)
+ }
+ }
+ }
+
+ // Since this was processed with coalescing there should be no null values.
+ // Here we verify that.
+ _, err := cc.PathValue("ensurenull")
+ if err == nil {
+ t.Error("expect nil value not found but found it")
+ }
+ switch xerr := err.(type) {
+ case common.ErrNoValue:
+ // We found what we expected
+ default:
+ t.Errorf("expected an ErrNoValue but got %q instead", xerr)
+ }
+
+ c = loadChart(t, "testdata/subpop")
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc = common.Values(c.Values)
+ val, err := cc.PathValue("ensurenull")
+ if err != nil {
+ t.Error("expect value but ensurenull was not found")
+ }
+ if val != nil {
+ t.Errorf("expect nil value but got %q instead", val)
+ }
+}
+
+func TestProcessDependencyImportValuesFromSharedDependencyToAliases(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-import-from-aliased-dependencies")
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ e := make(map[string]string)
+
+ e["foo-defaults.defaultValue"] = "42"
+ e["bar-defaults.defaultValue"] = "42"
+
+ e["foo.defaults.defaultValue"] = "42"
+ e["bar.defaults.defaultValue"] = "42"
+
+ e["foo.grandchild.defaults.defaultValue"] = "42"
+ e["bar.grandchild.defaults.defaultValue"] = "42"
+
+ cValues := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cValues.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+ if pv != vv {
+ t.Errorf("failed to match imported value %v with expected %v", pv, vv)
+ }
+ }
+}
+
+func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) {
+ c := loadChart(t, "testdata/three-level-dependent-chart/umbrella")
+
+ e := make(map[string]string)
+
+ // The order of precedence should be:
+ // 1. User specified values (e.g CLI)
+ // 2. Parent chart values
+ // 3. Imported values
+ // 4. Sub-chart values
+ // The 4 app charts here deal with things differently:
+ // - app1 has a port value set in the umbrella chart. It does not import any
+ // values so the value from the umbrella chart should be used.
+ // - app2 has a value in the app chart and imports from the library. The
+ // app chart value should take precedence.
+ // - app3 has no value in the app chart and imports the value from the library
+ // chart. The library chart value should be used.
+ // - app4 has a value in the app chart and does not import the value from the
+ // library chart. The app charts value should be used.
+ e["app1.service.port"] = "3456"
+ e["app2.service.port"] = "8080"
+ e["app3.service.port"] = "9090"
+ e["app4.service.port"] = "1234"
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cc.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+
+ switch pv := pv.(type) {
+ case float64:
+ if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
+ t.Errorf("failed to match imported float value %v with expected %v", s, vv)
+ }
+ default:
+ if pv != vv {
+ t.Errorf("failed to match imported string value %q with expected %q", pv, vv)
+ }
+ }
+ }
+}
+
+func TestProcessDependencyImportValuesForEnabledCharts(t *testing.T) {
+ c := loadChart(t, "testdata/import-values-from-enabled-subchart/parent-chart")
+ nameOverride := "parent-chart-prod"
+
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Metadata.Dependencies) != 1 {
+ t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
+ }
+
+ prodDependencyValues := c.Dependencies()[0].Values
+ if prodDependencyValues["nameOverride"] != nameOverride {
+ t.Fatalf("dependency chart name should be %s but got %s", nameOverride, prodDependencyValues["nameOverride"])
+ }
+}
+
+func TestGetAliasDependency(t *testing.T) {
+ c := loadChart(t, "testdata/frobnitz")
+ req := c.Metadata.Dependencies
+
+ if len(req) == 0 {
+ t.Fatalf("there are no dependencies to test")
+ }
+
+ // Success case
+ aliasChart := getAliasDependency(c.Dependencies(), req[0])
+ if aliasChart == nil {
+ t.Fatalf("failed to get dependency chart for alias %s", req[0].Name)
+ }
+ if req[0].Alias != "" {
+ if aliasChart.Name() != req[0].Alias {
+ t.Fatalf("dependency chart name should be %s but got %s", req[0].Alias, aliasChart.Name())
+ }
+ } else if aliasChart.Name() != req[0].Name {
+ t.Fatalf("dependency chart name should be %s but got %s", req[0].Name, aliasChart.Name())
+ }
+
+ if req[0].Version != "" {
+ if !IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) {
+ t.Fatalf("dependency chart version is not in the compatible range")
+ }
+ }
+
+ // Failure case
+ req[0].Name = "something-else"
+ if aliasChart := getAliasDependency(c.Dependencies(), req[0]); aliasChart != nil {
+ t.Fatalf("expected no chart but got %s", aliasChart.Name())
+ }
+
+ req[0].Version = "something else which is not in the compatible range"
+ if IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) {
+ t.Fatalf("dependency chart version which is not in the compatible range should cause a failure other than a success ")
+ }
+}
+
+func TestDependentChartAliases(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-alias")
+ req := c.Metadata.Dependencies
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 3 {
+ t.Fatal("expected alias dependencies to be added")
+ }
+
+ if len(c.Dependencies()) != len(c.Metadata.Dependencies) {
+ t.Fatalf("expected number of chart dependencies %d, but got %d", len(c.Metadata.Dependencies), len(c.Dependencies()))
+ }
+
+ aliasChart := getAliasDependency(c.Dependencies(), req[2])
+
+ if aliasChart == nil {
+ t.Fatalf("failed to get dependency chart for alias %s", req[2].Name)
+ }
+ if aliasChart.Parent() != c {
+ t.Fatalf("dependency chart has wrong parent, expected %s but got %s", c.Name(), aliasChart.Parent().Name())
+ }
+ if req[2].Alias != "" {
+ if aliasChart.Name() != req[2].Alias {
+ t.Fatalf("dependency chart name should be %s but got %s", req[2].Alias, aliasChart.Name())
+ }
+ } else if aliasChart.Name() != req[2].Name {
+ t.Fatalf("dependency chart name should be %s but got %s", req[2].Name, aliasChart.Name())
+ }
+
+ req[2].Name = "dummy-name"
+ if aliasChart := getAliasDependency(c.Dependencies(), req[2]); aliasChart != nil {
+ t.Fatalf("expected no chart but got %s", aliasChart.Name())
+ }
+
+}
+
+func TestDependentChartWithSubChartsAbsentInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-no-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+}
+
+func TestDependentChartWithSubChartsHelmignore(t *testing.T) {
+ // FIXME what does this test?
+ loadChart(t, "testdata/dependent-chart-helmignore")
+}
+
+func TestDependentChartsWithSubChartsSymlink(t *testing.T) {
+ joonix := filepath.Join("testdata", "joonix")
+ if err := os.Symlink(filepath.Join("..", "..", "frobnitz"), filepath.Join(joonix, "charts", "frobnitz")); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(filepath.Join(joonix, "charts", "frobnitz"))
+ c := loadChart(t, joonix)
+
+ if c.Name() != "joonix" {
+ t.Fatalf("unexpected chart name: %s", c.Name())
+ }
+ if n := len(c.Dependencies()); n != 1 {
+ t.Fatalf("expected 1 dependency for this chart, but got %d", n)
+ }
+}
+
+func TestDependentChartsWithSubchartsAllSpecifiedInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-with-all-in-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Dependencies()) != len(c.Metadata.Dependencies) {
+ t.Fatalf("expected number of chart dependencies %d, but got %d", len(c.Metadata.Dependencies), len(c.Dependencies()))
+ }
+}
+
+func TestDependentChartsWithSomeSubchartsSpecifiedInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-with-mixed-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Metadata.Dependencies) != 1 {
+ t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
+ }
+}
+
+func validateDependencyTree(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ for _, dependency := range c.Dependencies() {
+ if dependency.Parent() != c {
+ if dependency.Parent() != c {
+ t.Fatalf("dependency chart %s has wrong parent, expected %s but got %s", dependency.Name(), c.Name(), dependency.Parent().Name())
+ }
+ }
+ // recurse entire tree
+ validateDependencyTree(t, dependency)
+ }
+}
+
+func TestChartWithDependencyAliasedTwiceAndDoublyReferencedSubDependency(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-dependency-aliased-twice")
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatalf("expected one dependency for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected two dependencies after processing aliases")
+ }
+ validateDependencyTree(t, c)
+}
diff --git a/helm/internal/chart/v3/util/doc.go b/helm/internal/chart/v3/util/doc.go
new file mode 100644
index 000000000..002d5babc
--- /dev/null
+++ b/helm/internal/chart/v3/util/doc.go
@@ -0,0 +1,45 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+package util contains tools for working with charts.
+
+Charts are described in the chart package (pkg/chart).
+This package provides utilities for serializing and deserializing charts.
+
+A chart can be represented on the file system in one of two ways:
+
+ - As a directory that contains a Chart.yaml file and other chart things.
+ - As a tarred gzipped file containing a directory that then contains a
+ Chart.yaml file.
+
+This package provides utilities for working with those file formats.
+
+The preferred way of loading a chart is using 'loader.Load`:
+
+ chart, err := loader.Load(filename)
+
+This will attempt to discover whether the file at 'filename' is a directory or
+a chart archive. It will then load accordingly.
+
+For accepting raw compressed tar file data from an io.Reader, the
+'loader.LoadArchive()' will read in the data, uncompress it, and unpack it
+into a Chart.
+
+When creating charts in memory, use the 'helm.sh/helm/pkg/chart'
+package directly.
+*/
+package util // import chartutil "helm.sh/helm/v4/internal/chart/v3/util"
diff --git a/helm/internal/chart/v3/util/expand.go b/helm/internal/chart/v3/util/expand.go
new file mode 100644
index 000000000..1a10fce3c
--- /dev/null
+++ b/helm/internal/chart/v3/util/expand.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+)
+
+// Expand uncompresses and extracts a chart into the specified directory.
+func Expand(dir string, r io.Reader) error {
+ files, err := archive.LoadArchiveFiles(r)
+ if err != nil {
+ return err
+ }
+
+ // Get the name of the chart
+ var chartName string
+ for _, file := range files {
+ if file.Name == "Chart.yaml" {
+ ch := &chart.Metadata{}
+ if err := yaml.Unmarshal(file.Data, ch); err != nil {
+ return fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ chartName = ch.Name
+ }
+ }
+ if chartName == "" {
+ return errors.New("chart name not specified")
+ }
+
+ // Find the base directory
+ // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up
+ // being wrong or returning an error. This was introduced in v0.4.0.
+ dir = filepath.Clean(dir)
+ chartdir, err := securejoin.SecureJoin(dir, chartName)
+ if err != nil {
+ return err
+ }
+
+ // Copy all files verbatim. We don't parse these files because parsing can remove
+ // comments.
+ for _, file := range files {
+ outpath, err := securejoin.SecureJoin(chartdir, file.Name)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the necessary subdirs get created.
+ basedir := filepath.Dir(outpath)
+ if err := os.MkdirAll(basedir, 0755); err != nil {
+ return err
+ }
+
+ if err := os.WriteFile(outpath, file.Data, 0644); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ExpandFile expands the src file into the dest directory.
+func ExpandFile(dest, src string) error {
+ h, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer h.Close()
+ return Expand(dest, h)
+}
diff --git a/helm/internal/chart/v3/util/expand_test.go b/helm/internal/chart/v3/util/expand_test.go
new file mode 100644
index 000000000..280995f7e
--- /dev/null
+++ b/helm/internal/chart/v3/util/expand_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestExpand(t *testing.T) {
+ dest := t.TempDir()
+
+ reader, err := os.Open("testdata/frobnitz-1.2.3.tgz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := Expand(dest, reader); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChartPath := filepath.Join(dest, "frobnitz")
+ fi, err := os.Stat(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Fatalf("expected a chart directory at %s", expectedChartPath)
+ }
+
+ dir, err := os.Open(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectLen := 11
+ if len(fis) != expectLen {
+ t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
+ }
+
+ for _, fi := range fis {
+ expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // os.Stat can return different values for directories, based on the OS
+ // for Linux, for example, os.Stat always returns the size of the directory
+ // (value-4096) regardless of the size of the contents of the directory
+ mode := expect.Mode()
+ if !mode.IsDir() {
+ if fi.Size() != expect.Size() {
+ t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
+ }
+ }
+ }
+}
+
+func TestExpandFile(t *testing.T) {
+ dest := t.TempDir()
+
+ if err := ExpandFile(dest, "testdata/frobnitz-1.2.3.tgz"); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChartPath := filepath.Join(dest, "frobnitz")
+ fi, err := os.Stat(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Fatalf("expected a chart directory at %s", expectedChartPath)
+ }
+
+ dir, err := os.Open(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectLen := 11
+ if len(fis) != expectLen {
+ t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
+ }
+
+ for _, fi := range fis {
+ expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // os.Stat can return different values for directories, based on the OS
+ // for Linux, for example, os.Stat always returns the size of the directory
+ // (value-4096) regardless of the size of the contents of the directory
+ mode := expect.Mode()
+ if !mode.IsDir() {
+ if fi.Size() != expect.Size() {
+ t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
+ }
+ }
+ }
+}
diff --git a/helm/internal/chart/v3/util/save.go b/helm/internal/chart/v3/util/save.go
new file mode 100644
index 000000000..f886c6175
--- /dev/null
+++ b/helm/internal/chart/v3/util/save.go
@@ -0,0 +1,257 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=")
+
+// SaveDir saves a chart as files in a directory.
+//
+// This takes the chart name, and creates a new subdirectory inside of the given dest
+// directory, writing the chart's contents to that subdirectory.
+func SaveDir(c *chart.Chart, dest string) error {
+ // Create the chart directory
+ err := validateName(c.Name())
+ if err != nil {
+ return err
+ }
+ outdir := filepath.Join(dest, c.Name())
+ if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
+ return fmt.Errorf("file %s already exists and is not a directory", outdir)
+ }
+ if err := os.MkdirAll(outdir, 0755); err != nil {
+ return err
+ }
+
+ // Save the chart file.
+ if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil {
+ return err
+ }
+
+ // Save values.yaml
+ for _, f := range c.Raw {
+ if f.Name == ValuesfileName {
+ vf := filepath.Join(outdir, ValuesfileName)
+ if err := writeFile(vf, f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.schema.json if it exists
+ if c.Schema != nil {
+ filename := filepath.Join(outdir, SchemafileName)
+ if err := writeFile(filename, c.Schema); err != nil {
+ return err
+ }
+ }
+
+ // Save templates and files
+ for _, o := range [][]*common.File{c.Templates, c.Files} {
+ for _, f := range o {
+ n := filepath.Join(outdir, f.Name)
+ if err := writeFile(n, f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save dependencies
+ base := filepath.Join(outdir, ChartsDir)
+ for _, dep := range c.Dependencies() {
+ // Here, we write each dependency as a tar file.
+ if _, err := Save(dep, base); err != nil {
+ return fmt.Errorf("saving %s: %w", dep.ChartFullPath(), err)
+ }
+ }
+ return nil
+}
+
+// Save creates an archived chart to the given directory.
+//
+// This takes an existing chart and a destination directory.
+//
+// If the directory is /foo, and the chart is named bar, with version 1.0.0, this
+// will generate /foo/bar-1.0.0.tgz.
+//
+// This returns the absolute path to the chart archive file.
+func Save(c *chart.Chart, outDir string) (string, error) {
+ if err := c.Validate(); err != nil {
+ return "", fmt.Errorf("chart validation: %w", err)
+ }
+
+ filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version)
+ filename = filepath.Join(outDir, filename)
+ dir := filepath.Dir(filename)
+ if stat, err := os.Stat(dir); err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ if err2 := os.MkdirAll(dir, 0755); err2 != nil {
+ return "", err2
+ }
+ } else {
+ return "", fmt.Errorf("stat %s: %w", dir, err)
+ }
+ } else if !stat.IsDir() {
+ return "", fmt.Errorf("is not a directory: %s", dir)
+ }
+
+ f, err := os.Create(filename)
+ if err != nil {
+ return "", err
+ }
+
+ // Wrap in gzip writer
+ zipper := gzip.NewWriter(f)
+ zipper.Extra = headerBytes
+ zipper.Comment = "Helm"
+
+ // Wrap in tar writer
+ twriter := tar.NewWriter(zipper)
+ rollback := false
+ defer func() {
+ twriter.Close()
+ zipper.Close()
+ f.Close()
+ if rollback {
+ os.Remove(filename)
+ }
+ }()
+
+ if err := writeTarContents(twriter, c, ""); err != nil {
+ rollback = true
+ return filename, err
+ }
+ return filename, nil
+}
+
+func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error {
+ err := validateName(c.Name())
+ if err != nil {
+ return err
+ }
+ base := filepath.Join(prefix, c.Name())
+
+ // Save Chart.yaml
+ cdata, err := yaml.Marshal(c.Metadata)
+ if err != nil {
+ return err
+ }
+ if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata, c.ModTime); err != nil {
+ return err
+ }
+
+ // Save Chart.lock
+ if c.Lock != nil {
+ ldata, err := yaml.Marshal(c.Lock)
+ if err != nil {
+ return err
+ }
+ if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata, c.Lock.Generated); err != nil {
+ return err
+ }
+ }
+
+ // Save values.yaml
+ for _, f := range c.Raw {
+ if f.Name == ValuesfileName {
+ if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data, f.ModTime); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.schema.json if it exists
+ if c.Schema != nil {
+ if !json.Valid(c.Schema) {
+ return errors.New("invalid JSON in " + SchemafileName)
+ }
+ if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema, c.SchemaModTime); err != nil {
+ return err
+ }
+ }
+
+ // Save templates
+ for _, f := range c.Templates {
+ n := filepath.Join(base, f.Name)
+ if err := writeToTar(out, n, f.Data, f.ModTime); err != nil {
+ return err
+ }
+ }
+
+ // Save files
+ for _, f := range c.Files {
+ n := filepath.Join(base, f.Name)
+ if err := writeToTar(out, n, f.Data, f.ModTime); err != nil {
+ return err
+ }
+ }
+
+ // Save dependencies
+ for _, dep := range c.Dependencies() {
+ if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// writeToTar writes a single file to a tar archive.
+func writeToTar(out *tar.Writer, name string, body []byte, modTime time.Time) error {
+ // TODO: Do we need to create dummy parent directory names if none exist?
+ h := &tar.Header{
+ Name: filepath.ToSlash(name),
+ Mode: 0644,
+ Size: int64(len(body)),
+ ModTime: modTime,
+ }
+ if h.ModTime.IsZero() {
+ h.ModTime = time.Now()
+ }
+ if err := out.WriteHeader(h); err != nil {
+ return err
+ }
+ _, err := out.Write(body)
+ return err
+}
+
+// If the name has directory name has characters which would change the location
+// they need to be removed.
+func validateName(name string) error {
+ nname := filepath.Base(name)
+
+ if nname != name {
+ return common.ErrInvalidChartName{Name: name}
+ }
+
+ return nil
+}
diff --git a/helm/internal/chart/v3/util/save_test.go b/helm/internal/chart/v3/util/save_test.go
new file mode 100644
index 000000000..7a42a76af
--- /dev/null
+++ b/helm/internal/chart/v3/util/save_test.go
@@ -0,0 +1,357 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func TestSave(t *testing.T) {
+ tmp := t.TempDir()
+
+ for _, dest := range []string{tmp, filepath.Join(tmp, "newdir")} {
+ t.Run("outDir="+dest, func(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: time.Now(), Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ }
+ chartWithInvalidJSON := withSchema(*c, []byte("{"))
+
+ where, err := Save(c, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ if !strings.HasPrefix(where, dest) {
+ t.Fatalf("Expected %q to start with %q", where, dest)
+ }
+ if !strings.HasSuffix(where, ".tgz") {
+ t.Fatalf("Expected %q to end with .tgz", where)
+ }
+
+ c2, err := loader.LoadFile(where)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c2.Name() != c.Name() {
+ t.Fatalf("Expected chart archive to have %q, got %q", c.Name(), c2.Name())
+ }
+ if len(c2.Files) != 1 || c2.Files[0].Name != "scheherazade/shahryar.txt" {
+ t.Fatal("Files data did not match")
+ }
+
+ if !bytes.Equal(c.Schema, c2.Schema) {
+ indentation := 4
+ formattedExpected := Indent(indentation, string(c.Schema))
+ formattedActual := Indent(indentation, string(c2.Schema))
+ t.Fatalf("Schema data did not match.\nExpected:\n%s\nActual:\n%s", formattedExpected, formattedActual)
+ }
+ if _, err := Save(&chartWithInvalidJSON, dest); err == nil {
+ t.Fatalf("Invalid JSON was not caught while saving chart")
+ }
+
+ c.Metadata.APIVersion = chart.APIVersionV3
+ where, err = Save(c, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ c2, err = loader.LoadFile(where)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c2.Lock == nil {
+ t.Fatal("Expected v3 chart archive to contain a Chart.lock file")
+ }
+ if c2.Lock.Digest != c.Lock.Digest {
+ t.Fatal("Chart.lock data did not match")
+ }
+ })
+ }
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "../ahab",
+ Version: "1.2.3",
+ },
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: time.Now(), Data: []byte("1,001 Nights")},
+ },
+ }
+ _, err := Save(c, tmp)
+ if err == nil {
+ t.Fatal("Expected error saving chart with invalid name")
+ }
+}
+
+// Creates a copy with a different schema; does not modify anything.
+func withSchema(chart chart.Chart, schema []byte) chart.Chart {
+ chart.Schema = schema
+ return chart
+}
+
+func Indent(n int, text string) string {
+ startOfLine := regexp.MustCompile(`(?m)^`)
+ indentation := strings.Repeat(" ", n)
+ return startOfLine.ReplaceAllLiteralString(text, indentation)
+}
+
+func TestSavePreservesTimestamps(t *testing.T) {
+ // Test executes so quickly that if we don't subtract a second, the
+ // check will fail because `initialCreateTime` will be identical to the
+ // written timestamp for the files.
+ initialCreateTime := time.Now().Add(-1 * time.Second)
+ tmp := t.TempDir()
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ ModTime: initialCreateTime,
+ Values: map[string]interface{}{
+ "imageName": "testimage",
+ "imageId": 42,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: initialCreateTime, Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ SchemaModTime: initialCreateTime,
+ }
+
+ where, err := Save(c, tmp)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+
+ allHeaders, err := retrieveAllHeadersFromTar(where)
+ if err != nil {
+ t.Fatalf("Failed to parse tar: %v", err)
+ }
+
+ roundedTime := initialCreateTime.Round(time.Second)
+ for _, header := range allHeaders {
+ if !header.ModTime.Equal(roundedTime) {
+ t.Fatalf("File timestamp not preserved: %v", header.ModTime)
+ }
+ }
+}
+
+// We could refactor `load.go` to use this `retrieveAllHeadersFromTar` function
+// as well, so we are not duplicating components of the code which iterate
+// through the tar.
+func retrieveAllHeadersFromTar(path string) ([]*tar.Header, error) {
+ raw, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ unzipped, err := gzip.NewReader(raw)
+ if err != nil {
+ return nil, err
+ }
+ defer unzipped.Close()
+
+ tr := tar.NewReader(unzipped)
+ headers := []*tar.Header{}
+ for {
+ hd, err := tr.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ headers = append(headers, hd)
+ }
+
+ return headers, nil
+}
+
+func TestSaveDir(t *testing.T) {
+ tmp := t.TempDir()
+ modTime := time.Now()
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: modTime, Data: []byte("1,001 Nights")},
+ },
+ Templates: []*common.File{
+ {Name: path.Join(TemplatesDir, "nested", "dir", "thing.yaml"), ModTime: modTime, Data: []byte("abc: {{ .Values.abc }}")},
+ },
+ }
+
+ if err := SaveDir(c, tmp); err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+
+ c2, err := loader.LoadDir(tmp + "/ahab")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c2.Name() != c.Name() {
+ t.Fatalf("Expected chart archive to have %q, got %q", c.Name(), c2.Name())
+ }
+
+ if len(c2.Templates) != 1 || c2.Templates[0].Name != c.Templates[0].Name {
+ t.Fatal("Templates data did not match")
+ }
+
+ if len(c2.Files) != 1 || c2.Files[0].Name != c.Files[0].Name {
+ t.Fatal("Files data did not match")
+ }
+
+ tmp2 := t.TempDir()
+ c.Metadata.Name = "../ahab"
+ pth := filepath.Join(tmp2, "tmpcharts")
+ if err := os.MkdirAll(filepath.Join(pth), 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := SaveDir(c, pth); err.Error() != "\"../ahab\" is not a valid chart name" {
+ t.Fatalf("Did not get expected error for chart named %q", c.Name())
+ }
+}
+
+func TestRepeatableSave(t *testing.T) {
+ tmp := t.TempDir()
+ defer os.RemoveAll(tmp)
+ modTime := time.Date(2021, 9, 1, 20, 34, 58, 651387237, time.UTC)
+ tests := []struct {
+ name string
+ chart *chart.Chart
+ want string
+ }{
+ {
+ name: "Package 1 file",
+ chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ ModTime: modTime,
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ Generated: modTime,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: modTime, Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ SchemaModTime: modTime,
+ },
+ want: "5bfea18cc3c8cbc265744bc32bffa9489a4dbe87d6b51b90f4255e4839d35e03",
+ },
+ {
+ name: "Package 2 files",
+ chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV3,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ ModTime: modTime,
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ Generated: modTime,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: modTime, Data: []byte("1,001 Nights")},
+ {Name: "scheherazade/dunyazad.txt", ModTime: modTime, Data: []byte("1,001 Nights again")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ SchemaModTime: modTime,
+ },
+ want: "a240365c21e0a2f4a57873132a9b686566a612d08bcb3f20c9446bfff005ccce",
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ // create package
+ dest := path.Join(tmp, "newdir")
+ where, err := Save(test.chart, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ // get shasum for package
+ result, err := sha256Sum(where)
+ if err != nil {
+ t.Fatalf("Failed to check shasum: %s", err)
+ }
+ // assert that the package SHA is what we wanted.
+ if result != test.want {
+ t.Errorf("FormatName() result = %v, want %v", result, test.want)
+ }
+ })
+ }
+}
+
+func sha256Sum(filePath string) (string, error) {
+ f, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ h := sha256.New()
+ if _, err := io.Copy(h, f); err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
new file mode 100644
index 000000000..4a4da7996
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ - name: child
+ alias: bar
+ version: 1.0.0
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
new file mode 100644
index 000000000..0f3afd8c6
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..3e0bf725b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
new file mode 100644
index 000000000..1830492ef
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}-{{ .Values.from }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..b5d55af7c
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/values.yaml b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/values.yaml
new file mode 100644
index 000000000..695521a4a
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-dependency-aliased-twice/values.yaml
@@ -0,0 +1,7 @@
+foo:
+ grandchild:
+ from: foo
+bar:
+ grandchild:
+ from: bar
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
new file mode 100644
index 000000000..f2f0610b5
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ import-values:
+ - parent: foo-defaults
+ child: defaults
+ - name: child
+ alias: bar
+ version: 1.0.0
+ import-values:
+ - parent: bar-defaults
+ child: defaults
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
new file mode 100644
index 000000000..08ccac9e5
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: grandchild
+ version: 1.0.0
+ import-values:
+ - parent: defaults
+ child: defaults
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..3e0bf725b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v3
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
new file mode 100644
index 000000000..f51c594f4
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
@@ -0,0 +1,2 @@
+defaults:
+ defaultValue: "42"
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..3140f53dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ .Values.defaults | toYaml }}
+
diff --git a/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
new file mode 100644
index 000000000..a2b62c95a
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ toYaml .Values.defaults | indent 2 }}
+
diff --git a/helm/internal/chart/v3/util/testdata/chartfiletest.yaml b/helm/internal/chart/v3/util/testdata/chartfiletest.yaml
new file mode 100644
index 000000000..d222c8f8d
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/chartfiletest.yaml
@@ -0,0 +1,20 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
diff --git a/helm/internal/chart/v3/util/testdata/coleridge.yaml b/helm/internal/chart/v3/util/testdata/coleridge.yaml
new file mode 100644
index 000000000..b6579628b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/coleridge.yaml
@@ -0,0 +1,12 @@
+poet: "Coleridge"
+title: "Rime of the Ancient Mariner"
+stanza: ["at", "length", "did", "cross", "an", "Albatross"]
+
+mariner:
+ with: "crossbow"
+ shot: "ALBATROSS"
+
+water:
+ water:
+ where: "everywhere"
+ nor: "any drop to drink"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/.helmignore b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.lock b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.yaml
new file mode 100644
index 000000000..b8773d0d3
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/Chart.yaml
@@ -0,0 +1,29 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+ alias: mariners2
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+ alias: mariners1
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/INSTALL.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/LICENSE b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/_ignore_me b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/docs/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/icon.svg b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/ignore/me.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/templates/template.tpl b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-alias/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-alias/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/.helmignore b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/.helmignore
new file mode 100644
index 000000000..8a71bc82e
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/.helmignore
@@ -0,0 +1,2 @@
+ignore/
+.*
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/Chart.yaml
new file mode 100644
index 000000000..8b4ad8cdd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/.ignore_me b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/.ignore_me
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/_ignore_me b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/templates/template.tpl b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-helmignore/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/.helmignore b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..8b4ad8cdd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/LICENSE b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/icon.svg b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/ignore/me.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-no-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..06283093e
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/ignore/me.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..6543799d0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/ignore/me.txt b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz-1.2.3.tgz b/helm/internal/chart/v3/util/testdata/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..8731dce02
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/frobnitz-1.2.3.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/.helmignore b/helm/internal/chart/v3/util/testdata/frobnitz/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/Chart.lock b/helm/internal/chart/v3/util/testdata/frobnitz/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/Chart.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/Chart.yaml
new file mode 100644
index 000000000..1b63fc3e2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v3
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/INSTALL.txt b/helm/internal/chart/v3/util/testdata/frobnitz/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/LICENSE b/helm/internal/chart/v3/util/testdata/frobnitz/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/README.md b/helm/internal/chart/v3/util/testdata/frobnitz/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/_ignore_me b/helm/internal/chart/v3/util/testdata/frobnitz/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/Chart.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..2a2c9c883
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/README.md b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..aea109c75
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/values.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/Chart.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/Chart.yaml
new file mode 100644
index 000000000..4d3eea730
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v3
+name: mariner
+description: A Helm chart for Kubernetes
+version: 4.3.2
+home: ""
+dependencies:
+ - name: albatross
+ repository: https://example.com/mariner/charts
+ version: "0.1.0"
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml
new file mode 100644
index 000000000..da605991b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: albatross
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml
new file mode 100644
index 000000000..3121cd7ce
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml
@@ -0,0 +1,4 @@
+albatross: "true"
+
+global:
+ author: Coleridge
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl
new file mode 100644
index 000000000..29c11843a
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl
@@ -0,0 +1 @@
+# This is a placeholder.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/values.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/values.yaml
new file mode 100644
index 000000000..b0ccb0086
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/charts/mariner/values.yaml
@@ -0,0 +1,7 @@
+# Default values for .
+# This is a YAML-formatted file. https://github.com/toml-lang/toml
+# Declare name/value pairs to be passed into your templates.
+# name: "value"
+
+:
+ test: true
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/docs/README.md b/helm/internal/chart/v3/util/testdata/frobnitz/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/icon.svg b/helm/internal/chart/v3/util/testdata/frobnitz/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/ignore/me.txt b/helm/internal/chart/v3/util/testdata/frobnitz/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/templates/template.tpl b/helm/internal/chart/v3/util/testdata/frobnitz/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz/values.yaml b/helm/internal/chart/v3/util/testdata/frobnitz/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/frobnitz/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/internal/chart/v3/util/testdata/frobnitz_backslash-1.2.3.tgz b/helm/internal/chart/v3/util/testdata/frobnitz_backslash-1.2.3.tgz
new file mode 100644
index 000000000..692965951
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/frobnitz_backslash-1.2.3.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/genfrob.sh b/helm/internal/chart/v3/util/testdata/genfrob.sh
new file mode 100755
index 000000000..35fdd59f2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/genfrob.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Pack the albatross chart into the mariner chart.
+echo "Packing albatross into mariner"
+tar -zcvf mariner/charts/albatross-0.1.0.tgz albatross
+
+echo "Packing mariner into frobnitz"
+tar -zcvf frobnitz/charts/mariner-4.3.2.tgz mariner
+tar -zcvf frobnitz_backslash/charts/mariner-4.3.2.tgz mariner
+
+# Pack the frobnitz chart.
+echo "Packing frobnitz"
+tar --exclude=ignore/* -zcvf frobnitz-1.2.3.tgz frobnitz
+tar --exclude=ignore/* -zcvf frobnitz_backslash-1.2.3.tgz frobnitz_backslash
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock
new file mode 100644
index 000000000..b2f17fb39
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: dev
+ repository: file://envs/dev
+ version: v0.1.0
+- name: prod
+ repository: file://envs/prod
+ version: v0.1.0
+digest: sha256:9403fc24f6cf9d6055820126cf7633b4bd1fed3c77e4880c674059f536346182
+generated: "2020-02-03T10:38:51.180474+01:00"
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml
new file mode 100644
index 000000000..0b3e9958b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v3
+name: parent-chart
+version: v0.1.0
+appVersion: v0.1.0
+dependencies:
+ - name: dev
+ repository: "file://envs/dev"
+ version: ">= 0.0.1"
+ condition: dev.enabled,global.dev.enabled
+ tags:
+ - dev
+ import-values:
+ - data
+
+ - name: prod
+ repository: "file://envs/prod"
+ version: ">= 0.0.1"
+ condition: prod.enabled,global.prod.enabled
+ tags:
+ - prod
+ import-values:
+ - data
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz
new file mode 100644
index 000000000..d28e1621c
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz
new file mode 100644
index 000000000..a0c5aa84b
Binary files /dev/null and b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz differ
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml
new file mode 100644
index 000000000..72427c097
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+name: dev
+version: v0.1.0
+appVersion: v0.1.0
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml
new file mode 100644
index 000000000..38f03484d
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml
@@ -0,0 +1,9 @@
+# Dev values parent-chart
+nameOverride: parent-chart-dev
+exports:
+ data:
+ resources:
+ autoscaler:
+ minReplicas: 1
+ maxReplicas: 3
+ targetCPUUtilizationPercentage: 80
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml
new file mode 100644
index 000000000..058ab3942
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+name: prod
+version: v0.1.0
+appVersion: v0.1.0
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml
new file mode 100644
index 000000000..10cc756b2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml
@@ -0,0 +1,9 @@
+# Prod values parent-chart
+nameOverride: parent-chart-prod
+exports:
+ data:
+ resources:
+ autoscaler:
+ minReplicas: 2
+ maxReplicas: 5
+ targetCPUUtilizationPercentage: 90
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml
new file mode 100644
index 000000000..976e5a8f1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml
@@ -0,0 +1,16 @@
+###################################################################################################
+# parent-chart horizontal pod autoscaler
+###################################################################################################
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ .Release.Name }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1beta1
+ kind: Deployment
+ name: {{ .Release.Name }}
+ minReplicas: {{ required "A valid .Values.resources.autoscaler.minReplicas entry required!" .Values.resources.autoscaler.minReplicas }}
+ maxReplicas: {{ required "A valid .Values.resources.autoscaler.maxReplicas entry required!" .Values.resources.autoscaler.maxReplicas }}
+ targetCPUUtilizationPercentage: {{ required "A valid .Values.resources.autoscaler.targetCPUUtilizationPercentage!" .Values.resources.autoscaler.targetCPUUtilizationPercentage }}
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml
new file mode 100644
index 000000000..b812f0a33
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml
@@ -0,0 +1,10 @@
+# Default values for parent-chart.
+nameOverride: parent-chart
+tags:
+ dev: false
+ prod: true
+resources:
+ autoscaler:
+ minReplicas: 0
+ maxReplicas: 0
+ targetCPUUtilizationPercentage: 99
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/joonix/Chart.yaml b/helm/internal/chart/v3/util/testdata/joonix/Chart.yaml
new file mode 100644
index 000000000..1860a3df1
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/joonix/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: joonix
+version: 1.2.3
diff --git a/helm/internal/chart/v3/util/testdata/joonix/charts/.gitkeep b/helm/internal/chart/v3/util/testdata/joonix/charts/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/chart/v3/util/testdata/subpop/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/Chart.yaml
new file mode 100644
index 000000000..53e9ec502
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/Chart.yaml
@@ -0,0 +1,41 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: parentchart
+version: 0.1.0
+dependencies:
+ - name: subchart1
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart1.enabled
+ tags:
+ - front-end
+ - subchart1
+ import-values:
+ - child: SC1data
+ parent: imported-chart1
+ - child: SC1data
+ parent: overridden-chart1
+ - child: imported-chartA
+ parent: imported-chartA
+ - child: imported-chartA-B
+ parent: imported-chartA-B
+ - child: overridden-chartA-B
+ parent: overridden-chartA-B
+ - child: SCBexported1A
+ parent: .
+ - SCBexported2
+ - SC1exported1
+
+ - name: subchart2
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart2.enabled
+ tags:
+ - back-end
+ - subchart2
+
+ - name: subchart2
+ alias: subchart2alias
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart2alias.enabled
diff --git a/helm/internal/chart/v3/util/testdata/subpop/README.md b/helm/internal/chart/v3/util/testdata/subpop/README.md
new file mode 100644
index 000000000..e43fbfe9c
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/README.md
@@ -0,0 +1,18 @@
+## Subpop
+
+This chart is for testing the processing of enabled/disabled charts
+via conditions and tags.
+
+Currently there are three levels:
+
+````
+parent
+-1 tags: front-end, subchart1
+--A tags: front-end, subchartA
+--B tags: front-end, subchartB
+-2 tags: back-end, subchart2
+--B tags: back-end, subchartB
+--C tags: back-end, subchartC
+````
+
+Tags and conditions are currently in requirements.yaml files.
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/Chart.yaml
new file mode 100644
index 000000000..1539fb97d
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/Chart.yaml
@@ -0,0 +1,36 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchart1
+version: 0.1.0
+dependencies:
+ - name: subcharta
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subcharta.enabled
+ tags:
+ - front-end
+ - subcharta
+ import-values:
+ - child: SCAdata
+ parent: imported-chartA
+ - child: SCAdata
+ parent: overridden-chartA
+ - child: SCAdata
+ parent: imported-chartA-B
+
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ import-values:
+ - child: SCBdata
+ parent: imported-chartB
+ - child: SCBdata
+ parent: imported-chartA-B
+ - child: exports.SCBexported2
+ parent: exports.SCBexported2
+ - SCBexported1
+
+ tags:
+ - front-end
+ - subchartb
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml
new file mode 100644
index 000000000..2755a821b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subcharta
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml
new file mode 100644
index 000000000..f0381ae6a
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml
@@ -0,0 +1,17 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchartA
+service:
+ name: apache
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+SCAdata:
+ SCAbool: false
+ SCAfloat: 3.1
+ SCAint: 55
+ SCAstring: "jabba"
+ SCAnested1:
+ SCAnested2: true
+
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..bf12fe8f3
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml
new file mode 100644
index 000000000..774fdd75c
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml
@@ -0,0 +1,35 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+SCBdata:
+ SCBbool: true
+ SCBfloat: 7.77
+ SCBint: 33
+ SCBstring: "boba"
+
+exports:
+ SCBexported1:
+ SCBexported1A:
+ SCBexported1B: 1965
+
+ SCBexported2:
+ SCBexported2A: "blaster"
+
+global:
+ kolla:
+ nova:
+ api:
+ all:
+ port: 8774
+ metadata:
+ all:
+ port: 8775
+
+
+
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/crds/crdA.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/crds/crdA.yaml
new file mode 100644
index 000000000..fca77fd4b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/crds/crdA.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: testCRDs
+spec:
+ group: testCRDGroups
+ names:
+ kind: TestCRD
+ listKind: TestCRDList
+ plural: TestCRDs
+ shortNames:
+ - tc
+ singular: authconfig
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/NOTES.txt b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/NOTES.txt
new file mode 100644
index 000000000..4bdf443f6
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/NOTES.txt
@@ -0,0 +1 @@
+Sample notes for {{ .Chart.Name }}
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/service.yaml
new file mode 100644
index 000000000..fee94dced
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ kube-version/major: "{{ .Capabilities.KubeVersion.Major }}"
+ kube-version/minor: "{{ .Capabilities.KubeVersion.Minor }}"
+ kube-version/version: "v{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}.0"
+{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test" }}
+ kube-api-version/test: v1
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml
new file mode 100644
index 000000000..91b954e5f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml
@@ -0,0 +1,7 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ .Chart.Name }}-role
+rules:
+- resources: ["*"]
+ verbs: ["get","list","watch"]
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml
new file mode 100644
index 000000000..5d193f1a6
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ .Chart.Name }}-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ .Chart.Name }}-role
+subjects:
+- kind: ServiceAccount
+ name: {{ .Chart.Name }}-sa
+ namespace: default
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml
new file mode 100644
index 000000000..7126c7d89
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Chart.Name }}-sa
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/values.yaml
new file mode 100644
index 000000000..a974e316a
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart1/values.yaml
@@ -0,0 +1,55 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchart1
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+
+SC1data:
+ SC1bool: true
+ SC1float: 3.14
+ SC1int: 100
+ SC1string: "dollywood"
+ SC1extra1: 11
+
+imported-chartA:
+ SC1extra2: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 3.14
+ SCAint: 100
+ SCAstring: "jabbathehut"
+ SC1extra3: true
+
+imported-chartA-B:
+ SC1extra5: "tiller"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 3.33
+ SCAint: 555
+ SCAstring: "wormwood"
+ SCAextra1: 23
+
+ SCBbool: true
+ SCBfloat: 0.25
+ SCBint: 98
+ SCBstring: "murkwood"
+ SCBextra1: 13
+
+ SC1extra6: 77
+
+SCBexported1A:
+ SC1extra7: true
+
+exports:
+ SC1exported1:
+ global:
+ SC1exported2:
+ all:
+ SC1exported3: "SC1expstr"
\ No newline at end of file
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/Chart.yaml
new file mode 100644
index 000000000..e77657040
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchart2
+version: 0.1.0
+dependencies:
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ tags:
+ - back-end
+ - subchartb
+ - name: subchartc
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartc.enabled
+ tags:
+ - back-end
+ - subchartc
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..bf12fe8f3
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..fb3dfc445
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart2-{{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: subchart2-{{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml
new file mode 100644
index 000000000..e8c0ef5e5
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: subchartc
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/charts/subchart2/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/helm/internal/chart/v3/util/testdata/subpop/noreqs/Chart.yaml b/helm/internal/chart/v3/util/testdata/subpop/noreqs/Chart.yaml
new file mode 100644
index 000000000..09eb05a96
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/noreqs/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v3
+description: A Helm chart for Kubernetes
+name: parentchart
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/subpop/noreqs/templates/service.yaml b/helm/internal/chart/v3/util/testdata/subpop/noreqs/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/noreqs/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/internal/chart/v3/util/testdata/subpop/noreqs/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/noreqs/values.yaml
new file mode 100644
index 000000000..4ed3b7ad3
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/noreqs/values.yaml
@@ -0,0 +1,26 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+
+# switch-like
+tags:
+ front-end: true
+ back-end: false
diff --git a/helm/internal/chart/v3/util/testdata/subpop/values.yaml b/helm/internal/chart/v3/util/testdata/subpop/values.yaml
new file mode 100644
index 000000000..ba70ed406
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/subpop/values.yaml
@@ -0,0 +1,45 @@
+# parent/values.yaml
+
+imported-chart1:
+ SPextra1: "helm rocks"
+
+overridden-chart1:
+ SC1bool: false
+ SC1float: 3.141592
+ SC1int: 99
+ SC1string: "pollywog"
+ SPextra2: 42
+
+
+imported-chartA:
+ SPextra3: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 41.3
+ SCAint: 808
+ SCAstring: "jabberwocky"
+ SPextra4: true
+
+imported-chartA-B:
+ SPextra5: "k8s"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 41.3
+ SCAint: 808
+ SCAstring: "jabberwocky"
+ SCBbool: false
+ SCBfloat: 1.99
+ SCBint: 77
+ SCBstring: "jango"
+ SPextra6: 111
+
+tags:
+ front-end: true
+ back-end: false
+
+subchart2alias:
+ enabled: false
+
+ensurenull: null
diff --git a/helm/internal/chart/v3/util/testdata/test-values-invalid.schema.json b/helm/internal/chart/v3/util/testdata/test-values-invalid.schema.json
new file mode 100644
index 000000000..35a16a2c4
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/test-values-invalid.schema.json
@@ -0,0 +1 @@
+ 1E1111111
diff --git a/helm/internal/chart/v3/util/testdata/test-values-negative.yaml b/helm/internal/chart/v3/util/testdata/test-values-negative.yaml
new file mode 100644
index 000000000..5a1250bff
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/test-values-negative.yaml
@@ -0,0 +1,14 @@
+firstname: John
+lastname: Doe
+age: -5
+likesCoffee: true
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/internal/chart/v3/util/testdata/test-values.schema.json b/helm/internal/chart/v3/util/testdata/test-values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/test-values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/internal/chart/v3/util/testdata/test-values.yaml b/helm/internal/chart/v3/util/testdata/test-values.yaml
new file mode 100644
index 000000000..042dea664
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/test-values.yaml
@@ -0,0 +1,17 @@
+firstname: John
+lastname: Doe
+age: 25
+likesCoffee: true
+employmentInfo:
+ title: Software Developer
+ salary: 100000
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/README.md b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/README.md
new file mode 100644
index 000000000..536bb9792
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/README.md
@@ -0,0 +1,16 @@
+# Three Level Dependent Chart
+
+This chart is for testing the processing of multi-level dependencies.
+
+Consists of the following charts:
+
+- Library Chart
+- App Chart (Uses Library Chart as dependency, 2x: app1/app2)
+- Umbrella Chart (Has all the app charts as dependencies)
+
+The precedence is as follows: `library < app < umbrella`
+
+Catches two use-cases:
+
+- app overwriting library (app2)
+- umbrella overwriting app and library (app1)
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml
new file mode 100644
index 000000000..1026f8901
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v3
+name: umbrella
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: app1
+ version: 0.1.0
+ condition: app1.enabled
+- name: app2
+ version: 0.1.0
+ condition: app2.enabled
+- name: app3
+ version: 0.1.0
+ condition: app3.enabled
+- name: app4
+ version: 0.1.0
+ condition: app4.enabled
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml
new file mode 100644
index 000000000..5bdf21570
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v3
+name: app1
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml
new file mode 100644
index 000000000..3728aa930
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 1234
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml
new file mode 100644
index 000000000..1313ce4e9
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v3
+name: app2
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml
new file mode 100644
index 000000000..98bd6d24b
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 8080
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml
new file mode 100644
index 000000000..1a80533d0
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v3
+name: app3
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml
new file mode 100644
index 000000000..b738e2a57
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml
@@ -0,0 +1,2 @@
+service:
+ type: ClusterIP
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml
new file mode 100644
index 000000000..886b4b1e4
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v3
+name: app4
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml
new file mode 100644
index 000000000..9bc306361
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v3
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml
new file mode 100644
index 000000000..3728aa930
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 1234
diff --git a/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/values.yaml b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/values.yaml
new file mode 100644
index 000000000..de0bafa51
--- /dev/null
+++ b/helm/internal/chart/v3/util/testdata/three-level-dependent-chart/umbrella/values.yaml
@@ -0,0 +1,14 @@
+app1:
+ enabled: true
+ service:
+ type: ClusterIP
+ port: 3456
+
+app2:
+ enabled: true
+
+app3:
+ enabled: true
+
+app4:
+ enabled: true
diff --git a/helm/internal/chart/v3/util/validate_name.go b/helm/internal/chart/v3/util/validate_name.go
new file mode 100644
index 000000000..6595e085d
--- /dev/null
+++ b/helm/internal/chart/v3/util/validate_name.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+)
+
+// validName is a regular expression for resource names.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+var (
+ // errMissingName indicates that a release (name) was not provided.
+ errMissingName = errors.New("no name provided")
+
+ // errInvalidName indicates that an invalid release name was provided
+ errInvalidName = fmt.Errorf(
+ "invalid release name, must match regex %s and the length must not be longer than 53",
+ validName.String())
+
+ // errInvalidKubernetesName indicates that the name does not meet the Kubernetes
+ // restrictions on metadata names.
+ errInvalidKubernetesName = fmt.Errorf(
+ "invalid metadata name, must match regex %s and the length must not be longer than 253",
+ validName.String())
+)
+
+const (
+ // According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names)
+ // some resource names have a max length of 63 characters while others have a max
+ // length of 253 characters. As we cannot be sure the resources used in a chart, we
+ // therefore need to limit it to 63 chars and reserve 10 chars for additional part to name
+ // of the resource. The reason is that chart maintainers can use release name as part of
+ // the resource name (and some additional chars).
+ maxReleaseNameLen = 53
+ // maxMetadataNameLen is the maximum length Kubernetes allows for any name.
+ maxMetadataNameLen = 253
+)
+
+// ValidateReleaseName performs checks for an entry for a Helm release name
+//
+// For Helm to allow a name, it must be below a certain character count (53) and also match
+// a regular expression.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+func ValidateReleaseName(name string) error {
+ // This case is preserved for backwards compatibility
+ if name == "" {
+ return errMissingName
+
+ }
+ if len(name) > maxReleaseNameLen || !validName.MatchString(name) {
+ return errInvalidName
+ }
+ return nil
+}
+
+// ValidateMetadataName validates the name field of a Kubernetes metadata object.
+//
+// Empty strings, strings longer than 253 chars, or strings that don't match the regexp
+// will fail.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// Deprecated: remove in Helm 4. Name validation now uses rules defined in
+// pkg/lint/rules.validateMetadataNameFunc()
+func ValidateMetadataName(name string) error {
+ if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) {
+ return errInvalidKubernetesName
+ }
+ return nil
+}
diff --git a/helm/internal/chart/v3/util/validate_name_test.go b/helm/internal/chart/v3/util/validate_name_test.go
new file mode 100644
index 000000000..cfc62a0f7
--- /dev/null
+++ b/helm/internal/chart/v3/util/validate_name_test.go
@@ -0,0 +1,91 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "testing"
+
+// TestValidateReleaseName is a regression test for ValidateName
+//
+// Kubernetes has strict naming conventions for resource names. This test represents
+// those conventions.
+//
+// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// NOTE: At the time of this writing, the docs above say that names cannot begin with
+// digits. However, `kubectl`'s regular expression explicit allows this, and
+// Kubernetes (at least as of 1.18) also accepts resources whose names begin with digits.
+func TestValidateReleaseName(t *testing.T) {
+ names := map[string]bool{
+ "": false,
+ "foo": true,
+ "foo.bar1234baz.seventyone": true,
+ "FOO": false,
+ "123baz": true,
+ "foo.BAR.baz": false,
+ "one-two": true,
+ "-two": false,
+ "one_two": false,
+ "a..b": false,
+ "%^$%*@^*@^": false,
+ "example:com": false,
+ "example%%com": false,
+ "a1111111111111111111111111111111111111111111111111111111111z": false,
+ }
+ for input, expectPass := range names {
+ if err := ValidateReleaseName(input); (err == nil) != expectPass {
+ st := "fail"
+ if expectPass {
+ st = "succeed"
+ }
+ t.Errorf("Expected %q to %s", input, st)
+ }
+ }
+}
+
+func TestValidateMetadataName(t *testing.T) {
+ names := map[string]bool{
+ "": false,
+ "foo": true,
+ "foo.bar1234baz.seventyone": true,
+ "FOO": false,
+ "123baz": true,
+ "foo.BAR.baz": false,
+ "one-two": true,
+ "-two": false,
+ "one_two": false,
+ "a..b": false,
+ "%^$%*@^*@^": false,
+ "example:com": false,
+ "example%%com": false,
+ "a1111111111111111111111111111111111111111111111111111111111z": true,
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z": false,
+ }
+ for input, expectPass := range names {
+ if err := ValidateMetadataName(input); (err == nil) != expectPass {
+ st := "fail"
+ if expectPass {
+ st = "succeed"
+ }
+ t.Errorf("Expected %q to %s", input, st)
+ }
+ }
+}
diff --git a/helm/internal/cli/output/color.go b/helm/internal/cli/output/color.go
new file mode 100644
index 000000000..e59cdde87
--- /dev/null
+++ b/helm/internal/cli/output/color.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package output
+
+import (
+ "github.com/fatih/color"
+
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+// ColorizeStatus returns a colorized version of the status string based on the status value
+func ColorizeStatus(status common.Status, noColor bool) string {
+ // Disable color if requested
+ if noColor {
+ return status.String()
+ }
+
+ switch status {
+ case common.StatusDeployed:
+ return color.GreenString(status.String())
+ case common.StatusFailed:
+ return color.RedString(status.String())
+ case common.StatusPendingInstall, common.StatusPendingUpgrade, common.StatusPendingRollback, common.StatusUninstalling:
+ return color.YellowString(status.String())
+ case common.StatusUnknown:
+ return color.RedString(status.String())
+ default:
+ // For uninstalled, superseded, and any other status
+ return status.String()
+ }
+}
+
+// ColorizeHeader returns a colorized version of a header string
+func ColorizeHeader(header string, noColor bool) string {
+ // Disable color if requested
+ if noColor {
+ return header
+ }
+
+ // Use bold for headers
+ return color.New(color.Bold).Sprint(header)
+}
+
+// ColorizeNamespace returns a colorized version of a namespace string
+func ColorizeNamespace(namespace string, noColor bool) string {
+ // Disable color if requested
+ if noColor {
+ return namespace
+ }
+
+ // Use cyan for namespaces
+ return color.CyanString(namespace)
+}
diff --git a/helm/internal/cli/output/color_test.go b/helm/internal/cli/output/color_test.go
new file mode 100644
index 000000000..3b8de39e8
--- /dev/null
+++ b/helm/internal/cli/output/color_test.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package output
+
+import (
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+func TestColorizeStatus(t *testing.T) {
+
+ tests := []struct {
+ name string
+ status common.Status
+ noColor bool
+ envNoColor string
+ wantColor bool // whether we expect color codes in output
+ }{
+ {
+ name: "deployed status with color",
+ status: common.StatusDeployed,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "deployed status without color flag",
+ status: common.StatusDeployed,
+ noColor: true,
+ envNoColor: "",
+ wantColor: false,
+ },
+ {
+ name: "deployed status with NO_COLOR env",
+ status: common.StatusDeployed,
+ noColor: false,
+ envNoColor: "1",
+ wantColor: false,
+ },
+ {
+ name: "failed status with color",
+ status: common.StatusFailed,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "pending install status with color",
+ status: common.StatusPendingInstall,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "unknown status with color",
+ status: common.StatusUnknown,
+ noColor: false,
+ envNoColor: "",
+ wantColor: true,
+ },
+ {
+ name: "superseded status with color",
+ status: common.StatusSuperseded,
+ noColor: false,
+ envNoColor: "",
+ wantColor: false, // superseded doesn't get colored
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv("NO_COLOR", tt.envNoColor)
+
+ result := ColorizeStatus(tt.status, tt.noColor)
+
+ // Check if result contains ANSI escape codes
+ hasColor := strings.Contains(result, "\033[")
+
+ // In test environment, term.IsTerminal will be false, so we won't get color
+ // unless we're testing the logic without terminal detection
+ if hasColor && !tt.wantColor {
+ t.Errorf("ColorizeStatus() returned color when none expected: %q", result)
+ }
+
+ // Always check the status text is present
+ if !strings.Contains(result, tt.status.String()) {
+ t.Errorf("ColorizeStatus() = %q, want to contain %q", result, tt.status.String())
+ }
+ })
+ }
+}
+
+func TestColorizeHeader(t *testing.T) {
+
+ tests := []struct {
+ name string
+ header string
+ noColor bool
+ envNoColor string
+ }{
+ {
+ name: "header with color",
+ header: "NAME",
+ noColor: false,
+ envNoColor: "",
+ },
+ {
+ name: "header without color flag",
+ header: "NAME",
+ noColor: true,
+ envNoColor: "",
+ },
+ {
+ name: "header with NO_COLOR env",
+ header: "NAME",
+ noColor: false,
+ envNoColor: "1",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv("NO_COLOR", tt.envNoColor)
+
+ result := ColorizeHeader(tt.header, tt.noColor)
+
+ // Always check the header text is present
+ if !strings.Contains(result, tt.header) {
+ t.Errorf("ColorizeHeader() = %q, want to contain %q", result, tt.header)
+ }
+ })
+ }
+}
+
+func TestColorizeNamespace(t *testing.T) {
+
+ tests := []struct {
+ name string
+ namespace string
+ noColor bool
+ envNoColor string
+ }{
+ {
+ name: "namespace with color",
+ namespace: "default",
+ noColor: false,
+ envNoColor: "",
+ },
+ {
+ name: "namespace without color flag",
+ namespace: "default",
+ noColor: true,
+ envNoColor: "",
+ },
+ {
+ name: "namespace with NO_COLOR env",
+ namespace: "default",
+ noColor: false,
+ envNoColor: "1",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv("NO_COLOR", tt.envNoColor)
+
+ result := ColorizeNamespace(tt.namespace, tt.noColor)
+
+ // Always check the namespace text is present
+ if !strings.Contains(result, tt.namespace) {
+ t.Errorf("ColorizeNamespace() = %q, want to contain %q", result, tt.namespace)
+ }
+ })
+ }
+}
diff --git a/helm/internal/copystructure/copystructure.go b/helm/internal/copystructure/copystructure.go
new file mode 100644
index 000000000..aa5510298
--- /dev/null
+++ b/helm/internal/copystructure/copystructure.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package copystructure
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Copy performs a deep copy of the given src.
+// This implementation handles the specific use cases needed by Helm.
+func Copy(src any) (any, error) {
+ if src == nil {
+ return make(map[string]any), nil
+ }
+ return copyValue(reflect.ValueOf(src))
+}
+
+// copyValue handles copying using reflection for non-map types
+func copyValue(original reflect.Value) (any, error) {
+ switch original.Kind() {
+ case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64,
+ reflect.Complex64, reflect.Complex128, reflect.String, reflect.Array:
+ return original.Interface(), nil
+
+ case reflect.Interface:
+ if original.IsNil() {
+ return original.Interface(), nil
+ }
+ return copyValue(original.Elem())
+
+ case reflect.Map:
+ if original.IsNil() {
+ return original.Interface(), nil
+ }
+ copied := reflect.MakeMap(original.Type())
+
+ var err error
+ var child any
+ iter := original.MapRange()
+ for iter.Next() {
+ key := iter.Key()
+ value := iter.Value()
+
+ if value.Kind() == reflect.Interface && value.IsNil() {
+ copied.SetMapIndex(key, value)
+ continue
+ }
+
+ child, err = copyValue(value)
+ if err != nil {
+ return nil, err
+ }
+ copied.SetMapIndex(key, reflect.ValueOf(child))
+ }
+ return copied.Interface(), nil
+
+ case reflect.Pointer:
+ if original.IsNil() {
+ return original.Interface(), nil
+ }
+ copied, err := copyValue(original.Elem())
+ if err != nil {
+ return nil, err
+ }
+ ptr := reflect.New(original.Type().Elem())
+ ptr.Elem().Set(reflect.ValueOf(copied))
+ return ptr.Interface(), nil
+
+ case reflect.Slice:
+ if original.IsNil() {
+ return original.Interface(), nil
+ }
+ copied := reflect.MakeSlice(original.Type(), original.Len(), original.Cap())
+ for i := 0; i < original.Len(); i++ {
+ val, err := copyValue(original.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ copied.Index(i).Set(reflect.ValueOf(val))
+ }
+ return copied.Interface(), nil
+
+ case reflect.Struct:
+ copied := reflect.New(original.Type()).Elem()
+ for i := 0; i < original.NumField(); i++ {
+ elem, err := copyValue(original.Field(i))
+ if err != nil {
+ return nil, err
+ }
+ copied.Field(i).Set(reflect.ValueOf(elem))
+ }
+ return copied.Interface(), nil
+
+ case reflect.Func, reflect.Chan, reflect.UnsafePointer:
+ if original.IsNil() {
+ return original.Interface(), nil
+ }
+ return original.Interface(), nil
+
+ default:
+ return original.Interface(), fmt.Errorf("unsupported type %v", original)
+ }
+}
diff --git a/helm/internal/copystructure/copystructure_test.go b/helm/internal/copystructure/copystructure_test.go
new file mode 100644
index 000000000..d1708dc75
--- /dev/null
+++ b/helm/internal/copystructure/copystructure_test.go
@@ -0,0 +1,374 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package copystructure
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCopy_Nil(t *testing.T) {
+ result, err := Copy(nil)
+ require.NoError(t, err)
+ assert.Equal(t, map[string]any{}, result)
+}
+
+func TestCopy_PrimitiveTypes(t *testing.T) {
+ tests := []struct {
+ name string
+ input any
+ }{
+ {"bool", true},
+ {"int", 42},
+ {"int8", int8(8)},
+ {"int16", int16(16)},
+ {"int32", int32(32)},
+ {"int64", int64(64)},
+ {"uint", uint(42)},
+ {"uint8", uint8(8)},
+ {"uint16", uint16(16)},
+ {"uint32", uint32(32)},
+ {"uint64", uint64(64)},
+ {"float32", float32(3.14)},
+ {"float64", 3.14159},
+ {"complex64", complex64(1 + 2i)},
+ {"complex128", 1 + 2i},
+ {"string", "hello world"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := Copy(tt.input)
+ require.NoError(t, err)
+ assert.Equal(t, tt.input, result)
+ })
+ }
+}
+
+func TestCopy_Array(t *testing.T) {
+ input := [3]int{1, 2, 3}
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Equal(t, input, result)
+}
+
+func TestCopy_Slice(t *testing.T) {
+ t.Run("slice of ints", func(t *testing.T) {
+ input := []int{1, 2, 3, 4, 5}
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultSlice, ok := result.([]int)
+ require.True(t, ok)
+ assert.Equal(t, input, resultSlice)
+
+ // Verify it's a deep copy by modifying original
+ input[0] = 999
+ assert.Equal(t, 1, resultSlice[0])
+ })
+
+ t.Run("slice of strings", func(t *testing.T) {
+ input := []string{"a", "b", "c"}
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Equal(t, input, result)
+ })
+
+ t.Run("nil slice", func(t *testing.T) {
+ var input []int
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+ })
+
+ t.Run("slice of maps", func(t *testing.T) {
+ input := []map[string]any{
+ {"key1": "value1"},
+ {"key2": "value2"},
+ }
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultSlice, ok := result.([]map[string]any)
+ require.True(t, ok)
+ assert.Equal(t, input, resultSlice)
+
+ // Verify deep copy
+ input[0]["key1"] = "modified"
+ assert.Equal(t, "value1", resultSlice[0]["key1"])
+ })
+}
+
+func TestCopy_Map(t *testing.T) {
+ t.Run("map[string]any", func(t *testing.T) {
+ input := map[string]any{
+ "string": "value",
+ "int": 42,
+ "bool": true,
+ "nested": map[string]any{
+ "inner": "value",
+ },
+ }
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultMap, ok := result.(map[string]any)
+ require.True(t, ok)
+ assert.Equal(t, input, resultMap)
+
+ // Verify deep copy
+ input["string"] = "modified"
+ assert.Equal(t, "value", resultMap["string"])
+
+ nestedInput := input["nested"].(map[string]any)
+ nestedResult := resultMap["nested"].(map[string]any)
+ nestedInput["inner"] = "modified"
+ assert.Equal(t, "value", nestedResult["inner"])
+ })
+
+ t.Run("map[string]string", func(t *testing.T) {
+ input := map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ }
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Equal(t, input, result)
+ })
+
+ t.Run("nil map", func(t *testing.T) {
+ var input map[string]any
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+ })
+
+ t.Run("map with nil values", func(t *testing.T) {
+ input := map[string]any{
+ "key1": "value1",
+ "key2": nil,
+ }
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultMap, ok := result.(map[string]any)
+ require.True(t, ok)
+ assert.Equal(t, input, resultMap)
+ assert.Nil(t, resultMap["key2"])
+ })
+}
+
+func TestCopy_Struct(t *testing.T) {
+ type TestStruct struct {
+ Name string
+ Age int
+ Active bool
+ Scores []int
+ Metadata map[string]any
+ }
+
+ input := TestStruct{
+ Name: "John",
+ Age: 30,
+ Active: true,
+ Scores: []int{95, 87, 92},
+ Metadata: map[string]any{
+ "level": "advanced",
+ "tags": []string{"go", "programming"},
+ },
+ }
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultStruct, ok := result.(TestStruct)
+ require.True(t, ok)
+ assert.Equal(t, input, resultStruct)
+
+ // Verify deep copy
+ input.Name = "Modified"
+ input.Scores[0] = 999
+ assert.Equal(t, "John", resultStruct.Name)
+ assert.Equal(t, 95, resultStruct.Scores[0])
+}
+
+func TestCopy_Pointer(t *testing.T) {
+ t.Run("pointer to int", func(t *testing.T) {
+ value := 42
+ input := &value
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultPtr, ok := result.(*int)
+ require.True(t, ok)
+ assert.Equal(t, *input, *resultPtr)
+
+ // Verify they point to different memory locations
+ assert.NotSame(t, input, resultPtr)
+
+ // Verify deep copy
+ *input = 999
+ assert.Equal(t, 42, *resultPtr)
+ })
+
+ t.Run("pointer to struct", func(t *testing.T) {
+ type Person struct {
+ Name string
+ Age int
+ }
+
+ input := &Person{Name: "Alice", Age: 25}
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultPtr, ok := result.(*Person)
+ require.True(t, ok)
+ assert.Equal(t, *input, *resultPtr)
+ assert.NotSame(t, input, resultPtr)
+ })
+
+ t.Run("nil pointer", func(t *testing.T) {
+ var input *int
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+ })
+}
+
+func TestCopy_Interface(t *testing.T) {
+ t.Run("any with value", func(t *testing.T) {
+ var input any = "hello"
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Equal(t, input, result)
+ })
+
+ t.Run("nil any", func(t *testing.T) {
+ var input any
+ result, err := Copy(input)
+ require.NoError(t, err)
+ // Copy(nil) returns an empty map according to the implementation
+ assert.Equal(t, map[string]any{}, result)
+ })
+
+ t.Run("any with complex value", func(t *testing.T) {
+ var input any = map[string]any{
+ "key": "value",
+ "nested": map[string]any{
+ "inner": 42,
+ },
+ }
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Equal(t, input, result)
+ })
+}
+
+func TestCopy_ComplexNested(t *testing.T) {
+ input := map[string]any{
+ "users": []map[string]any{
+ {
+ "name": "Alice",
+ "age": 30,
+ "addresses": []map[string]any{
+ {"type": "home", "city": "NYC"},
+ {"type": "work", "city": "SF"},
+ },
+ },
+ {
+ "name": "Bob",
+ "age": 25,
+ "addresses": []map[string]any{
+ {"type": "home", "city": "LA"},
+ },
+ },
+ },
+ "metadata": map[string]any{
+ "version": "1.0",
+ "flags": []bool{true, false, true},
+ },
+ }
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ resultMap, ok := result.(map[string]any)
+ require.True(t, ok)
+ assert.Equal(t, input, resultMap)
+
+ // Verify deep copy by modifying nested values
+ users := input["users"].([]map[string]any)
+ addresses := users[0]["addresses"].([]map[string]any)
+ addresses[0]["city"] = "Modified"
+
+ resultUsers := resultMap["users"].([]map[string]any)
+ resultAddresses := resultUsers[0]["addresses"].([]map[string]any)
+ assert.Equal(t, "NYC", resultAddresses[0]["city"])
+}
+
+func TestCopy_Functions(t *testing.T) {
+ t.Run("function", func(t *testing.T) {
+ input := func() string { return "hello" }
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ // Functions should be copied as-is (same reference)
+ resultFunc, ok := result.(func() string)
+ require.True(t, ok)
+ assert.Equal(t, input(), resultFunc())
+ })
+
+ t.Run("nil function", func(t *testing.T) {
+ var input func()
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+ })
+}
+
+func TestCopy_Channels(t *testing.T) {
+ t.Run("channel", func(t *testing.T) {
+ input := make(chan int, 1)
+ input <- 42
+
+ result, err := Copy(input)
+ require.NoError(t, err)
+
+ // Channels should be copied as-is (same reference)
+ resultChan, ok := result.(chan int)
+ require.True(t, ok)
+
+ // Since channels are copied as references, verify we can read from the result channel
+ value := <-resultChan
+ assert.Equal(t, 42, value)
+ })
+
+ t.Run("nil channel", func(t *testing.T) {
+ var input chan int
+ result, err := Copy(input)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+ })
+}
diff --git a/helm/internal/fileutil/fileutil.go b/helm/internal/fileutil/fileutil.go
new file mode 100644
index 000000000..39e0e330f
--- /dev/null
+++ b/helm/internal/fileutil/fileutil.go
@@ -0,0 +1,50 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileutil
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+)
+
+// AtomicWriteFile atomically (as atomic as os.Rename allows) writes a file to a
+// disk.
+func AtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error {
+ tempFile, err := os.CreateTemp(filepath.Split(filename))
+ if err != nil {
+ return err
+ }
+ tempName := tempFile.Name()
+
+ if _, err := io.Copy(tempFile, reader); err != nil {
+ tempFile.Close() // return value is ignored as we are already on error path
+ return err
+ }
+
+ if err := tempFile.Close(); err != nil {
+ return err
+ }
+
+ if err := os.Chmod(tempName, mode); err != nil {
+ return err
+ }
+
+ return fs.RenameWithFallback(tempName, filename)
+}
diff --git a/helm/internal/fileutil/fileutil_test.go b/helm/internal/fileutil/fileutil_test.go
new file mode 100644
index 000000000..881fbb49d
--- /dev/null
+++ b/helm/internal/fileutil/fileutil_test.go
@@ -0,0 +1,121 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileutil
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// TestAtomicWriteFile tests the happy path of AtomicWriteFile function.
+// It verifies that the function correctly writes content to a file with the specified mode.
+func TestAtomicWriteFile(t *testing.T) {
+ dir := t.TempDir()
+
+ testpath := filepath.Join(dir, "test")
+ stringContent := "Test content"
+ reader := bytes.NewReader([]byte(stringContent))
+ mode := os.FileMode(0644)
+
+ err := AtomicWriteFile(testpath, reader, mode)
+ if err != nil {
+ t.Errorf("AtomicWriteFile error: %s", err)
+ }
+
+ got, err := os.ReadFile(testpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if stringContent != string(got) {
+ t.Fatalf("expected: %s, got: %s", stringContent, string(got))
+ }
+
+ gotinfo, err := os.Stat(testpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if mode != gotinfo.Mode() {
+ t.Fatalf("expected %s: to be the same mode as %s",
+ mode, gotinfo.Mode())
+ }
+}
+
+// TestAtomicWriteFile_CreateTempError tests the error path when os.CreateTemp fails
+func TestAtomicWriteFile_CreateTempError(t *testing.T) {
+ invalidPath := "/invalid/path/that/does/not/exist/testfile"
+
+ reader := bytes.NewReader([]byte("test content"))
+ mode := os.FileMode(0644)
+
+ err := AtomicWriteFile(invalidPath, reader, mode)
+ if err == nil {
+ t.Error("Expected error when CreateTemp fails, but got nil")
+ }
+}
+
+// TestAtomicWriteFile_EmptyContent tests with empty content
+func TestAtomicWriteFile_EmptyContent(t *testing.T) {
+ dir := t.TempDir()
+ testpath := filepath.Join(dir, "empty_helm")
+
+ reader := bytes.NewReader([]byte(""))
+ mode := os.FileMode(0644)
+
+ err := AtomicWriteFile(testpath, reader, mode)
+ if err != nil {
+ t.Errorf("AtomicWriteFile error with empty content: %s", err)
+ }
+
+ got, err := os.ReadFile(testpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(got) != 0 {
+ t.Fatalf("expected empty content, got: %s", string(got))
+ }
+}
+
+// TestAtomicWriteFile_LargeContent tests with large content
+func TestAtomicWriteFile_LargeContent(t *testing.T) {
+ dir := t.TempDir()
+ testpath := filepath.Join(dir, "large_test")
+
+ // Create a large content string
+ largeContent := strings.Repeat("HELM", 1024*1024)
+ reader := bytes.NewReader([]byte(largeContent))
+ mode := os.FileMode(0644)
+
+ err := AtomicWriteFile(testpath, reader, mode)
+ if err != nil {
+ t.Errorf("AtomicWriteFile error with large content: %s", err)
+ }
+
+ got, err := os.ReadFile(testpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if largeContent != string(got) {
+ t.Fatalf("expected large content to match, got different length: %d vs %d", len(largeContent), len(got))
+ }
+}
diff --git a/helm/internal/logging/logging.go b/helm/internal/logging/logging.go
new file mode 100644
index 000000000..674e2db34
--- /dev/null
+++ b/helm/internal/logging/logging.go
@@ -0,0 +1,125 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logging
+
+import (
+ "context"
+ "log/slog"
+ "os"
+ "sync/atomic"
+)
+
+// DebugEnabledFunc is a function type that determines if debug logging is enabled
+// We use a function because we want to check the setting at log time, not when the logger is created
+type DebugEnabledFunc func() bool
+
+// DebugCheckHandler checks settings.Debug at log time
+type DebugCheckHandler struct {
+ handler slog.Handler
+ debugEnabled DebugEnabledFunc
+}
+
+// Enabled implements slog.Handler.Enabled
+func (h *DebugCheckHandler) Enabled(_ context.Context, level slog.Level) bool {
+ if level == slog.LevelDebug {
+ if h.debugEnabled == nil {
+ return false
+ }
+ return h.debugEnabled()
+ }
+ return true // Always log other levels
+}
+
+// Handle implements slog.Handler.Handle
+func (h *DebugCheckHandler) Handle(ctx context.Context, r slog.Record) error {
+ return h.handler.Handle(ctx, r)
+}
+
+// WithAttrs implements slog.Handler.WithAttrs
+func (h *DebugCheckHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ return &DebugCheckHandler{
+ handler: h.handler.WithAttrs(attrs),
+ debugEnabled: h.debugEnabled,
+ }
+}
+
+// WithGroup implements slog.Handler.WithGroup
+func (h *DebugCheckHandler) WithGroup(name string) slog.Handler {
+ return &DebugCheckHandler{
+ handler: h.handler.WithGroup(name),
+ debugEnabled: h.debugEnabled,
+ }
+}
+
+// NewLogger creates a new logger with dynamic debug checking
+func NewLogger(debugEnabled DebugEnabledFunc) *slog.Logger {
+ // Create base handler that removes timestamps
+ baseHandler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
+ // Always use LevelDebug here to allow all messages through
+ // Our custom handler will do the filtering
+ Level: slog.LevelDebug,
+ ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
+ // Remove the time attribute
+ if a.Key == slog.TimeKey {
+ return slog.Attr{}
+ }
+ return a
+ },
+ })
+
+ // Wrap with our dynamic debug-checking handler
+ dynamicHandler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ return slog.New(dynamicHandler)
+}
+
+// LoggerSetterGetter is an interface that can set and get a logger
+type LoggerSetterGetter interface {
+ // SetLogger sets a new slog.Handler
+ SetLogger(newHandler slog.Handler)
+ // Logger returns the slog.Logger created from the slog.Handler
+ Logger() *slog.Logger
+}
+
+type LogHolder struct {
+ // logger is an atomic.Pointer[slog.Logger] to store the slog.Logger
+ // We use atomic.Pointer for thread safety
+ logger atomic.Pointer[slog.Logger]
+}
+
+// Logger returns the logger for the LogHolder. If nil, returns slog.Default().
+func (l *LogHolder) Logger() *slog.Logger {
+ if lg := l.logger.Load(); lg != nil {
+ return lg
+ }
+ return slog.New(slog.DiscardHandler) // Should never be reached
+}
+
+// SetLogger sets the logger for the LogHolder. If nil, sets the default logger.
+func (l *LogHolder) SetLogger(newHandler slog.Handler) {
+ if newHandler == nil {
+ l.logger.Store(slog.New(slog.DiscardHandler)) // Assume nil as discarding logs
+ return
+ }
+ l.logger.Store(slog.New(newHandler))
+}
+
+// Ensure LogHolder implements LoggerSetterGetter
+var _ LoggerSetterGetter = &LogHolder{}
diff --git a/helm/internal/logging/logging_test.go b/helm/internal/logging/logging_test.go
new file mode 100644
index 000000000..d22a47a31
--- /dev/null
+++ b/helm/internal/logging/logging_test.go
@@ -0,0 +1,373 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logging
+
+import (
+ "bytes"
+ "context"
+ "log/slog"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLogHolder_Logger(t *testing.T) {
+ t.Run("should return new logger with a then set handler", func(t *testing.T) {
+ holder := &LogHolder{}
+ buf := &bytes.Buffer{}
+ handler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+
+ holder.SetLogger(handler)
+ logger := holder.Logger()
+
+ assert.NotNil(t, logger)
+
+ // Test that the logger works
+ logger.Info("test message")
+ assert.Contains(t, buf.String(), "test message")
+ })
+
+ t.Run("should return discard - defaultlogger when no handler is set", func(t *testing.T) {
+ holder := &LogHolder{}
+ logger := holder.Logger()
+
+ assert.Equal(t, slog.Handler(slog.DiscardHandler), logger.Handler())
+ })
+}
+
+func TestLogHolder_SetLogger(t *testing.T) {
+ t.Run("sets logger with valid handler", func(t *testing.T) {
+ holder := &LogHolder{}
+ buf := &bytes.Buffer{}
+ handler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+
+ holder.SetLogger(handler)
+ logger := holder.Logger()
+
+ assert.NotNil(t, logger)
+
+ // Compare the handler directly
+ assert.Equal(t, handler, logger.Handler())
+ })
+
+ t.Run("sets discard logger with nil handler", func(t *testing.T) {
+ holder := &LogHolder{}
+
+ holder.SetLogger(nil)
+ logger := holder.Logger()
+
+ assert.NotNil(t, logger)
+
+ assert.Equal(t, slog.Handler(slog.DiscardHandler), logger.Handler())
+ })
+
+ t.Run("can replace existing logger", func(t *testing.T) {
+ holder := &LogHolder{}
+
+ // Set first logger
+ buf1 := &bytes.Buffer{}
+ handler1 := slog.NewTextHandler(buf1, &slog.HandlerOptions{Level: slog.LevelDebug})
+ holder.SetLogger(handler1)
+
+ logger1 := holder.Logger()
+ assert.Equal(t, handler1, logger1.Handler())
+
+ // Replace with second logger
+ buf2 := &bytes.Buffer{}
+ handler2 := slog.NewTextHandler(buf2, &slog.HandlerOptions{Level: slog.LevelDebug})
+ holder.SetLogger(handler2)
+
+ logger2 := holder.Logger()
+ assert.Equal(t, handler2, logger2.Handler())
+ })
+}
+
+func TestLogHolder_InterfaceCompliance(t *testing.T) {
+ t.Run("implements LoggerSetterGetter interface", func(_ *testing.T) {
+ var _ LoggerSetterGetter = &LogHolder{}
+ })
+
+ t.Run("interface methods work correctly", func(t *testing.T) {
+ var holder LoggerSetterGetter = &LogHolder{}
+
+ buf := &bytes.Buffer{}
+ handler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+
+ holder.SetLogger(handler)
+ logger := holder.Logger()
+
+ assert.NotNil(t, logger)
+ assert.Equal(t, handler, logger.Handler())
+ })
+}
+
+func TestDebugCheckHandler_Enabled(t *testing.T) {
+ t.Run("returns debugEnabled function result for debug level", func(t *testing.T) {
+ // Test with debug enabled
+ debugEnabled := func() bool { return true }
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ assert.True(t, handler.Enabled(t.Context(), slog.LevelDebug))
+ })
+
+ t.Run("returns false for debug level when debug disabled", func(t *testing.T) {
+ // Test with debug disabled
+ debugEnabled := func() bool { return false }
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ assert.False(t, handler.Enabled(t.Context(), slog.LevelDebug))
+ })
+
+ t.Run("always returns true for non-debug levels", func(t *testing.T) {
+ debugEnabled := func() bool { return false } // Debug disabled
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ // Even with debug disabled, other levels should always be enabled
+ assert.True(t, handler.Enabled(t.Context(), slog.LevelInfo))
+ assert.True(t, handler.Enabled(t.Context(), slog.LevelWarn))
+ assert.True(t, handler.Enabled(t.Context(), slog.LevelError))
+ })
+
+ t.Run("calls debugEnabled function dynamically", func(t *testing.T) {
+ callCount := 0
+ debugEnabled := func() bool {
+ callCount++
+ return callCount%2 == 1 // Alternates between true and false
+ }
+
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ // First call should return true
+ assert.True(t, handler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 1, callCount)
+
+ // Second call should return false
+ assert.False(t, handler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 2, callCount)
+
+ // Third call should return true again
+ assert.True(t, handler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 3, callCount)
+ })
+}
+
+func TestDebugCheckHandler_Handle(t *testing.T) {
+ t.Run("delegates to underlying handler", func(t *testing.T) {
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: func() bool { return true },
+ }
+
+ record := slog.NewRecord(time.Now(), slog.LevelInfo, "test message", 0)
+ err := handler.Handle(t.Context(), record)
+
+ assert.NoError(t, err)
+ assert.Contains(t, buf.String(), "test message")
+ })
+
+ t.Run("handles context correctly", func(t *testing.T) {
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: func() bool { return true },
+ }
+
+ type testKey string
+ ctx := context.WithValue(t.Context(), testKey("test"), "value")
+ record := slog.NewRecord(time.Now(), slog.LevelInfo, "context test", 0)
+ err := handler.Handle(ctx, record)
+
+ assert.NoError(t, err)
+ assert.Contains(t, buf.String(), "context test")
+ })
+}
+
+func TestDebugCheckHandler_WithAttrs(t *testing.T) {
+ t.Run("returns new DebugCheckHandler with attributes", func(t *testing.T) {
+ logger := NewLogger(func() bool { return true })
+ handler := logger.Handler()
+ newHandler := handler.WithAttrs([]slog.Attr{
+ slog.String("key1", "value1"),
+ slog.Int("key2", 42),
+ })
+
+ // Should return a DebugCheckHandler
+ debugHandler, ok := newHandler.(*DebugCheckHandler)
+ assert.True(t, ok)
+ assert.NotNil(t, debugHandler)
+
+ // Should preserve the debugEnabled function
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelDebug))
+
+ // Should have the attributes applied to the underlying handler
+ assert.NotEqual(t, handler, debugHandler.handler)
+ })
+
+ t.Run("preserves debugEnabled function", func(t *testing.T) {
+ callCount := 0
+ debugEnabled := func() bool {
+ callCount++
+ return callCount%2 == 1
+ }
+
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ attrs := []slog.Attr{slog.String("test", "value")}
+ newHandler := handler.WithAttrs(attrs)
+
+ // The new handler should use the same debugEnabled function
+ assert.True(t, newHandler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 1, callCount)
+
+ assert.False(t, newHandler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 2, callCount)
+ })
+}
+
+func TestDebugCheckHandler_WithGroup(t *testing.T) {
+ t.Run("returns new DebugCheckHandler with group", func(t *testing.T) {
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: func() bool { return true },
+ }
+
+ newHandler := handler.WithGroup("testgroup")
+
+ // Should return a DebugCheckHandler
+ debugHandler, ok := newHandler.(*DebugCheckHandler)
+ assert.True(t, ok)
+ assert.NotNil(t, debugHandler)
+
+ // Should preserve the debugEnabled function
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelDebug))
+
+ // Should have the group applied to the underlying handler
+ assert.NotEqual(t, handler.handler, debugHandler.handler)
+ })
+
+ t.Run("preserves debugEnabled function", func(t *testing.T) {
+ callCount := 0
+ debugEnabled := func() bool {
+ callCount++
+ return callCount%2 == 1
+ }
+
+ buf := &bytes.Buffer{}
+ baseHandler := slog.NewTextHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug})
+ handler := &DebugCheckHandler{
+ handler: baseHandler,
+ debugEnabled: debugEnabled,
+ }
+
+ newHandler := handler.WithGroup("testgroup")
+
+ // The new handler should use the same debugEnabled function
+ assert.True(t, newHandler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 1, callCount)
+
+ assert.False(t, newHandler.Enabled(t.Context(), slog.LevelDebug))
+ assert.Equal(t, 2, callCount)
+ })
+}
+
+func TestDebugCheckHandler_Integration(t *testing.T) {
+ t.Run("works with NewLogger function", func(t *testing.T) {
+ debugEnabled := func() bool { return true }
+ logger := NewLogger(debugEnabled)
+
+ assert.NotNil(t, logger)
+
+ // The logger should have a DebugCheckHandler
+ handler := logger.Handler()
+ debugHandler, ok := handler.(*DebugCheckHandler)
+ assert.True(t, ok)
+
+ // Should enable debug when debugEnabled returns true
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelDebug))
+
+ // Should enable other levels regardless
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelInfo))
+ })
+
+ t.Run("dynamic debug checking works in practice", func(t *testing.T) {
+ debugState := false
+ debugEnabled := func() bool { return debugState }
+
+ logger := NewLogger(debugEnabled)
+
+ // Initially debug should be disabled
+ assert.False(t, logger.Handler().(*DebugCheckHandler).Enabled(t.Context(), slog.LevelDebug))
+
+ // Enable debug
+ debugState = true
+ assert.True(t, logger.Handler().(*DebugCheckHandler).Enabled(t.Context(), slog.LevelDebug))
+
+ // Disable debug again
+ debugState = false
+ assert.False(t, logger.Handler().(*DebugCheckHandler).Enabled(t.Context(), slog.LevelDebug))
+ })
+
+ t.Run("handles nil debugEnabled function", func(t *testing.T) {
+ logger := NewLogger(nil)
+
+ assert.NotNil(t, logger)
+
+ // The logger should have a DebugCheckHandler
+ handler := logger.Handler()
+ debugHandler, ok := handler.(*DebugCheckHandler)
+ assert.True(t, ok)
+
+ // When debugEnabled is nil, debug level should be disabled (default behavior)
+ assert.False(t, debugHandler.Enabled(t.Context(), slog.LevelDebug))
+
+ // Other levels should always be enabled
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelInfo))
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelWarn))
+ assert.True(t, debugHandler.Enabled(t.Context(), slog.LevelError))
+ })
+}
diff --git a/helm/internal/monocular/client.go b/helm/internal/monocular/client.go
new file mode 100644
index 000000000..f4ef5d647
--- /dev/null
+++ b/helm/internal/monocular/client.go
@@ -0,0 +1,62 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package monocular
+
+import (
+ "errors"
+ "net/url"
+)
+
+// ErrHostnameNotProvided indicates the url is missing a hostname
+var ErrHostnameNotProvided = errors.New("no hostname provided")
+
+// Client represents a client capable of communicating with the Monocular API.
+type Client struct {
+
+ // The base URL for requests
+ BaseURL string
+}
+
+// New creates a new client
+func New(u string) (*Client, error) {
+
+ // Validate we have a URL
+ if err := validate(u); err != nil {
+ return nil, err
+ }
+
+ return &Client{
+ BaseURL: u,
+ }, nil
+}
+
+// Validate if the base URL for monocular is valid.
+func validate(u string) error {
+
+ // Check if it is parsable
+ p, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+
+ // Check that a host is attached
+ if p.Hostname() == "" {
+ return ErrHostnameNotProvided
+ }
+
+ return nil
+}
diff --git a/helm/internal/monocular/client_test.go b/helm/internal/monocular/client_test.go
new file mode 100644
index 000000000..abf914ef5
--- /dev/null
+++ b/helm/internal/monocular/client_test.go
@@ -0,0 +1,31 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package monocular
+
+import (
+ "testing"
+)
+
+func TestNew(t *testing.T) {
+ c, err := New("https://hub.helm.sh")
+ if err != nil {
+ t.Errorf("error creating client: %s", err)
+ }
+ if c.BaseURL != "https://hub.helm.sh" {
+ t.Errorf("incorrect BaseURL. Expected \"https://hub.helm.sh\" but got %q", c.BaseURL)
+ }
+}
diff --git a/helm/internal/monocular/doc.go b/helm/internal/monocular/doc.go
new file mode 100644
index 000000000..5d402d35f
--- /dev/null
+++ b/helm/internal/monocular/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package monocular contains the logic for interacting with a Monocular
+// compatible search API endpoint. For example, as implemented by the Artifact
+// Hub.
+//
+// This is a library for interacting with a monocular compatible search API
+package monocular
diff --git a/helm/internal/monocular/search.go b/helm/internal/monocular/search.go
new file mode 100644
index 000000000..fcf04b7a4
--- /dev/null
+++ b/helm/internal/monocular/search.go
@@ -0,0 +1,145 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package monocular
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "time"
+
+ "helm.sh/helm/v4/internal/version"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// SearchPath is the url path to the search API in monocular.
+const SearchPath = "api/chartsvc/v1/charts/search"
+
+// The structs below represent the structure of the response from the monocular
+// search API. The structs were not imported from monocular because monocular
+// imports from Helm v2 (avoiding circular version dependency) and the mappings
+// are slightly different (monocular search results do not directly reflect
+// the struct definitions).
+
+// SearchResult represents an individual chart result
+type SearchResult struct {
+ ID string `json:"id"`
+ ArtifactHub ArtifactHub `json:"artifactHub"`
+ Type string `json:"type"`
+ Attributes Chart `json:"attributes"`
+ Links Links `json:"links"`
+ Relationships Relationships `json:"relationships"`
+}
+
+// ArtifactHub represents data specific to Artifact Hub instances
+type ArtifactHub struct {
+ PackageURL string `json:"packageUrl"`
+}
+
+// Chart is the attributes for the chart
+type Chart struct {
+ Name string `json:"name"`
+ Repo Repo `json:"repo"`
+ Description string `json:"description"`
+ Home string `json:"home"`
+ Keywords []string `json:"keywords"`
+ Maintainers []chart.Maintainer `json:"maintainers"`
+ Sources []string `json:"sources"`
+ Icon string `json:"icon"`
+}
+
+// Repo contains the name in monocular the url for the repository
+type Repo struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+}
+
+// Links provides a set of links relative to the chartsvc base
+type Links struct {
+ Self string `json:"self"`
+}
+
+// Relationships provides information on the latest version of the chart
+type Relationships struct {
+ LatestChartVersion LatestChartVersion `json:"latestChartVersion"`
+}
+
+// LatestChartVersion provides the details on the latest version of the chart
+type LatestChartVersion struct {
+ Data ChartVersion `json:"data"`
+ Links Links `json:"links"`
+}
+
+// ChartVersion provides the specific data on the chart version
+type ChartVersion struct {
+ Version string `json:"version"`
+ AppVersion string `json:"app_version"`
+ Created time.Time `json:"created"`
+ Digest string `json:"digest"`
+ Urls []string `json:"urls"`
+ Readme string `json:"readme"`
+ Values string `json:"values"`
+}
+
+// Search performs a search against the monocular search API
+func (c *Client) Search(term string) ([]SearchResult, error) {
+
+ // Create the URL to the search endpoint
+ // Note, this is currently an internal API for the Hub. This should be
+ // formatted without showing how monocular operates.
+ p, err := url.Parse(c.BaseURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the path to the monocular API endpoint for search
+ p.Path = path.Join(p.Path, SearchPath)
+
+ p.RawQuery = "q=" + url.QueryEscape(term)
+
+ // Create request
+ req, err := http.NewRequest(http.MethodGet, p.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the user agent so that monocular can identify where the request
+ // is coming from
+ req.Header.Set("User-Agent", version.GetUserAgent())
+
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to fetch %s : %s", p.String(), res.Status)
+ }
+
+ result := &searchResponse{}
+
+ json.NewDecoder(res.Body).Decode(result)
+
+ return result.Data, nil
+}
+
+type searchResponse struct {
+ Data []SearchResult `json:"data"`
+}
diff --git a/helm/internal/monocular/search_test.go b/helm/internal/monocular/search_test.go
new file mode 100644
index 000000000..fc82ef4b4
--- /dev/null
+++ b/helm/internal/monocular/search_test.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package monocular
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+// A search response for phpmyadmin containing 2 results
+var searchResult = `{"data":[{"id":"stable/phpmyadmin","type":"chart","attributes":{"name":"phpmyadmin","repo":{"name":"stable","url":"https://charts.helm.sh/stable"},"description":"phpMyAdmin is an mysql administration frontend","home":"https://www.phpmyadmin.net/","keywords":["mariadb","mysql","phpmyadmin"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"sources":["https://github.com/bitnami/bitnami-docker-phpmyadmin"],"icon":""},"links":{"self":"/v1/charts/stable/phpmyadmin"},"relationships":{"latestChartVersion":{"data":{"version":"3.0.0","app_version":"4.9.0-1","created":"2019-08-08T17:57:31.38Z","digest":"119c499251bffd4b06ff0cd5ac98c2ce32231f84899fb4825be6c2d90971c742","urls":["https://charts.helm.sh/stable/phpmyadmin-3.0.0.tgz"],"readme":"/v1/assets/stable/phpmyadmin/versions/3.0.0/README.md","values":"/v1/assets/stable/phpmyadmin/versions/3.0.0/values.yaml"},"links":{"self":"/v1/charts/stable/phpmyadmin/versions/3.0.0"}}}},{"id":"bitnami/phpmyadmin","type":"chart","attributes":{"name":"phpmyadmin","repo":{"name":"bitnami","url":"https://charts.bitnami.com"},"description":"phpMyAdmin is an mysql administration frontend","home":"https://www.phpmyadmin.net/","keywords":["mariadb","mysql","phpmyadmin"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"sources":["https://github.com/bitnami/bitnami-docker-phpmyadmin"],"icon":""},"links":{"self":"/v1/charts/bitnami/phpmyadmin"},"relationships":{"latestChartVersion":{"data":{"version":"3.0.0","app_version":"4.9.0-1","created":"2019-08-08T18:34:13.341Z","digest":"66d77cf6d8c2b52c488d0a294cd4996bd5bad8dc41d3829c394498fb401c008a","urls":["https://charts.bitnami.com/bitnami/phpmyadmin-3.0.0.tgz"],"readme":"/v1/assets/bitnami/phpmyadmin/versions/3.0.0/README.md","values":"/v1/assets/bitnami/phpmyadmin/versions/3.0.0/values.yaml"},"links":{"self":"/v1/charts/bitnami/phpmyadmin/versions/3.0.0"}}}}]}`
+
+func TestSearch(t *testing.T) {
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ fmt.Fprintln(w, searchResult)
+ }))
+ defer ts.Close()
+
+ c, err := New(ts.URL)
+ if err != nil {
+ t.Errorf("unable to create monocular client: %s", err)
+ }
+
+ results, err := c.Search("phpmyadmin")
+ if err != nil {
+ t.Errorf("unable to search monocular: %s", err)
+ }
+
+ if len(results) != 2 {
+ t.Error("Did not receive the expected number of results")
+ }
+}
diff --git a/helm/internal/plugin/cache/cache.go b/helm/internal/plugin/cache/cache.go
new file mode 100644
index 000000000..f3b737477
--- /dev/null
+++ b/helm/internal/plugin/cache/cache.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cache provides a key generator for vcs urls.
+package cache // import "helm.sh/helm/v4/internal/plugin/cache"
+
+import (
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Thanks glide!
+
+// scpSyntaxRe matches the SCP-like addresses used to access repos over SSH.
+var scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
+
+// Key generates a cache key based on a url or scp string. The key is file
+// system safe.
+func Key(repo string) (string, error) {
+ var (
+ u *url.URL
+ err error
+ )
+ if m := scpSyntaxRe.FindStringSubmatch(repo); m != nil {
+ // Match SCP-like syntax and convert it to a URL.
+ // Eg, "git@github.com:user/repo" becomes
+ // "ssh://git@github.com/user/repo".
+ u = &url.URL{
+ User: url.User(m[1]),
+ Host: m[2],
+ Path: "/" + m[3],
+ }
+ } else {
+ u, err = url.Parse(repo)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ var key strings.Builder
+ if u.Scheme != "" {
+ key.WriteString(u.Scheme)
+ key.WriteString("-")
+ }
+ if u.User != nil && u.User.Username() != "" {
+ key.WriteString(u.User.Username())
+ key.WriteString("-")
+ }
+ key.WriteString(u.Host)
+ if u.Path != "" {
+ key.WriteString(strings.ReplaceAll(u.Path, "/", "-"))
+ }
+ return strings.ReplaceAll(key.String(), ":", "-"), nil
+}
diff --git a/helm/internal/plugin/config.go b/helm/internal/plugin/config.go
new file mode 100644
index 000000000..785bde840
--- /dev/null
+++ b/helm/internal/plugin/config.go
@@ -0,0 +1,53 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+
+ "go.yaml.in/yaml/v3"
+)
+
+// Config represents a plugin type specific configuration
+// It is expected to type assert (cast) the Config to its expected underlying type (schema.ConfigCLIV1, schema.ConfigGetterV1, etc).
+type Config interface {
+ Validate() error
+}
+
+func unmarshalConfig(pluginType string, configData map[string]any) (Config, error) {
+ pluginTypeMeta, ok := pluginTypesIndex[pluginType]
+ if !ok {
+ return nil, fmt.Errorf("unknown plugin type %q", pluginType)
+ }
+
+ // TODO: Avoid (yaml) serialization/deserialization for type conversion here
+
+ data, err := yaml.Marshal(configData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshel config data (plugin type %s): %w", pluginType, err)
+ }
+
+ config := reflect.New(pluginTypeMeta.configType)
+ d := yaml.NewDecoder(bytes.NewReader(data))
+ d.KnownFields(true)
+ if err := d.Decode(config.Interface()); err != nil {
+ return nil, err
+ }
+
+ return config.Interface().(Config), nil
+}
diff --git a/helm/internal/plugin/config_test.go b/helm/internal/plugin/config_test.go
new file mode 100644
index 000000000..beac05abf
--- /dev/null
+++ b/helm/internal/plugin/config_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestUnmarshaConfig(t *testing.T) {
+ // Test unmarshalling a CLI plugin config
+ {
+ config, err := unmarshalConfig("cli/v1", map[string]any{
+ "usage": "usage string",
+ "shortHelp": "short help string",
+ "longHelp": "long help string",
+ "ignoreFlags": true,
+ })
+ require.NoError(t, err)
+
+ require.IsType(t, &schema.ConfigCLIV1{}, config)
+ assert.Equal(t, schema.ConfigCLIV1{
+ Usage: "usage string",
+ ShortHelp: "short help string",
+ LongHelp: "long help string",
+ IgnoreFlags: true,
+ }, *(config.(*schema.ConfigCLIV1)))
+ }
+
+ // Test unmarshalling invalid config data
+ {
+ config, err := unmarshalConfig("cli/v1", map[string]any{
+ "invalid field": "foo",
+ })
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "field not found")
+ assert.Nil(t, config)
+ }
+}
diff --git a/helm/internal/plugin/descriptor.go b/helm/internal/plugin/descriptor.go
new file mode 100644
index 000000000..ba92b3c55
--- /dev/null
+++ b/helm/internal/plugin/descriptor.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+// Descriptor describes a plugin to find
+type Descriptor struct {
+ // Name is the name of the plugin
+ Name string
+ // Type is the type of the plugin (cli, getter, postrenderer)
+ Type string
+}
diff --git a/helm/internal/plugin/doc.go b/helm/internal/plugin/doc.go
new file mode 100644
index 000000000..39ba6300b
--- /dev/null
+++ b/helm/internal/plugin/doc.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+---
+TODO: move this section to public plugin package
+
+Package plugin provides the implementation of the Helm plugin system.
+
+Conceptually, "plugins" enable extending Helm's functionality external to Helm's core codebase. The plugin system allows
+code to fetch plugins by type, then invoke the plugin with an input as required by that plugin type. The plugin
+returning an output for the caller to consume.
+
+An example of a plugin invocation:
+```
+d := plugin.Descriptor{
+ Type: "example/v1", //
+}
+plgs, err := plugin.FindPlugins([]string{settings.PluginsDirectory}, d)
+
+for _, plg := range plgs {
+ input := &plugin.Input{
+ Message: schema.InputMessageExampleV1{ // The type of the input message is defined by the plugin's "type" (example/v1 here)
+ ...
+ },
+ }
+ output, err := plg.Invoke(context.Background(), input)
+ if err != nil {
+ ...
+ }
+
+ // consume the output, using type assertion to convert to the expected output type (as defined by the plugin's "type")
+ outputMessage, ok := output.Message.(schema.OutputMessageExampleV1)
+}
+
+---
+
+Package `plugin` provides the implementation of the Helm plugin system.
+
+Helm plugins are exposed to uses as the "Plugin" type, the basic interface that primarily support the "Invoke" method.
+
+# Plugin Runtimes
+Internally, plugins must be implemented by a "runtime" that is responsible for creating the plugin instance, and dispatching the plugin's invocation to the plugin's implementation.
+For example:
+- forming environment variables and command line args for subprocess execution
+- converting input to JSON and invoking a function in a Wasm runtime
+
+Internally, the code structure is:
+Runtime.CreatePlugin()
+ |
+ | (creates)
+ |
+ \---> PluginRuntime
+ |
+ | (implements)
+ v
+ Plugin.Invoke()
+
+# Plugin Types
+Each plugin implements a specific functionality, denoted by the plugin's "type" e.g. "getter/v1". The "type" includes a version, in order to allow a given types messaging schema and invocation options to evolve.
+
+Specifically, the plugin's "type" specifies the contract for the input and output messages that are expected to be passed to the plugin, and returned from the plugin. The plugin's "type" also defines the options that can be passed to the plugin when invoking it.
+
+# Metadata
+Each plugin must have a `plugin.yaml`, that defines the plugin's metadata. The metadata includes the plugin's name, version, and other information.
+
+For legacy plugins, the type is inferred by which fields are set on the plugin: a downloader plugin is inferred when metadata contains a "downloaders" yaml node, otherwise it is assumed to define a Helm CLI subcommand.
+
+For v1 plugins, the metadata includes explicit apiVersion and type fields. It will also contain type-specific Config, and RuntimeConfig fields.
+
+# Runtime and type cardinality
+From a cardinality perspective, this means there a "few" runtimes, and "many" plugins types. It is also expected that the subprocess runtime will not be extended to support extra plugin types, and deprecated in a future version of Helm.
+
+Future ideas that are intended to be implemented include extending the plugin system to support future Wasm standards. Or allowing Helm SDK user's to inject "plugins" that are actually implemented as native go modules. Or even moving Helm's internal functionality e.g. yaml rendering engine to be used as an "in-built" plugin, along side other plugins that may implement other (non-go template) rendering engines.
+*/
+
+package plugin
diff --git a/helm/internal/plugin/error.go b/helm/internal/plugin/error.go
new file mode 100644
index 000000000..212460cea
--- /dev/null
+++ b/helm/internal/plugin/error.go
@@ -0,0 +1,29 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+// InvokeExecError is returned when a plugin invocation returns a non-zero status/exit code
+// - subprocess plugin: child process exit code
+// - extism plugin: wasm function return code
+type InvokeExecError struct {
+ ExitCode int // Exit code from plugin code execution
+ Err error // Underlying error
+}
+
+// Error implements the error interface
+func (e *InvokeExecError) Error() string {
+ return e.Err.Error()
+}
diff --git a/helm/internal/plugin/installer/base.go b/helm/internal/plugin/installer/base.go
new file mode 100644
index 000000000..c21a245a8
--- /dev/null
+++ b/helm/internal/plugin/installer/base.go
@@ -0,0 +1,45 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+type base struct {
+ // Source is the reference to a plugin
+ Source string
+ // PluginsDirectory is the directory where plugins are installed
+ PluginsDirectory string
+}
+
+func newBase(source string) base {
+ settings := cli.New()
+ return base{
+ Source: source,
+ PluginsDirectory: settings.PluginsDirectory,
+ }
+}
+
+// Path is where the plugin will be installed.
+func (b *base) Path() string {
+ if b.Source == "" {
+ return ""
+ }
+ return filepath.Join(b.PluginsDirectory, filepath.Base(b.Source))
+}
diff --git a/helm/internal/plugin/installer/base_test.go b/helm/internal/plugin/installer/base_test.go
new file mode 100644
index 000000000..62b77bde5
--- /dev/null
+++ b/helm/internal/plugin/installer/base_test.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "testing"
+)
+
+func TestPath(t *testing.T) {
+ tests := []struct {
+ source string
+ helmPluginsDir string
+ expectPath string
+ }{
+ {
+ source: "",
+ helmPluginsDir: "/helm/data/plugins",
+ expectPath: "",
+ }, {
+ source: "https://github.com/jkroepke/helm-secrets",
+ helmPluginsDir: "/helm/data/plugins",
+ expectPath: "/helm/data/plugins/helm-secrets",
+ },
+ }
+
+ for _, tt := range tests {
+
+ t.Setenv("HELM_PLUGINS", tt.helmPluginsDir)
+ baseIns := newBase(tt.source)
+ baseInsPath := baseIns.Path()
+ if baseInsPath != tt.expectPath {
+ t.Errorf("expected name %s, got %s", tt.expectPath, baseInsPath)
+ }
+ }
+}
diff --git a/helm/internal/plugin/installer/doc.go b/helm/internal/plugin/installer/doc.go
new file mode 100644
index 000000000..a4cf384bf
--- /dev/null
+++ b/helm/internal/plugin/installer/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package installer provides an interface for installing Helm plugins.
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
diff --git a/helm/internal/plugin/installer/extractor.go b/helm/internal/plugin/installer/extractor.go
new file mode 100644
index 000000000..b753dfbca
--- /dev/null
+++ b/helm/internal/plugin/installer/extractor.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strings"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+)
+
+// TarGzExtractor extracts gzip compressed tar archives
+type TarGzExtractor struct{}
+
+// Extractor provides an interface for extracting archives
+type Extractor interface {
+ Extract(buffer *bytes.Buffer, targetDir string) error
+}
+
+// Extractors contains a map of suffixes and matching implementations of extractor to return
+var Extractors = map[string]Extractor{
+ ".tar.gz": &TarGzExtractor{},
+ ".tgz": &TarGzExtractor{},
+}
+
+// Convert a media type to an extractor extension.
+//
+// This should be refactored in Helm 4, combined with the extension-based mechanism.
+func mediaTypeToExtension(mt string) (string, bool) {
+ switch strings.ToLower(mt) {
+ case "application/gzip", "application/x-gzip", "application/x-tgz", "application/x-gtar":
+ return ".tgz", true
+ case "application/octet-stream":
+ // Generic binary type - we'll need to check the URL suffix
+ return "", false
+ default:
+ return "", false
+ }
+}
+
+// NewExtractor creates a new extractor matching the source file name
+func NewExtractor(source string) (Extractor, error) {
+ for suffix, extractor := range Extractors {
+ if strings.HasSuffix(source, suffix) {
+ return extractor, nil
+ }
+ }
+ return nil, fmt.Errorf("no extractor implemented yet for %s", source)
+}
+
+// cleanJoin resolves dest as a subpath of root.
+//
+// This function runs several security checks on the path, generating an error if
+// the supplied `dest` looks suspicious or would result in dubious behavior on the
+// filesystem.
+//
+// cleanJoin assumes that any attempt by `dest` to break out of the CWD is an attempt
+// to be malicious. (If you don't care about this, use the securejoin-filepath library.)
+// It will emit an error if it detects paths that _look_ malicious, operating on the
+// assumption that we don't actually want to do anything with files that already
+// appear to be nefarious.
+//
+// - The character `:` is considered illegal because it is a separator on UNIX and a
+// drive designator on Windows.
+// - The path component `..` is considered suspicious, and therefore illegal
+// - The character \ (backslash) is treated as a path separator and is converted to /.
+// - Beginning a path with a path separator is illegal
+// - Rudimentary symlink protections are offered by SecureJoin.
+func cleanJoin(root, dest string) (string, error) {
+
+ // On Windows, this is a drive separator. On UNIX-like, this is the path list separator.
+ // In neither case do we want to trust a TAR that contains these.
+ if strings.Contains(dest, ":") {
+ return "", errors.New("path contains ':', which is illegal")
+ }
+
+ // The Go tar library does not convert separators for us.
+ // We assume here, as we do elsewhere, that `\\` means a Windows path.
+ dest = strings.ReplaceAll(dest, "\\", "/")
+
+ // We want to alert the user that something bad was attempted. Cleaning it
+ // is not a good practice.
+ if slices.Contains(strings.Split(dest, "/"), "..") {
+ return "", errors.New("path contains '..', which is illegal")
+ }
+
+ // If a path is absolute, the creator of the TAR is doing something shady.
+ if path.IsAbs(dest) {
+ return "", errors.New("path is absolute, which is illegal")
+ }
+
+ // SecureJoin will do some cleaning, as well as some rudimentary checking of symlinks.
+ // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up
+ // being wrong or returning an error. This was introduced in v0.4.0.
+ root = filepath.Clean(root)
+ newpath, err := securejoin.SecureJoin(root, dest)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.ToSlash(newpath), nil
+}
+
+// Extract extracts compressed archives
+//
+// Implements Extractor.
+func (g *TarGzExtractor) Extract(buffer *bytes.Buffer, targetDir string) error {
+ uncompressedStream, err := gzip.NewReader(buffer)
+ if err != nil {
+ return err
+ }
+
+ if err := os.MkdirAll(targetDir, 0755); err != nil {
+ return err
+ }
+
+ tarReader := tar.NewReader(uncompressedStream)
+ for {
+ header, err := tarReader.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ path, err := cleanJoin(targetDir, header.Name)
+ if err != nil {
+ return err
+ }
+
+ switch header.Typeflag {
+ case tar.TypeDir:
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ case tar.TypeReg:
+ // Ensure parent directory exists
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(outFile, tarReader); err != nil {
+ outFile.Close()
+ return err
+ }
+ outFile.Close()
+ // We don't want to process these extension header files.
+ case tar.TypeXGlobalHeader, tar.TypeXHeader:
+ continue
+ default:
+ return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name)
+ }
+ }
+ return nil
+}
+
+// stripPluginName is a helper that relies on some sort of convention for plugin name (plugin-name-)
+func stripPluginName(name string) string {
+ var strippedName string
+ for suffix := range Extractors {
+ if before, ok := strings.CutSuffix(name, suffix); ok {
+ strippedName = before
+ break
+ }
+ }
+ re := regexp.MustCompile(`(.*)-[0-9]+\..*`)
+ return re.ReplaceAllString(strippedName, `$1`)
+}
diff --git a/helm/internal/plugin/installer/http_installer.go b/helm/internal/plugin/installer/http_installer.go
new file mode 100644
index 000000000..bb96314f4
--- /dev/null
+++ b/helm/internal/plugin/installer/http_installer.go
@@ -0,0 +1,191 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "bytes"
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/cache"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// HTTPInstaller installs plugins from an archive served by a web server.
+type HTTPInstaller struct {
+ CacheDir string
+ PluginName string
+ base
+ extractor Extractor
+ getter getter.Getter
+ // Cached data to avoid duplicate downloads
+ pluginData []byte
+ provData []byte
+}
+
+// NewHTTPInstaller creates a new HttpInstaller.
+func NewHTTPInstaller(source string) (*HTTPInstaller, error) {
+ key, err := cache.Key(source)
+ if err != nil {
+ return nil, err
+ }
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ return nil, err
+ }
+
+ get, err := getter.All(new(cli.EnvSettings)).ByScheme("http")
+ if err != nil {
+ return nil, err
+ }
+
+ i := &HTTPInstaller{
+ CacheDir: helmpath.CachePath("plugins", key),
+ PluginName: stripPluginName(filepath.Base(source)),
+ base: newBase(source),
+ extractor: extractor,
+ getter: get,
+ }
+ return i, nil
+}
+
+// Install downloads and extracts the tarball into the cache directory
+// and installs into the plugin directory.
+//
+// Implements Installer.
+func (i *HTTPInstaller) Install() error {
+ // Ensure plugin data is cached
+ if i.pluginData == nil {
+ pluginData, err := i.getter.Get(i.Source)
+ if err != nil {
+ return err
+ }
+ i.pluginData = pluginData.Bytes()
+ }
+
+ // Save the original tarball to plugins directory for verification
+ // Extract metadata to get the actual plugin name and version
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData))
+ if err != nil {
+ return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+ tarballPath := helmpath.DataPath("plugins", filename)
+ if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil {
+ return fmt.Errorf("failed to create plugins directory: %w", err)
+ }
+ if err := os.WriteFile(tarballPath, i.pluginData, 0644); err != nil {
+ return fmt.Errorf("failed to save tarball: %w", err)
+ }
+
+ // Ensure prov data is cached if available
+ if i.provData == nil {
+ // Try to download .prov file if it exists
+ provURL := i.Source + ".prov"
+ if provData, err := i.getter.Get(provURL); err == nil {
+ i.provData = provData.Bytes()
+ }
+ }
+
+ // Save prov file if we have the data
+ if i.provData != nil {
+ provPath := tarballPath + ".prov"
+ if err := os.WriteFile(provPath, i.provData, 0644); err != nil {
+ slog.Debug("failed to save provenance file", "error", err)
+ }
+ }
+
+ if err := i.extractor.Extract(bytes.NewBuffer(i.pluginData), i.CacheDir); err != nil {
+ return fmt.Errorf("extracting files from archive: %w", err)
+ }
+
+ // Detect where the plugin.yaml actually is
+ pluginRoot, err := detectPluginRoot(i.CacheDir)
+ if err != nil {
+ return err
+ }
+
+ // Validate plugin structure if needed
+ if err := validatePluginName(pluginRoot, i.PluginName); err != nil {
+ return err
+ }
+
+ src, err := filepath.Abs(pluginRoot)
+ if err != nil {
+ return err
+ }
+
+ slog.Debug("copying", "source", src, "path", i.Path())
+ return fs.CopyDir(src, i.Path())
+}
+
+// Update updates a local repository
+// Not implemented for now since tarball most likely will be packaged by version
+func (i *HTTPInstaller) Update() error {
+ return fmt.Errorf("method Update() not implemented for HttpInstaller")
+}
+
+// Path is overridden because we want to join on the plugin name not the file name
+func (i HTTPInstaller) Path() string {
+ if i.Source == "" {
+ return ""
+ }
+ return helmpath.DataPath("plugins", i.PluginName)
+}
+
+// SupportsVerification returns true if the HTTP installer can verify plugins
+func (i *HTTPInstaller) SupportsVerification() bool {
+ // Only support verification for tarball URLs
+ return strings.HasSuffix(i.Source, ".tgz") || strings.HasSuffix(i.Source, ".tar.gz")
+}
+
+// GetVerificationData returns cached plugin and provenance data for verification
+func (i *HTTPInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) {
+ if !i.SupportsVerification() {
+ return nil, nil, "", fmt.Errorf("verification not supported for this source")
+ }
+
+ // Download plugin data once and cache it
+ if i.pluginData == nil {
+ data, err := i.getter.Get(i.Source)
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to download plugin: %w", err)
+ }
+ i.pluginData = data.Bytes()
+ }
+
+ // Download prov data once and cache it if available
+ if i.provData == nil {
+ provData, err := i.getter.Get(i.Source + ".prov")
+ if err != nil {
+ // If provenance file doesn't exist, set provData to nil
+ // The verification logic will handle this gracefully
+ i.provData = nil
+ } else {
+ i.provData = provData.Bytes()
+ }
+ }
+
+ return i.pluginData, i.provData, filepath.Base(i.Source), nil
+}
diff --git a/helm/internal/plugin/installer/http_installer_test.go b/helm/internal/plugin/installer/http_installer_test.go
new file mode 100644
index 000000000..7f7e6cef6
--- /dev/null
+++ b/helm/internal/plugin/installer/http_installer_test.go
@@ -0,0 +1,601 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io/fs"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+var _ Installer = new(HTTPInstaller)
+
+// Fake http client
+type TestHTTPGetter struct {
+ MockResponse *bytes.Buffer
+ MockError error
+}
+
+func (t *TestHTTPGetter) Get(_ string, _ ...getter.Option) (*bytes.Buffer, error) {
+ return t.MockResponse, t.MockError
+}
+
+// Fake plugin tarball data
+var fakePluginB64 = "H4sIAAAAAAAAA+3SQUvDMBgG4Jz7K0LwapdvSxrwJig6mCKC5xHabBaXdDSt4L+3cQ56mV42ZPg+lw+SF5LwZmXf3OV206/rMGEnIgdG6zTJaDmee4y01FOlZpqGHJGZSsb1qS401sfOtpyz0FTup9xv+2dqNep/N/IP6zdHPSMVXCh1sH8yhtGMDBUFFTL1r4iIcXnUWxzwz/sP1rsrLkbfQGTvro11E4ZlmcucRNZHu04py1OO73OVi2Vbb7td9vp7nXevtvsKRpGVjfc2VMP2xf3t4mH5tHi5mz8ub+bPk9JXIvvr5wMAAAAAAAAAAAAAAAAAAAAAnLVPqwHcXQAoAAA="
+
+func TestStripName(t *testing.T) {
+ if stripPluginName("fake-plugin-0.0.1.tar.gz") != "fake-plugin" {
+ t.Errorf("name does not match expected value")
+ }
+ if stripPluginName("fake-plugin-0.0.1.tgz") != "fake-plugin" {
+ t.Errorf("name does not match expected value")
+ }
+ if stripPluginName("fake-plugin.tgz") != "fake-plugin" {
+ t.Errorf("name does not match expected value")
+ }
+ if stripPluginName("fake-plugin.tar.gz") != "fake-plugin" {
+ t.Errorf("name does not match expected value")
+ }
+}
+
+func mockArchiveServer() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !strings.HasSuffix(r.URL.Path, ".tar.gz") {
+ w.Header().Add("Content-Type", "text/html")
+ fmt.Fprintln(w, "broken")
+ return
+ }
+ w.Header().Add("Content-Type", "application/gzip")
+ fmt.Fprintln(w, "test")
+ }))
+}
+
+func TestHTTPInstaller(t *testing.T) {
+ ensure.HelmHome(t)
+
+ srv := mockArchiveServer()
+ defer srv.Close()
+ source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
+
+ if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil {
+ t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
+ }
+
+ i, err := NewForSource(source, "0.0.1")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // ensure a HTTPInstaller was returned
+ httpInstaller, ok := i.(*HTTPInstaller)
+ if !ok {
+ t.Fatal("expected a HTTPInstaller")
+ }
+
+ // inject fake http client responding with minimal plugin tarball
+ mockTgz, err := base64.StdEncoding.DecodeString(fakePluginB64)
+ if err != nil {
+ t.Fatalf("Could not decode fake tgz plugin: %s", err)
+ }
+
+ httpInstaller.getter = &TestHTTPGetter{
+ MockResponse: bytes.NewBuffer(mockTgz),
+ }
+
+ // install the plugin
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+ if i.Path() != helmpath.DataPath("plugins", "fake-plugin") {
+ t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/fake-plugin', got %q", i.Path())
+ }
+
+ // Install again to test plugin exists error
+ if err := Install(i); err == nil {
+ t.Fatal("expected error for plugin exists, got none")
+ } else if err.Error() != "plugin already exists" {
+ t.Fatalf("expected error for plugin exists, got (%v)", err)
+ }
+
+}
+
+func TestHTTPInstallerNonExistentVersion(t *testing.T) {
+ ensure.HelmHome(t)
+ srv := mockArchiveServer()
+ defer srv.Close()
+ source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
+
+ if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil {
+ t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
+ }
+
+ i, err := NewForSource(source, "0.0.2")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // ensure a HTTPInstaller was returned
+ httpInstaller, ok := i.(*HTTPInstaller)
+ if !ok {
+ t.Fatal("expected a HTTPInstaller")
+ }
+
+ // inject fake http client responding with error
+ httpInstaller.getter = &TestHTTPGetter{
+ MockError: fmt.Errorf("failed to download plugin for some reason"),
+ }
+
+ // attempt to install the plugin
+ if err := Install(i); err == nil {
+ t.Fatal("expected error from http client")
+ }
+
+}
+
+func TestHTTPInstallerUpdate(t *testing.T) {
+ srv := mockArchiveServer()
+ defer srv.Close()
+ source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
+ ensure.HelmHome(t)
+
+ if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil {
+ t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
+ }
+
+ i, err := NewForSource(source, "0.0.1")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // ensure a HTTPInstaller was returned
+ httpInstaller, ok := i.(*HTTPInstaller)
+ if !ok {
+ t.Fatal("expected a HTTPInstaller")
+ }
+
+ // inject fake http client responding with minimal plugin tarball
+ mockTgz, err := base64.StdEncoding.DecodeString(fakePluginB64)
+ if err != nil {
+ t.Fatalf("Could not decode fake tgz plugin: %s", err)
+ }
+
+ httpInstaller.getter = &TestHTTPGetter{
+ MockResponse: bytes.NewBuffer(mockTgz),
+ }
+
+ // install the plugin before updating
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+ if i.Path() != helmpath.DataPath("plugins", "fake-plugin") {
+ t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/fake-plugin', got %q", i.Path())
+ }
+
+ // Update plugin, should fail because it is not implemented
+ if err := Update(i); err == nil {
+ t.Fatal("update method not implemented for http installer")
+ }
+}
+
+func TestExtract(t *testing.T) {
+ source := "https://repo.localdomain/plugins/fake-plugin-0.0.1.tar.gz"
+
+ tempDir := t.TempDir()
+
+ // Get current umask to predict expected permissions
+ currentUmask := syscall.Umask(0)
+ syscall.Umask(currentUmask)
+
+ // Write a tarball to a buffer for us to extract
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ var files = []struct {
+ Name, Body string
+ Mode int64
+ }{
+ {"plugin.yaml", "plugin metadata", 0600},
+ {"README.md", "some text", 0777},
+ }
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: tar.TypeReg,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Add pax global headers. This should be ignored.
+ // Note the PAX header that isn't global cannot be written using WriteHeader.
+ // Details are in the internal Go function for the tar packaged named
+ // allowedFormats. For a TypeXHeader it will return a message stating
+ // "cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"
+ if err := tw.WriteHeader(&tar.Header{
+ Name: "pax_global_header",
+ Typeflag: tar.TypeXGlobalHeader,
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+ // END tarball creation
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err = extractor.Extract(&buf, tempDir); err != nil {
+ t.Fatalf("Did not expect error but got error: %v", err)
+ }
+
+ // Calculate expected permissions after umask is applied
+ expectedPluginYAMLPerm := os.FileMode(0600 &^ currentUmask)
+ expectedReadmePerm := os.FileMode(0777 &^ currentUmask)
+
+ pluginYAMLFullPath := filepath.Join(tempDir, "plugin.yaml")
+ if info, err := os.Stat(pluginYAMLFullPath); err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ t.Fatalf("Expected %s to exist but doesn't", pluginYAMLFullPath)
+ }
+ t.Fatal(err)
+ } else if info.Mode().Perm() != expectedPluginYAMLPerm {
+ t.Fatalf("Expected %s to have %o mode but has %o (umask: %o)",
+ pluginYAMLFullPath, expectedPluginYAMLPerm, info.Mode().Perm(), currentUmask)
+ }
+
+ readmeFullPath := filepath.Join(tempDir, "README.md")
+ if info, err := os.Stat(readmeFullPath); err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ t.Fatalf("Expected %s to exist but doesn't", readmeFullPath)
+ }
+ t.Fatal(err)
+ } else if info.Mode().Perm() != expectedReadmePerm {
+ t.Fatalf("Expected %s to have %o mode but has %o (umask: %o)",
+ readmeFullPath, expectedReadmePerm, info.Mode().Perm(), currentUmask)
+ }
+
+}
+
+func TestCleanJoin(t *testing.T) {
+ for i, fixture := range []struct {
+ path string
+ expect string
+ expectError bool
+ }{
+ {"foo/bar.txt", "/tmp/foo/bar.txt", false},
+ {"/foo/bar.txt", "", true},
+ {"./foo/bar.txt", "/tmp/foo/bar.txt", false},
+ {"./././././foo/bar.txt", "/tmp/foo/bar.txt", false},
+ {"../../../../foo/bar.txt", "", true},
+ {"foo/../../../../bar.txt", "", true},
+ {"c:/foo/bar.txt", "/tmp/c:/foo/bar.txt", true},
+ {"foo\\bar.txt", "/tmp/foo/bar.txt", false},
+ {"c:\\foo\\bar.txt", "", true},
+ } {
+ out, err := cleanJoin("/tmp", fixture.path)
+ if err != nil {
+ if !fixture.expectError {
+ t.Errorf("Test %d: Path was not cleaned: %s", i, err)
+ }
+ continue
+ }
+ if fixture.expect != out {
+ t.Errorf("Test %d: Expected %q but got %q", i, fixture.expect, out)
+ }
+ }
+
+}
+
+func TestMediaTypeToExtension(t *testing.T) {
+
+ for mt, shouldPass := range map[string]bool{
+ "": false,
+ "application/gzip": true,
+ "application/x-gzip": true,
+ "application/x-tgz": true,
+ "application/x-gtar": true,
+ "application/json": false,
+ } {
+ ext, ok := mediaTypeToExtension(mt)
+ if ok != shouldPass {
+ t.Errorf("Media type %q failed test", mt)
+ }
+ if shouldPass && ext == "" {
+ t.Errorf("Expected an extension but got empty string")
+ }
+ if !shouldPass && len(ext) != 0 {
+ t.Error("Expected extension to be empty for unrecognized type")
+ }
+ }
+}
+
+func TestExtractWithNestedDirectories(t *testing.T) {
+ source := "https://repo.localdomain/plugins/nested-plugin-0.0.1.tar.gz"
+ tempDir := t.TempDir()
+
+ // Write a tarball with nested directory structure
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ var files = []struct {
+ Name string
+ Body string
+ Mode int64
+ TypeFlag byte
+ }{
+ {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg},
+ {"bin/", "", 0755, tar.TypeDir},
+ {"bin/plugin", "#!/usr/bin/env sh\necho plugin", 0755, tar.TypeReg},
+ {"docs/", "", 0755, tar.TypeDir},
+ {"docs/README.md", "readme content", 0644, tar.TypeReg},
+ {"docs/examples/", "", 0755, tar.TypeDir},
+ {"docs/examples/example1.yaml", "example content", 0644, tar.TypeReg},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: file.TypeFlag,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if file.TypeFlag == tar.TypeReg {
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // First extraction
+ if err = extractor.Extract(&buf, tempDir); err != nil {
+ t.Fatalf("First extraction failed: %v", err)
+ }
+
+ // Verify nested structure was created
+ nestedFile := filepath.Join(tempDir, "docs", "examples", "example1.yaml")
+ if _, err := os.Stat(nestedFile); err != nil {
+ t.Fatalf("Expected nested file %s to exist but got error: %v", nestedFile, err)
+ }
+
+ // Reset buffer for second extraction
+ buf.Reset()
+ gz = gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ // Second extraction to same directory (should not fail)
+ if err = extractor.Extract(&buf, tempDir); err != nil {
+ t.Fatalf("Second extraction to existing directory failed: %v", err)
+ }
+}
+
+func TestExtractWithExistingDirectory(t *testing.T) {
+ source := "https://repo.localdomain/plugins/test-plugin-0.0.1.tar.gz"
+ tempDir := t.TempDir()
+
+ // Pre-create the cache directory structure
+ cacheDir := filepath.Join(tempDir, "cache")
+ if err := os.MkdirAll(filepath.Join(cacheDir, "existing", "dir"), 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a file in the existing directory
+ existingFile := filepath.Join(cacheDir, "existing", "file.txt")
+ if err := os.WriteFile(existingFile, []byte("existing content"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write a tarball
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ files := []struct {
+ Name string
+ Body string
+ Mode int64
+ TypeFlag byte
+ }{
+ {"plugin.yaml", "plugin metadata", 0600, tar.TypeReg},
+ {"existing/", "", 0755, tar.TypeDir},
+ {"existing/dir/", "", 0755, tar.TypeDir},
+ {"existing/dir/newfile.txt", "new content", 0644, tar.TypeReg},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: file.TypeFlag,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if file.TypeFlag == tar.TypeReg {
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ extractor, err := NewExtractor(source)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Extract to directory with existing content
+ if err = extractor.Extract(&buf, cacheDir); err != nil {
+ t.Fatalf("Extraction to directory with existing content failed: %v", err)
+ }
+
+ // Verify new file was created
+ newFile := filepath.Join(cacheDir, "existing", "dir", "newfile.txt")
+ if _, err := os.Stat(newFile); err != nil {
+ t.Fatalf("Expected new file %s to exist but got error: %v", newFile, err)
+ }
+
+ // Verify existing file is still there
+ if _, err := os.Stat(existingFile); err != nil {
+ t.Fatalf("Expected existing file %s to still exist but got error: %v", existingFile, err)
+ }
+}
+
+func TestExtractPluginInSubdirectory(t *testing.T) {
+ ensure.HelmHome(t)
+ source := "https://repo.localdomain/plugins/subdir-plugin-1.0.0.tar.gz"
+ tempDir := t.TempDir()
+
+ // Create a tarball where plugin files are in a subdirectory
+ var tarbuf bytes.Buffer
+ tw := tar.NewWriter(&tarbuf)
+ files := []struct {
+ Name string
+ Body string
+ Mode int64
+ TypeFlag byte
+ }{
+ {"my-plugin/", "", 0755, tar.TypeDir},
+ {"my-plugin/plugin.yaml", "name: my-plugin\nversion: 1.0.0\nusage: test\ndescription: test plugin\ncommand: $HELM_PLUGIN_DIR/bin/my-plugin", 0644, tar.TypeReg},
+ {"my-plugin/bin/", "", 0755, tar.TypeDir},
+ {"my-plugin/bin/my-plugin", "#!/usr/bin/env sh\necho test", 0755, tar.TypeReg},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Typeflag: file.TypeFlag,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if file.TypeFlag == tar.TypeReg {
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ gz := gzip.NewWriter(&buf)
+ if _, err := gz.Write(tarbuf.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ gz.Close()
+
+ // Test the installer
+ installer := &HTTPInstaller{
+ CacheDir: tempDir,
+ PluginName: "subdir-plugin",
+ base: newBase(source),
+ extractor: &TarGzExtractor{},
+ }
+
+ // Create a mock getter
+ installer.getter = &TestHTTPGetter{
+ MockResponse: &buf,
+ }
+
+ // Ensure the destination directory doesn't exist
+ // (In a real scenario, this is handled by installer.Install() wrapper)
+ destPath := installer.Path()
+ if err := os.RemoveAll(destPath); err != nil {
+ t.Fatalf("Failed to clean destination path: %v", err)
+ }
+
+ // Install should handle the subdirectory correctly
+ if err := installer.Install(); err != nil {
+ t.Fatalf("Failed to install plugin with subdirectory: %v", err)
+ }
+
+ // The plugin should be installed from the subdirectory
+ // Check that detectPluginRoot found the correct location
+ pluginRoot, err := detectPluginRoot(tempDir)
+ if err != nil {
+ t.Fatalf("Failed to detect plugin root: %v", err)
+ }
+
+ expectedRoot := filepath.Join(tempDir, "my-plugin")
+ if pluginRoot != expectedRoot {
+ t.Errorf("Expected plugin root to be %s but got %s", expectedRoot, pluginRoot)
+ }
+}
diff --git a/helm/internal/plugin/installer/installer.go b/helm/internal/plugin/installer/installer.go
new file mode 100644
index 000000000..e3975c2d7
--- /dev/null
+++ b/helm/internal/plugin/installer/installer.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// ErrMissingMetadata indicates that plugin.yaml is missing.
+var ErrMissingMetadata = errors.New("plugin metadata (plugin.yaml) missing")
+
+// Options contains options for plugin installation.
+type Options struct {
+ // Verify enables signature verification before installation
+ Verify bool
+ // Keyring is the path to the keyring for verification
+ Keyring string
+}
+
+// Installer provides an interface for installing helm client plugins.
+type Installer interface {
+ // Install adds a plugin.
+ Install() error
+ // Path is the directory of the installed plugin.
+ Path() string
+ // Update updates a plugin.
+ Update() error
+}
+
+// Verifier provides an interface for installers that support verification.
+type Verifier interface {
+ // SupportsVerification returns true if this installer can verify plugins
+ SupportsVerification() bool
+ // GetVerificationData returns plugin and provenance data for verification
+ GetVerificationData() (archiveData, provData []byte, filename string, err error)
+}
+
+// Install installs a plugin.
+func Install(i Installer) error {
+ _, err := InstallWithOptions(i, Options{})
+ return err
+}
+
+// VerificationResult contains the result of plugin verification
+type VerificationResult struct {
+ SignedBy []string
+ Fingerprint string
+ FileHash string
+}
+
+// InstallWithOptions installs a plugin with options.
+func InstallWithOptions(i Installer, opts Options) (*VerificationResult, error) {
+
+ if err := os.MkdirAll(filepath.Dir(i.Path()), 0755); err != nil {
+ return nil, err
+ }
+ if _, pathErr := os.Stat(i.Path()); !os.IsNotExist(pathErr) {
+ slog.Warn("plugin already exists", slog.String("path", i.Path()), slog.Any("error", pathErr))
+ return nil, errors.New("plugin already exists")
+ }
+
+ var result *VerificationResult
+
+ // If verification is requested, check if installer supports it
+ if opts.Verify {
+ verifier, ok := i.(Verifier)
+ if !ok || !verifier.SupportsVerification() {
+ return nil, fmt.Errorf("--verify is only supported for plugin tarballs (.tgz files)")
+ }
+
+ // Get verification data (works for both memory and file-based installers)
+ archiveData, provData, filename, err := verifier.GetVerificationData()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get verification data: %w", err)
+ }
+
+ // Check if provenance data exists
+ if len(provData) == 0 {
+ // No .prov file found - emit warning but continue installation
+ fmt.Fprintf(os.Stderr, "WARNING: No provenance file found for plugin. Plugin is not signed and cannot be verified.\n")
+ } else {
+ // Provenance data exists - verify the plugin
+ verification, err := plugin.VerifyPlugin(archiveData, provData, filename, opts.Keyring)
+ if err != nil {
+ return nil, fmt.Errorf("plugin verification failed: %w", err)
+ }
+
+ // Collect verification info
+ result = &VerificationResult{
+ SignedBy: make([]string, 0),
+ Fingerprint: fmt.Sprintf("%X", verification.SignedBy.PrimaryKey.Fingerprint),
+ FileHash: verification.FileHash,
+ }
+ for name := range verification.SignedBy.Identities {
+ result.SignedBy = append(result.SignedBy, name)
+ }
+ }
+ }
+
+ if err := i.Install(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// Update updates a plugin.
+func Update(i Installer) error {
+ if _, pathErr := os.Stat(i.Path()); os.IsNotExist(pathErr) {
+ slog.Warn("plugin does not exist", slog.String("path", i.Path()), slog.Any("error", pathErr))
+ return errors.New("plugin does not exist")
+ }
+ return i.Update()
+}
+
+// NewForSource determines the correct Installer for the given source.
+func NewForSource(source, version string) (installer Installer, err error) {
+ if strings.HasPrefix(source, fmt.Sprintf("%s://", registry.OCIScheme)) {
+ // Source is an OCI registry reference
+ installer, err = NewOCIInstaller(source)
+ } else if isLocalReference(source) {
+ // Source is a local directory
+ installer, err = NewLocalInstaller(source)
+ } else if isRemoteHTTPArchive(source) {
+ installer, err = NewHTTPInstaller(source)
+ } else {
+ installer, err = NewVCSInstaller(source, version)
+ }
+
+ if err != nil {
+ return installer, fmt.Errorf("cannot get information about plugin source %q (if it's a local directory, does it exist?), last error was: %w", source, err)
+ }
+
+ return
+}
+
+// FindSource determines the correct Installer for the given source.
+func FindSource(location string) (Installer, error) {
+ installer, err := existingVCSRepo(location)
+ if err != nil && err.Error() == "Cannot detect VCS" {
+ slog.Warn(
+ "cannot get information about plugin source",
+ slog.String("location", location),
+ slog.Any("error", err),
+ )
+ return installer, errors.New("cannot get information about plugin source")
+ }
+ return installer, err
+}
+
+// isLocalReference checks if the source exists on the filesystem.
+func isLocalReference(source string) bool {
+ _, err := os.Stat(source)
+ return err == nil
+}
+
+// isRemoteHTTPArchive checks if the source is a http/https url and is an archive
+//
+// It works by checking whether the source looks like a URL and, if it does, running a
+// HEAD operation to see if the remote resource is a file that we understand.
+func isRemoteHTTPArchive(source string) bool {
+ if strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://") {
+ // First, check if the URL ends with a known archive suffix
+ // This is more reliable than content-type detection
+ for suffix := range Extractors {
+ if strings.HasSuffix(source, suffix) {
+ return true
+ }
+ }
+
+ // If no suffix match, try HEAD request to check content type
+ res, err := http.Head(source)
+ if err != nil {
+ // If we get an error at the network layer, we can't install it. So
+ // we return false.
+ return false
+ }
+
+ // Next, we look for the content type or content disposition headers to see
+ // if they have matching extractors.
+ contentType := res.Header.Get("content-type")
+ foundSuffix, ok := mediaTypeToExtension(contentType)
+ if !ok {
+ // Media type not recognized
+ return false
+ }
+
+ for suffix := range Extractors {
+ if strings.HasSuffix(foundSuffix, suffix) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// isPlugin checks if the directory contains a plugin.yaml file.
+func isPlugin(dirname string) bool {
+ _, err := os.Stat(filepath.Join(dirname, plugin.PluginFileName))
+ return err == nil
+}
diff --git a/helm/internal/plugin/installer/installer_test.go b/helm/internal/plugin/installer/installer_test.go
new file mode 100644
index 000000000..dcd76fe9c
--- /dev/null
+++ b/helm/internal/plugin/installer/installer_test.go
@@ -0,0 +1,47 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import "testing"
+
+func TestIsRemoteHTTPArchive(t *testing.T) {
+ srv := mockArchiveServer()
+ defer srv.Close()
+ source := srv.URL + "/plugins/fake-plugin-0.0.1.tar.gz"
+
+ if isRemoteHTTPArchive("/not/a/URL") {
+ t.Errorf("Expected non-URL to return false")
+ }
+
+ // URLs with valid archive extensions are considered valid archives
+ // even if the server is unreachable (optimization to avoid unnecessary HTTP requests)
+ if !isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.tgz") {
+ t.Errorf("URL with .tgz extension should be considered a valid archive")
+ }
+
+ // Test with invalid extension and unreachable server
+ if isRemoteHTTPArchive("https://127.0.0.1:123/fake/plugin-1.2.3.notanarchive") {
+ t.Errorf("Bad URL without valid extension should not succeed")
+ }
+
+ if !isRemoteHTTPArchive(source) {
+ t.Errorf("Expected %q to be a valid archive URL", source)
+ }
+
+ if isRemoteHTTPArchive(source + "-not-an-extension") {
+ t.Error("Expected media type match to fail")
+ }
+}
diff --git a/helm/internal/plugin/installer/local_installer.go b/helm/internal/plugin/installer/local_installer.go
new file mode 100644
index 000000000..1c8314282
--- /dev/null
+++ b/helm/internal/plugin/installer/local_installer.go
@@ -0,0 +1,219 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// ErrPluginNotADirectory indicates that the plugin path is not a directory.
+var ErrPluginNotADirectory = errors.New("expected plugin to be a directory (containing a file 'plugin.yaml')")
+
+// LocalInstaller installs plugins from the filesystem.
+type LocalInstaller struct {
+ base
+ isArchive bool
+ extractor Extractor
+ pluginData []byte // Cached plugin data
+ provData []byte // Cached provenance data
+}
+
+// NewLocalInstaller creates a new LocalInstaller.
+func NewLocalInstaller(source string) (*LocalInstaller, error) {
+ src, err := filepath.Abs(source)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get absolute path to plugin: %w", err)
+ }
+ i := &LocalInstaller{
+ base: newBase(src),
+ }
+
+ // Check if source is an archive
+ if isLocalArchive(src) {
+ i.isArchive = true
+ extractor, err := NewExtractor(src)
+ if err != nil {
+ return nil, fmt.Errorf("unsupported archive format: %w", err)
+ }
+ i.extractor = extractor
+ }
+
+ return i, nil
+}
+
+// isLocalArchive checks if the file is a supported archive format
+func isLocalArchive(path string) bool {
+ for suffix := range Extractors {
+ if strings.HasSuffix(path, suffix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Install creates a symlink to the plugin directory.
+//
+// Implements Installer.
+func (i *LocalInstaller) Install() error {
+ if i.isArchive {
+ return i.installFromArchive()
+ }
+ return i.installFromDirectory()
+}
+
+// installFromDirectory creates a symlink to the plugin directory
+func (i *LocalInstaller) installFromDirectory() error {
+ stat, err := os.Stat(i.Source)
+ if err != nil {
+ return err
+ }
+ if !stat.IsDir() {
+ return ErrPluginNotADirectory
+ }
+
+ if !isPlugin(i.Source) {
+ return ErrMissingMetadata
+ }
+ slog.Debug("symlinking", "source", i.Source, "path", i.Path())
+ return os.Symlink(i.Source, i.Path())
+}
+
+// installFromArchive extracts and installs a plugin from a tarball
+func (i *LocalInstaller) installFromArchive() error {
+ // Read the archive file
+ data, err := os.ReadFile(i.Source)
+ if err != nil {
+ return fmt.Errorf("failed to read archive: %w", err)
+ }
+
+ // Copy the original tarball to plugins directory for verification
+ // Extract metadata to get the actual plugin name and version
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(data))
+ if err != nil {
+ return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+ tarballPath := helmpath.DataPath("plugins", filename)
+ if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil {
+ return fmt.Errorf("failed to create plugins directory: %w", err)
+ }
+ if err := os.WriteFile(tarballPath, data, 0644); err != nil {
+ return fmt.Errorf("failed to save tarball: %w", err)
+ }
+
+ // Check for and copy .prov file if it exists
+ provSource := i.Source + ".prov"
+ if provData, err := os.ReadFile(provSource); err == nil {
+ provPath := tarballPath + ".prov"
+ if err := os.WriteFile(provPath, provData, 0644); err != nil {
+ slog.Debug("failed to save provenance file", "error", err)
+ }
+ }
+
+ // Create a temporary directory for extraction
+ tempDir, err := os.MkdirTemp("", "helm-plugin-extract-")
+ if err != nil {
+ return fmt.Errorf("failed to create temp directory: %w", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ // Extract the archive
+ buffer := bytes.NewBuffer(data)
+ if err := i.extractor.Extract(buffer, tempDir); err != nil {
+ return fmt.Errorf("failed to extract archive: %w", err)
+ }
+
+ // Plugin directory should be named after the plugin at the archive root
+ pluginName := stripPluginName(filepath.Base(i.Source))
+ pluginDir := filepath.Join(tempDir, pluginName)
+ if _, err = os.Stat(filepath.Join(pluginDir, "plugin.yaml")); err != nil {
+ return fmt.Errorf("plugin.yaml not found in expected directory %s: %w", pluginDir, err)
+ }
+
+ // Copy to the final destination
+ slog.Debug("copying", "source", pluginDir, "path", i.Path())
+ return fs.CopyDir(pluginDir, i.Path())
+}
+
+// Update updates a local repository
+func (i *LocalInstaller) Update() error {
+ slog.Debug("local repository is auto-updated")
+ return nil
+}
+
+// Path is overridden to handle archive plugin names properly
+func (i *LocalInstaller) Path() string {
+ if i.Source == "" {
+ return ""
+ }
+
+ pluginName := filepath.Base(i.Source)
+ if i.isArchive {
+ // Strip archive extension to get plugin name
+ pluginName = stripPluginName(pluginName)
+ }
+
+ return helmpath.DataPath("plugins", pluginName)
+}
+
+// SupportsVerification returns true if the local installer can verify plugins
+func (i *LocalInstaller) SupportsVerification() bool {
+ // Only support verification for local tarball files
+ return i.isArchive
+}
+
+// GetVerificationData loads plugin and provenance data from local files for verification
+func (i *LocalInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) {
+ if !i.SupportsVerification() {
+ return nil, nil, "", fmt.Errorf("verification not supported for directories")
+ }
+
+ // Read and cache the plugin archive file
+ if i.pluginData == nil {
+ i.pluginData, err = os.ReadFile(i.Source)
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to read plugin file: %w", err)
+ }
+ }
+
+ // Read and cache the provenance file if it exists
+ if i.provData == nil {
+ provFile := i.Source + ".prov"
+ i.provData, err = os.ReadFile(provFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ // If provenance file doesn't exist, set provData to nil
+ // The verification logic will handle this gracefully
+ i.provData = nil
+ } else {
+ // If file exists but can't be read (permissions, etc), return error
+ return nil, nil, "", fmt.Errorf("failed to access provenance file %s: %w", provFile, err)
+ }
+ }
+ }
+
+ return i.pluginData, i.provData, filepath.Base(i.Source), nil
+}
diff --git a/helm/internal/plugin/installer/local_installer_test.go b/helm/internal/plugin/installer/local_installer_test.go
new file mode 100644
index 000000000..3ee8ab6d0
--- /dev/null
+++ b/helm/internal/plugin/installer/local_installer_test.go
@@ -0,0 +1,148 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+var _ Installer = new(LocalInstaller)
+
+func TestLocalInstaller(t *testing.T) {
+ ensure.HelmHome(t)
+ // Make a temp dir
+ tdir := t.TempDir()
+ if err := os.WriteFile(filepath.Join(tdir, "plugin.yaml"), []byte{}, 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ source := "../testdata/plugdir/good/echo-v1"
+ i, err := NewForSource(source, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+
+ if i.Path() != helmpath.DataPath("plugins", "echo-v1") {
+ t.Fatalf("expected path '$XDG_CONFIG_HOME/helm/plugins/helm-env', got %q", i.Path())
+ }
+ defer os.RemoveAll(filepath.Dir(helmpath.DataPath())) // helmpath.DataPath is like /tmp/helm013130971/helm
+}
+
+func TestLocalInstallerNotAFolder(t *testing.T) {
+ source := "../testdata/plugdir/good/echo-v1/plugin.yaml"
+ i, err := NewForSource(source, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ err = Install(i)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if err != ErrPluginNotADirectory {
+ t.Fatalf("expected error to equal: %q", err)
+ }
+}
+
+func TestLocalInstallerTarball(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a test tarball
+ tempDir := t.TempDir()
+ tarballPath := filepath.Join(tempDir, "test-plugin-1.0.0.tar.gz")
+
+ // Create tarball content
+ var buf bytes.Buffer
+ gw := gzip.NewWriter(&buf)
+ tw := tar.NewWriter(gw)
+
+ files := []struct {
+ Name string
+ Body string
+ Mode int64
+ }{
+ {"test-plugin/plugin.yaml", "name: test-plugin\napiVersion: v1\ntype: cli/v1\nruntime: subprocess\nversion: 1.0.0\nconfig:\n shortHelp: test\n longHelp: test\nruntimeConfig:\n platformCommand:\n - command: echo", 0644},
+ {"test-plugin/bin/test-plugin", "#!/usr/bin/env sh\necho test", 0755},
+ }
+
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Mode: file.Mode,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := gw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Write tarball to file
+ if err := os.WriteFile(tarballPath, buf.Bytes(), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Test installation
+ i, err := NewForSource(tarballPath, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Verify it's detected as LocalInstaller
+ localInstaller, ok := i.(*LocalInstaller)
+ if !ok {
+ t.Fatal("expected LocalInstaller")
+ }
+
+ if !localInstaller.isArchive {
+ t.Fatal("expected isArchive to be true")
+ }
+
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedPath := helmpath.DataPath("plugins", "test-plugin")
+ if i.Path() != expectedPath {
+ t.Fatalf("expected path %q, got %q", expectedPath, i.Path())
+ }
+
+ // Verify plugin was installed
+ if _, err := os.Stat(i.Path()); err != nil {
+ t.Fatalf("plugin not found at %s: %v", i.Path(), err)
+ }
+}
diff --git a/helm/internal/plugin/installer/oci_installer.go b/helm/internal/plugin/installer/oci_installer.go
new file mode 100644
index 000000000..67f99b6f8
--- /dev/null
+++ b/helm/internal/plugin/installer/oci_installer.go
@@ -0,0 +1,302 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/cache"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// Ensure OCIInstaller implements Verifier
+var _ Verifier = (*OCIInstaller)(nil)
+
+// OCIInstaller installs plugins from OCI registries
+type OCIInstaller struct {
+ CacheDir string
+ PluginName string
+ base
+ settings *cli.EnvSettings
+ getter getter.Getter
+ // Cached data to avoid duplicate downloads
+ pluginData []byte
+ provData []byte
+}
+
+// NewOCIInstaller creates a new OCIInstaller with optional getter options
+func NewOCIInstaller(source string, options ...getter.Option) (*OCIInstaller, error) {
+ // Extract plugin name from OCI reference using robust registry parsing
+ pluginName, err := registry.GetPluginName(source)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := cache.Key(source)
+ if err != nil {
+ return nil, err
+ }
+
+ settings := cli.New()
+
+ // Always add plugin artifact type and any provided options
+ pluginOptions := append([]getter.Option{getter.WithArtifactType("plugin")}, options...)
+ getterProvider, err := getter.NewOCIGetter(pluginOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ i := &OCIInstaller{
+ CacheDir: helmpath.CachePath("plugins", key),
+ PluginName: pluginName,
+ base: newBase(source),
+ settings: settings,
+ getter: getterProvider,
+ }
+ return i, nil
+}
+
+// Install downloads and installs a plugin from OCI registry
+// Implements Installer.
+func (i *OCIInstaller) Install() error {
+ slog.Debug("pulling OCI plugin", "source", i.Source)
+
+ // Ensure plugin data is cached
+ if i.pluginData == nil {
+ pluginData, err := i.getter.Get(i.Source)
+ if err != nil {
+ return fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err)
+ }
+ i.pluginData = pluginData.Bytes()
+ }
+
+ // Extract metadata to get the actual plugin name and version
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData))
+ if err != nil {
+ return fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+
+ tarballPath := helmpath.DataPath("plugins", filename)
+ if err := os.MkdirAll(filepath.Dir(tarballPath), 0755); err != nil {
+ return fmt.Errorf("failed to create plugins directory: %w", err)
+ }
+ if err := os.WriteFile(tarballPath, i.pluginData, 0644); err != nil {
+ return fmt.Errorf("failed to save tarball: %w", err)
+ }
+
+ // Ensure prov data is cached if available
+ if i.provData == nil {
+ // Try to download .prov file if it exists
+ provSource := i.Source + ".prov"
+ if provData, err := i.getter.Get(provSource); err == nil {
+ i.provData = provData.Bytes()
+ }
+ }
+
+ // Save prov file if we have the data
+ if i.provData != nil {
+ provPath := tarballPath + ".prov"
+ if err := os.WriteFile(provPath, i.provData, 0644); err != nil {
+ slog.Debug("failed to save provenance file", "error", err)
+ }
+ }
+
+ // Check if this is a gzip compressed file
+ if len(i.pluginData) < 2 || i.pluginData[0] != 0x1f || i.pluginData[1] != 0x8b {
+ return fmt.Errorf("plugin data is not a gzip compressed archive")
+ }
+
+ // Create cache directory
+ if err := os.MkdirAll(i.CacheDir, 0755); err != nil {
+ return fmt.Errorf("failed to create cache directory: %w", err)
+ }
+
+ // Extract as gzipped tar
+ if err := extractTarGz(bytes.NewReader(i.pluginData), i.CacheDir); err != nil {
+ return fmt.Errorf("failed to extract plugin: %w", err)
+ }
+
+ // Verify plugin.yaml exists - check root and subdirectories
+ pluginDir := i.CacheDir
+ if !isPlugin(pluginDir) {
+ // Check if plugin.yaml is in a subdirectory
+ entries, err := os.ReadDir(i.CacheDir)
+ if err != nil {
+ return err
+ }
+
+ foundPluginDir := ""
+ for _, entry := range entries {
+ if entry.IsDir() {
+ subDir := filepath.Join(i.CacheDir, entry.Name())
+ if isPlugin(subDir) {
+ foundPluginDir = subDir
+ break
+ }
+ }
+ }
+
+ if foundPluginDir == "" {
+ return ErrMissingMetadata
+ }
+
+ // Use the subdirectory as the plugin directory
+ pluginDir = foundPluginDir
+ }
+
+ // Copy from cache to final destination
+ src, err := filepath.Abs(pluginDir)
+ if err != nil {
+ return err
+ }
+
+ slog.Debug("copying", "source", src, "path", i.Path())
+ return fs.CopyDir(src, i.Path())
+}
+
+// Update updates a plugin by reinstalling it
+func (i *OCIInstaller) Update() error {
+ // For OCI, update means removing the old version and installing the new one
+ if err := os.RemoveAll(i.Path()); err != nil {
+ return err
+ }
+ return i.Install()
+}
+
+// Path is where the plugin will be installed
+func (i OCIInstaller) Path() string {
+ if i.Source == "" {
+ return ""
+ }
+ return filepath.Join(i.settings.PluginsDirectory, i.PluginName)
+}
+
+// extractTarGz extracts a gzipped tar archive to a directory
+func extractTarGz(r io.Reader, targetDir string) error {
+ gzr, err := gzip.NewReader(r)
+ if err != nil {
+ return err
+ }
+ defer gzr.Close()
+
+ return extractTar(gzr, targetDir)
+}
+
+// extractTar extracts a tar archive to a directory
+func extractTar(r io.Reader, targetDir string) error {
+ tarReader := tar.NewReader(r)
+
+ for {
+ header, err := tarReader.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ path, err := cleanJoin(targetDir, header.Name)
+ if err != nil {
+ return err
+ }
+
+ switch header.Typeflag {
+ case tar.TypeDir:
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ case tar.TypeReg:
+ dir := filepath.Dir(path)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+
+ outFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+ if _, err := io.Copy(outFile, tarReader); err != nil {
+ return err
+ }
+ case tar.TypeXGlobalHeader, tar.TypeXHeader:
+ // Skip these
+ continue
+ default:
+ return fmt.Errorf("unknown type: %b in %s", header.Typeflag, header.Name)
+ }
+ }
+
+ return nil
+}
+
+// SupportsVerification returns true since OCI plugins can be verified
+func (i *OCIInstaller) SupportsVerification() bool {
+ return true
+}
+
+// GetVerificationData downloads and caches plugin and provenance data from OCI registry for verification
+func (i *OCIInstaller) GetVerificationData() (archiveData, provData []byte, filename string, err error) {
+ slog.Debug("getting verification data for OCI plugin", "source", i.Source)
+
+ // Download plugin data once and cache it
+ if i.pluginData == nil {
+ pluginDataBuffer, err := i.getter.Get(i.Source)
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to pull plugin from %s: %w", i.Source, err)
+ }
+ i.pluginData = pluginDataBuffer.Bytes()
+ }
+
+ // Download prov data once and cache it if available
+ if i.provData == nil {
+ provSource := i.Source + ".prov"
+ // Calling getter.Get again is reasonable because: 1. The OCI registry client already optimizes the underlying network calls
+ // 2. Both calls use the same underlying manifest and memory store 3. The second .prov call is very fast since the data is already pulled
+ provDataBuffer, err := i.getter.Get(provSource)
+ if err != nil {
+ // If provenance file doesn't exist, set provData to nil
+ // The verification logic will handle this gracefully
+ i.provData = nil
+ } else {
+ i.provData = provDataBuffer.Bytes()
+ }
+ }
+
+ // Extract metadata to get the filename
+ metadata, err := plugin.ExtractTgzPluginMetadata(bytes.NewReader(i.pluginData))
+ if err != nil {
+ return nil, nil, "", fmt.Errorf("failed to extract plugin metadata from tarball: %w", err)
+ }
+ filename = fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+
+ slog.Debug("got verification data for OCI plugin", "filename", filename)
+ return i.pluginData, i.provData, filename, nil
+}
diff --git a/helm/internal/plugin/installer/oci_installer_test.go b/helm/internal/plugin/installer/oci_installer_test.go
new file mode 100644
index 000000000..1280cf97d
--- /dev/null
+++ b/helm/internal/plugin/installer/oci_installer_test.go
@@ -0,0 +1,806 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+var _ Installer = new(OCIInstaller)
+
+// createTestPluginTarGz creates a test plugin tar.gz with plugin.yaml
+func createTestPluginTarGz(t *testing.T, pluginName string) []byte {
+ t.Helper()
+
+ var buf bytes.Buffer
+ gzWriter := gzip.NewWriter(&buf)
+ tarWriter := tar.NewWriter(gzWriter)
+
+ // Add plugin.yaml
+ pluginYAML := fmt.Sprintf(`name: %s
+version: "1.0.0"
+description: "Test plugin for OCI installer"
+command: "$HELM_PLUGIN_DIR/bin/%s"
+`, pluginName, pluginName)
+ header := &tar.Header{
+ Name: "plugin.yaml",
+ Mode: 0644,
+ Size: int64(len(pluginYAML)),
+ Typeflag: tar.TypeReg,
+ }
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tarWriter.Write([]byte(pluginYAML)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add bin directory
+ dirHeader := &tar.Header{
+ Name: "bin/",
+ Mode: 0755,
+ Typeflag: tar.TypeDir,
+ }
+ if err := tarWriter.WriteHeader(dirHeader); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add executable
+ execContent := fmt.Sprintf("#!/bin/sh\necho '%s test plugin'", pluginName)
+ execHeader := &tar.Header{
+ Name: fmt.Sprintf("bin/%s", pluginName),
+ Mode: 0755,
+ Size: int64(len(execContent)),
+ Typeflag: tar.TypeReg,
+ }
+ if err := tarWriter.WriteHeader(execHeader); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tarWriter.Write([]byte(execContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+ gzWriter.Close()
+
+ return buf.Bytes()
+}
+
+// mockOCIRegistryWithArtifactType creates a mock OCI registry server using the new artifact type approach
+func mockOCIRegistryWithArtifactType(t *testing.T, pluginName string) (*httptest.Server, string) {
+ t.Helper()
+
+ pluginData := createTestPluginTarGz(t, pluginName)
+ layerDigest := fmt.Sprintf("sha256:%x", sha256Sum(pluginData))
+
+ // Create empty config data (as per OCI v1.1+ spec)
+ configData := []byte("{}")
+ configDigest := fmt.Sprintf("sha256:%x", sha256Sum(configData))
+
+ // Create manifest with artifact type
+ manifest := ocispec.Manifest{
+ MediaType: ocispec.MediaTypeImageManifest,
+ ArtifactType: "application/vnd.helm.plugin.v1+json", // Using artifact type
+ Config: ocispec.Descriptor{
+ MediaType: "application/vnd.oci.empty.v1+json", // Empty config
+ Digest: digest.Digest(configDigest),
+ Size: int64(len(configData)),
+ },
+ Layers: []ocispec.Descriptor{
+ {
+ MediaType: "application/vnd.oci.image.layer.v1.tar",
+ Digest: digest.Digest(layerDigest),
+ Size: int64(len(pluginData)),
+ Annotations: map[string]string{
+ ocispec.AnnotationTitle: pluginName + "-1.0.0.tgz", // Layer named with version
+ },
+ },
+ },
+ }
+
+ manifestData, err := json.Marshal(manifest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ manifestDigest := fmt.Sprintf("sha256:%x", sha256Sum(manifestData))
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/v2/") && !strings.Contains(r.URL.Path, "/manifests/") && !strings.Contains(r.URL.Path, "/blobs/"):
+ // API version check
+ w.Header().Set("Docker-Distribution-API-Version", "registry/2.0")
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("{}"))
+
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/manifests/") && strings.Contains(r.URL.Path, pluginName):
+ // Return manifest
+ w.Header().Set("Content-Type", ocispec.MediaTypeImageManifest)
+ w.Header().Set("Docker-Content-Digest", manifestDigest)
+ w.WriteHeader(http.StatusOK)
+ w.Write(manifestData)
+
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/blobs/"+layerDigest):
+ // Return layer data
+ w.Header().Set("Content-Type", "application/vnd.oci.image.layer.v1.tar")
+ w.WriteHeader(http.StatusOK)
+ w.Write(pluginData)
+
+ case r.Method == http.MethodGet && strings.Contains(r.URL.Path, "/blobs/"+configDigest):
+ // Return config data
+ w.Header().Set("Content-Type", "application/vnd.oci.empty.v1+json")
+ w.WriteHeader(http.StatusOK)
+ w.Write(configData)
+
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ // Parse server URL to get host:port format for OCI reference
+ serverURL, err := url.Parse(server.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ registryHost := serverURL.Host
+
+ return server, registryHost
+}
+
+// sha256Sum calculates SHA256 sum of data
+func sha256Sum(data []byte) []byte {
+ h := sha256.New()
+ h.Write(data)
+ return h.Sum(nil)
+}
+
+func TestNewOCIInstaller(t *testing.T) {
+ tests := []struct {
+ name string
+ source string
+ expectName string
+ expectError bool
+ }{
+ {
+ name: "valid OCI reference with tag",
+ source: "oci://ghcr.io/user/plugin-name:v1.0.0",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "valid OCI reference with digest",
+ source: "oci://ghcr.io/user/plugin-name@sha256:1234567890abcdef",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "valid OCI reference without tag",
+ source: "oci://ghcr.io/user/plugin-name",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "valid OCI reference with multiple path segments",
+ source: "oci://registry.example.com/org/team/plugin-name:latest",
+ expectName: "plugin-name",
+ expectError: false,
+ },
+ {
+ name: "invalid OCI reference - no path",
+ source: "oci://registry.example.com",
+ expectName: "",
+ expectError: true,
+ },
+ {
+ name: "valid OCI reference - single path segment",
+ source: "oci://registry.example.com/plugin",
+ expectName: "plugin",
+ expectError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ installer, err := NewOCIInstaller(tt.source)
+
+ if tt.expectError {
+ if err == nil {
+ t.Errorf("expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ return
+ }
+
+ // Check all fields thoroughly
+ if installer.PluginName != tt.expectName {
+ t.Errorf("expected plugin name %s, got %s", tt.expectName, installer.PluginName)
+ }
+
+ if installer.Source != tt.source {
+ t.Errorf("expected source %s, got %s", tt.source, installer.Source)
+ }
+
+ if installer.CacheDir == "" {
+ t.Error("expected non-empty cache directory")
+ }
+
+ if !strings.Contains(installer.CacheDir, "plugins") {
+ t.Errorf("expected cache directory to contain 'plugins', got %s", installer.CacheDir)
+ }
+
+ if installer.settings == nil {
+ t.Error("expected settings to be initialized")
+ }
+
+ // Check that Path() method works
+ expectedPath := helmpath.DataPath("plugins", tt.expectName)
+ if installer.Path() != expectedPath {
+ t.Errorf("expected path %s, got %s", expectedPath, installer.Path())
+ }
+ })
+ }
+}
+
+func TestOCIInstaller_Path(t *testing.T) {
+ tests := []struct {
+ name string
+ source string
+ pluginName string
+ expectPath string
+ }{
+ {
+ name: "valid plugin name",
+ source: "oci://ghcr.io/user/plugin-name:v1.0.0",
+ pluginName: "plugin-name",
+ expectPath: helmpath.DataPath("plugins", "plugin-name"),
+ },
+ {
+ name: "empty source",
+ source: "",
+ pluginName: "",
+ expectPath: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ installer := &OCIInstaller{
+ PluginName: tt.pluginName,
+ base: newBase(tt.source),
+ settings: cli.New(),
+ }
+
+ path := installer.Path()
+ if path != tt.expectPath {
+ t.Errorf("expected path %s, got %s", tt.expectPath, path)
+ }
+ })
+ }
+}
+
+func TestOCIInstaller_Install(t *testing.T) {
+ // Set up isolated test environment
+ ensure.HelmHome(t)
+
+ pluginName := "test-plugin-basic"
+ server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName)
+ defer server.Close()
+
+ // Test OCI reference
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName)
+
+ // Test with plain HTTP (since test server uses HTTP)
+ installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ // The OCI installer uses helmpath.DataPath, which is isolated by ensure.HelmHome(t)
+ actualPath := installer.Path()
+ t.Logf("Installer will use path: %s", actualPath)
+
+ // Install the plugin
+ if err := Install(installer); err != nil {
+ t.Fatalf("Expected installation to succeed, got error: %v", err)
+ }
+
+ // Verify plugin was installed to the correct location
+ if !isPlugin(actualPath) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", actualPath)
+ }
+
+ // Debug: list what was actually created
+ if entries, err := os.ReadDir(actualPath); err != nil {
+ t.Fatalf("Could not read plugin directory %s: %v", actualPath, err)
+ } else {
+ t.Logf("Plugin directory %s contains:", actualPath)
+ for _, entry := range entries {
+ t.Logf(" - %s", entry.Name())
+ }
+ }
+
+ // Verify the plugin.yaml file exists and is valid
+ pluginFile := filepath.Join(actualPath, "plugin.yaml")
+ if _, err := os.Stat(pluginFile); err != nil {
+ t.Errorf("Expected plugin.yaml to exist, got error: %v", err)
+ }
+}
+
+func TestOCIInstaller_Install_WithGetterOptions(t *testing.T) {
+ testCases := []struct {
+ name string
+ pluginName string
+ options []getter.Option
+ wantErr bool
+ }{
+ {
+ name: "plain HTTP",
+ pluginName: "example-cli-plain-http",
+ options: []getter.Option{getter.WithPlainHTTP(true)},
+ wantErr: false,
+ },
+ {
+ name: "insecure skip TLS verify",
+ pluginName: "example-cli-insecure",
+ options: []getter.Option{getter.WithPlainHTTP(true), getter.WithInsecureSkipVerifyTLS(true)},
+ wantErr: false,
+ },
+ {
+ name: "with timeout",
+ pluginName: "example-cli-timeout",
+ options: []getter.Option{getter.WithPlainHTTP(true), getter.WithTimeout(30 * time.Second)},
+ wantErr: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Set up isolated test environment for each subtest
+ ensure.HelmHome(t)
+
+ server, registryHost := mockOCIRegistryWithArtifactType(t, tc.pluginName)
+ defer server.Close()
+
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, tc.pluginName)
+
+ installer, err := NewOCIInstaller(source, tc.options...)
+ if err != nil {
+ if !tc.wantErr {
+ t.Fatalf("Expected no error creating installer, got %v", err)
+ }
+ return
+ }
+
+ // The installer now uses our isolated test directory
+ actualPath := installer.Path()
+
+ // Install the plugin
+ err = Install(installer)
+ if tc.wantErr {
+ if err == nil {
+ t.Errorf("Expected installation to fail, but it succeeded")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Expected installation to succeed, got error: %v", err)
+ } else {
+ // Verify plugin was installed to the actual path
+ if !isPlugin(actualPath) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", actualPath)
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestOCIInstaller_Install_AlreadyExists(t *testing.T) {
+ // Set up isolated test environment
+ ensure.HelmHome(t)
+
+ pluginName := "test-plugin-exists"
+ server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName)
+ defer server.Close()
+
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName)
+ installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ // First install should succeed
+ if err := Install(installer); err != nil {
+ t.Fatalf("Expected first installation to succeed, got error: %v", err)
+ }
+
+ // Verify plugin was installed
+ if !isPlugin(installer.Path()) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", installer.Path())
+ }
+
+ // Second install should fail with "plugin already exists"
+ err = Install(installer)
+ if err == nil {
+ t.Error("Expected error when installing plugin that already exists")
+ } else if !strings.Contains(err.Error(), "plugin already exists") {
+ t.Errorf("Expected 'plugin already exists' error, got: %v", err)
+ }
+}
+
+func TestOCIInstaller_Update(t *testing.T) {
+ // Set up isolated test environment
+ ensure.HelmHome(t)
+
+ pluginName := "test-plugin-update"
+ server, registryHost := mockOCIRegistryWithArtifactType(t, pluginName)
+ defer server.Close()
+
+ source := fmt.Sprintf("oci://%s/%s:latest", registryHost, pluginName)
+ installer, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ // Test update when plugin does not exist - should fail
+ err = Update(installer)
+ if err == nil {
+ t.Error("Expected error when updating plugin that does not exist")
+ } else if !strings.Contains(err.Error(), "plugin does not exist") {
+ t.Errorf("Expected 'plugin does not exist' error, got: %v", err)
+ }
+
+ // Install plugin first
+ if err := Install(installer); err != nil {
+ t.Fatalf("Expected installation to succeed, got error: %v", err)
+ }
+
+ // Verify plugin was installed
+ if !isPlugin(installer.Path()) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml", installer.Path())
+ }
+
+ // Test update when plugin exists - should succeed
+ // For OCI, Update() removes old version and reinstalls
+ if err := Update(installer); err != nil {
+ t.Errorf("Expected update to succeed, got error: %v", err)
+ }
+
+ // Verify plugin is still installed after update
+ if !isPlugin(installer.Path()) {
+ t.Errorf("Expected plugin directory %s to contain plugin.yaml after update", installer.Path())
+ }
+}
+
+func TestOCIInstaller_Install_ComponentExtraction(t *testing.T) {
+ // Test that we can extract a plugin archive properly
+ // This tests the extraction logic that Install() uses
+ tempDir := t.TempDir()
+ pluginName := "test-plugin-extract"
+
+ pluginData := createTestPluginTarGz(t, pluginName)
+
+ // Test extraction
+ err := extractTarGz(bytes.NewReader(pluginData), tempDir)
+ if err != nil {
+ t.Fatalf("Failed to extract plugin: %v", err)
+ }
+
+ // Verify plugin.yaml exists
+ pluginYAMLPath := filepath.Join(tempDir, "plugin.yaml")
+ if _, err := os.Stat(pluginYAMLPath); os.IsNotExist(err) {
+ t.Errorf("plugin.yaml not found after extraction")
+ }
+
+ // Verify bin directory exists
+ binPath := filepath.Join(tempDir, "bin")
+ if _, err := os.Stat(binPath); os.IsNotExist(err) {
+ t.Errorf("bin directory not found after extraction")
+ }
+
+ // Verify executable exists and has correct permissions
+ execPath := filepath.Join(tempDir, "bin", pluginName)
+ if info, err := os.Stat(execPath); err != nil {
+ t.Errorf("executable not found: %v", err)
+ } else if info.Mode()&0111 == 0 {
+ t.Errorf("file is not executable")
+ }
+
+ // Verify this would be recognized as a plugin
+ if !isPlugin(tempDir) {
+ t.Errorf("extracted directory is not a valid plugin")
+ }
+}
+
+func TestExtractTarGz(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Create a test tar.gz file
+ var buf bytes.Buffer
+ gzWriter := gzip.NewWriter(&buf)
+ tarWriter := tar.NewWriter(gzWriter)
+
+ // Add a test file to the archive
+ testContent := "test content"
+ header := &tar.Header{
+ Name: "test-file.txt",
+ Mode: 0644,
+ Size: int64(len(testContent)),
+ Typeflag: tar.TypeReg,
+ }
+
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tarWriter.Write([]byte(testContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add a test directory
+ dirHeader := &tar.Header{
+ Name: "test-dir/",
+ Mode: 0755,
+ Typeflag: tar.TypeDir,
+ }
+
+ if err := tarWriter.WriteHeader(dirHeader); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+ gzWriter.Close()
+
+ // Test extraction
+ err := extractTarGz(bytes.NewReader(buf.Bytes()), tempDir)
+ if err != nil {
+ t.Errorf("extractTarGz failed: %v", err)
+ }
+
+ // Verify extracted file
+ extractedFile := filepath.Join(tempDir, "test-file.txt")
+ content, err := os.ReadFile(extractedFile)
+ if err != nil {
+ t.Errorf("failed to read extracted file: %v", err)
+ }
+
+ if string(content) != testContent {
+ t.Errorf("expected content %s, got %s", testContent, string(content))
+ }
+
+ // Verify extracted directory
+ extractedDir := filepath.Join(tempDir, "test-dir")
+ if _, err := os.Stat(extractedDir); os.IsNotExist(err) {
+ t.Errorf("extracted directory does not exist: %s", extractedDir)
+ }
+}
+
+func TestExtractTarGz_InvalidGzip(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Test with invalid gzip data
+ invalidGzipData := []byte("not gzip data")
+ err := extractTarGz(bytes.NewReader(invalidGzipData), tempDir)
+ if err == nil {
+ t.Error("expected error for invalid gzip data")
+ }
+}
+
+func TestExtractTar_UnknownFileType(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Create a test tar file
+ var buf bytes.Buffer
+ tarWriter := tar.NewWriter(&buf)
+
+ // Add a test file
+ testContent := "test content"
+ header := &tar.Header{
+ Name: "test-file.txt",
+ Mode: 0644,
+ Size: int64(len(testContent)),
+ Typeflag: tar.TypeReg,
+ }
+
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tarWriter.Write([]byte(testContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Test unknown file type
+ unknownHeader := &tar.Header{
+ Name: "unknown-type",
+ Mode: 0644,
+ Typeflag: tar.TypeSymlink, // Use a type that's not handled
+ }
+
+ if err := tarWriter.WriteHeader(unknownHeader); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+
+ // Test extraction - should fail due to unknown type
+ err := extractTar(bytes.NewReader(buf.Bytes()), tempDir)
+ if err == nil {
+ t.Error("expected error for unknown tar file type")
+ }
+
+ if !strings.Contains(err.Error(), "unknown type") {
+ t.Errorf("expected 'unknown type' error, got: %v", err)
+ }
+}
+
+func TestExtractTar_SuccessfulExtraction(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Since we can't easily create extended headers with Go's tar package,
+ // we'll test the logic that skips them by creating a simple tar with regular files
+ // and then testing that the extraction works correctly.
+
+ // Create a test tar file
+ var buf bytes.Buffer
+ tarWriter := tar.NewWriter(&buf)
+
+ // Add a regular file
+ testContent := "test content"
+ header := &tar.Header{
+ Name: "test-file.txt",
+ Mode: 0644,
+ Size: int64(len(testContent)),
+ Typeflag: tar.TypeReg,
+ }
+
+ if err := tarWriter.WriteHeader(header); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := tarWriter.Write([]byte(testContent)); err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+
+ // Test extraction
+ err := extractTar(bytes.NewReader(buf.Bytes()), tempDir)
+ if err != nil {
+ t.Errorf("extractTar failed: %v", err)
+ }
+
+ // Verify the regular file was extracted
+ extractedFile := filepath.Join(tempDir, "test-file.txt")
+ content, err := os.ReadFile(extractedFile)
+ if err != nil {
+ t.Errorf("failed to read extracted file: %v", err)
+ }
+
+ if string(content) != testContent {
+ t.Errorf("expected content %s, got %s", testContent, string(content))
+ }
+}
+
+func TestOCIInstaller_Install_PlainHTTPOption(t *testing.T) {
+ // Test that PlainHTTP option is properly passed to getter
+ source := "oci://example.com/test-plugin:v1.0.0"
+
+ // Test with PlainHTTP=false (default)
+ installer1, err := NewOCIInstaller(source)
+ if err != nil {
+ t.Fatalf("failed to create installer: %v", err)
+ }
+ if installer1.getter == nil {
+ t.Error("getter should be initialized")
+ }
+
+ // Test with PlainHTTP=true
+ installer2, err := NewOCIInstaller(source, getter.WithPlainHTTP(true))
+ if err != nil {
+ t.Fatalf("failed to create installer with PlainHTTP=true: %v", err)
+ }
+ if installer2.getter == nil {
+ t.Error("getter should be initialized with PlainHTTP=true")
+ }
+
+ // Both installers should have the same basic properties
+ if installer1.PluginName != installer2.PluginName {
+ t.Error("plugin names should match")
+ }
+ if installer1.Source != installer2.Source {
+ t.Error("sources should match")
+ }
+
+ // Test with multiple options
+ installer3, err := NewOCIInstaller(source,
+ getter.WithPlainHTTP(true),
+ getter.WithBasicAuth("user", "pass"),
+ )
+ if err != nil {
+ t.Fatalf("failed to create installer with multiple options: %v", err)
+ }
+ if installer3.getter == nil {
+ t.Error("getter should be initialized with multiple options")
+ }
+}
+
+func TestOCIInstaller_Install_ValidationErrors(t *testing.T) {
+ tests := []struct {
+ name string
+ layerData []byte
+ expectError bool
+ errorMsg string
+ }{
+ {
+ name: "non-gzip layer",
+ layerData: []byte("not gzip data"),
+ expectError: true,
+ errorMsg: "is not a gzip compressed archive",
+ },
+ {
+ name: "empty layer",
+ layerData: []byte{},
+ expectError: true,
+ errorMsg: "is not a gzip compressed archive",
+ },
+ {
+ name: "single byte layer",
+ layerData: []byte{0x1f},
+ expectError: true,
+ errorMsg: "is not a gzip compressed archive",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Test the gzip validation logic that's used in the Install method
+ if len(tt.layerData) < 2 || tt.layerData[0] != 0x1f || tt.layerData[1] != 0x8b {
+ // This matches the validation in the Install method
+ if !tt.expectError {
+ t.Error("expected valid gzip data")
+ }
+ if !strings.Contains(tt.errorMsg, "is not a gzip compressed archive") {
+ t.Errorf("expected error message to contain 'is not a gzip compressed archive'")
+ }
+ }
+ })
+ }
+}
diff --git a/helm/internal/plugin/installer/plugin_structure.go b/helm/internal/plugin/installer/plugin_structure.go
new file mode 100644
index 000000000..10647141e
--- /dev/null
+++ b/helm/internal/plugin/installer/plugin_structure.go
@@ -0,0 +1,80 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin"
+)
+
+// detectPluginRoot searches for plugin.yaml in the extracted directory
+// and returns the path to the directory containing it.
+// This handles cases where the tarball contains the plugin in a subdirectory.
+func detectPluginRoot(extractDir string) (string, error) {
+ // First check if plugin.yaml is at the root
+ if _, err := os.Stat(filepath.Join(extractDir, plugin.PluginFileName)); err == nil {
+ return extractDir, nil
+ }
+
+ // Otherwise, look for plugin.yaml in subdirectories (only one level deep)
+ entries, err := os.ReadDir(extractDir)
+ if err != nil {
+ return "", err
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ subdir := filepath.Join(extractDir, entry.Name())
+ if _, err := os.Stat(filepath.Join(subdir, plugin.PluginFileName)); err == nil {
+ return subdir, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("plugin.yaml not found in %s or its immediate subdirectories", extractDir)
+}
+
+// validatePluginName checks if the plugin directory name matches the plugin name
+// from plugin.yaml when the plugin is in a subdirectory.
+func validatePluginName(pluginRoot string, expectedName string) error {
+ // Only validate if plugin is in a subdirectory
+ dirName := filepath.Base(pluginRoot)
+ if dirName == expectedName {
+ return nil
+ }
+
+ // Load plugin.yaml to get the actual name
+ p, err := plugin.LoadDir(pluginRoot)
+ if err != nil {
+ return fmt.Errorf("failed to load plugin from %s: %w", pluginRoot, err)
+ }
+
+ m := p.Metadata()
+ actualName := m.Name
+
+ // For now, just log a warning if names don't match
+ // In the future, we might want to enforce this more strictly
+ if actualName != dirName && actualName != strings.TrimSuffix(expectedName, filepath.Ext(expectedName)) {
+ // This is just informational - not an error
+ return nil
+ }
+
+ return nil
+}
diff --git a/helm/internal/plugin/installer/plugin_structure_test.go b/helm/internal/plugin/installer/plugin_structure_test.go
new file mode 100644
index 000000000..c8766ce59
--- /dev/null
+++ b/helm/internal/plugin/installer/plugin_structure_test.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestDetectPluginRoot(t *testing.T) {
+ tests := []struct {
+ name string
+ setup func(dir string) error
+ expectRoot string
+ expectError bool
+ }{
+ {
+ name: "plugin.yaml at root",
+ setup: func(dir string) error {
+ return os.WriteFile(filepath.Join(dir, "plugin.yaml"), []byte("name: test"), 0644)
+ },
+ expectRoot: ".",
+ expectError: false,
+ },
+ {
+ name: "plugin.yaml in subdirectory",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "my-plugin")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte("name: test"), 0644)
+ },
+ expectRoot: "my-plugin",
+ expectError: false,
+ },
+ {
+ name: "no plugin.yaml",
+ setup: func(dir string) error {
+ return os.WriteFile(filepath.Join(dir, "README.md"), []byte("test"), 0644)
+ },
+ expectRoot: "",
+ expectError: true,
+ },
+ {
+ name: "plugin.yaml in nested subdirectory (should not find)",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "outer", "inner")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte("name: test"), 0644)
+ },
+ expectRoot: "",
+ expectError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dir := t.TempDir()
+ if err := tt.setup(dir); err != nil {
+ t.Fatalf("Setup failed: %v", err)
+ }
+
+ root, err := detectPluginRoot(dir)
+ if tt.expectError {
+ if err == nil {
+ t.Error("Expected error but got none")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ expectedPath := dir
+ if tt.expectRoot != "." {
+ expectedPath = filepath.Join(dir, tt.expectRoot)
+ }
+ if root != expectedPath {
+ t.Errorf("Expected root %s but got %s", expectedPath, root)
+ }
+ }
+ })
+ }
+}
+
+func TestValidatePluginName(t *testing.T) {
+ tests := []struct {
+ name string
+ setup func(dir string) error
+ pluginRoot string
+ expectedName string
+ expectError bool
+ }{
+ {
+ name: "matching directory and plugin name",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "my-plugin")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ yaml := `name: my-plugin
+version: 1.0.0
+usage: test
+description: test`
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte(yaml), 0644)
+ },
+ pluginRoot: "my-plugin",
+ expectedName: "my-plugin",
+ expectError: false,
+ },
+ {
+ name: "different directory and plugin name",
+ setup: func(dir string) error {
+ subdir := filepath.Join(dir, "wrong-name")
+ if err := os.MkdirAll(subdir, 0755); err != nil {
+ return err
+ }
+ yaml := `name: my-plugin
+version: 1.0.0
+usage: test
+description: test`
+ return os.WriteFile(filepath.Join(subdir, "plugin.yaml"), []byte(yaml), 0644)
+ },
+ pluginRoot: "wrong-name",
+ expectedName: "wrong-name",
+ expectError: false, // Currently we don't error on mismatch
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dir := t.TempDir()
+ if err := tt.setup(dir); err != nil {
+ t.Fatalf("Setup failed: %v", err)
+ }
+
+ pluginRoot := filepath.Join(dir, tt.pluginRoot)
+ err := validatePluginName(pluginRoot, tt.expectedName)
+ if tt.expectError {
+ if err == nil {
+ t.Error("Expected error but got none")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
diff --git a/helm/internal/plugin/installer/vcs_installer.go b/helm/internal/plugin/installer/vcs_installer.go
new file mode 100644
index 000000000..3601ec7a8
--- /dev/null
+++ b/helm/internal/plugin/installer/vcs_installer.go
@@ -0,0 +1,179 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "errors"
+ "fmt"
+ stdfs "io/fs"
+ "log/slog"
+ "os"
+ "sort"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/Masterminds/vcs"
+
+ "helm.sh/helm/v4/internal/plugin/cache"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// VCSInstaller installs plugins from remote a repository.
+type VCSInstaller struct {
+ Repo vcs.Repo
+ Version string
+ base
+}
+
+func existingVCSRepo(location string) (Installer, error) {
+ repo, err := vcs.NewRepo("", location)
+ if err != nil {
+ return nil, err
+ }
+ i := &VCSInstaller{
+ Repo: repo,
+ base: newBase(repo.Remote()),
+ }
+ return i, nil
+}
+
+// NewVCSInstaller creates a new VCSInstaller.
+func NewVCSInstaller(source, version string) (*VCSInstaller, error) {
+ key, err := cache.Key(source)
+ if err != nil {
+ return nil, err
+ }
+ cachedpath := helmpath.CachePath("plugins", key)
+ repo, err := vcs.NewRepo(source, cachedpath)
+ if err != nil {
+ return nil, err
+ }
+ i := &VCSInstaller{
+ Repo: repo,
+ Version: version,
+ base: newBase(source),
+ }
+ return i, nil
+}
+
+// Install clones a remote repository and installs into the plugin directory.
+//
+// Implements Installer.
+func (i *VCSInstaller) Install() error {
+ if err := i.sync(i.Repo); err != nil {
+ return err
+ }
+
+ ref, err := i.solveVersion(i.Repo)
+ if err != nil {
+ return err
+ }
+ if ref != "" {
+ if err := i.setVersion(i.Repo, ref); err != nil {
+ return err
+ }
+ }
+
+ if !isPlugin(i.Repo.LocalPath()) {
+ return ErrMissingMetadata
+ }
+
+ slog.Debug("copying files", "source", i.Repo.LocalPath(), "destination", i.Path())
+ return fs.CopyDir(i.Repo.LocalPath(), i.Path())
+}
+
+// Update updates a remote repository
+func (i *VCSInstaller) Update() error {
+ slog.Debug("updating", "source", i.Repo.Remote())
+ if i.Repo.IsDirty() {
+ return errors.New("plugin repo was modified")
+ }
+ if err := i.Repo.Update(); err != nil {
+ return err
+ }
+ if !isPlugin(i.Repo.LocalPath()) {
+ return ErrMissingMetadata
+ }
+ return nil
+}
+
+func (i *VCSInstaller) solveVersion(repo vcs.Repo) (string, error) {
+ if i.Version == "" {
+ return "", nil
+ }
+
+ if repo.IsReference(i.Version) {
+ return i.Version, nil
+ }
+
+ // Create the constraint first to make sure it's valid before
+ // working on the repo.
+ constraint, err := semver.NewConstraint(i.Version)
+ if err != nil {
+ return "", err
+ }
+
+ // Get the tags
+ refs, err := repo.Tags()
+ if err != nil {
+ return "", err
+ }
+ slog.Debug("found refs", "refs", refs)
+
+ // Convert and filter the list to semver.Version instances
+ semvers := getSemVers(refs)
+
+ // Sort semver list
+ sort.Sort(sort.Reverse(semver.Collection(semvers)))
+ for _, v := range semvers {
+ if constraint.Check(v) {
+ // If the constraint passes get the original reference
+ ver := v.Original()
+ slog.Debug("setting to version", "version", ver)
+ return ver, nil
+ }
+ }
+
+ return "", fmt.Errorf("requested version %q does not exist for plugin %q", i.Version, i.Repo.Remote())
+}
+
+// setVersion attempts to checkout the version
+func (i *VCSInstaller) setVersion(repo vcs.Repo, ref string) error {
+ slog.Debug("setting version", "version", i.Version)
+ return repo.UpdateVersion(ref)
+}
+
+// sync will clone or update a remote repo.
+func (i *VCSInstaller) sync(repo vcs.Repo) error {
+ if _, err := os.Stat(repo.LocalPath()); errors.Is(err, stdfs.ErrNotExist) {
+ slog.Debug("cloning", "source", repo.Remote(), "destination", repo.LocalPath())
+ return repo.Get()
+ }
+ slog.Debug("updating", "source", repo.Remote(), "destination", repo.LocalPath())
+ return repo.Update()
+}
+
+// Filter a list of versions to only included semantic versions. The response
+// is a mapping of the original version to the semantic version.
+func getSemVers(refs []string) []*semver.Version {
+ var sv []*semver.Version
+ for _, r := range refs {
+ if v, err := semver.NewVersion(r); err == nil {
+ sv = append(sv, v)
+ }
+ }
+ return sv
+}
diff --git a/helm/internal/plugin/installer/vcs_installer_test.go b/helm/internal/plugin/installer/vcs_installer_test.go
new file mode 100644
index 000000000..d542a0f75
--- /dev/null
+++ b/helm/internal/plugin/installer/vcs_installer_test.go
@@ -0,0 +1,189 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer // import "helm.sh/helm/v4/internal/plugin/installer"
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/Masterminds/vcs"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+var _ Installer = new(VCSInstaller)
+
+type testRepo struct {
+ local, remote, current string
+ tags, branches []string
+ err error
+ vcs.Repo
+}
+
+func (r *testRepo) LocalPath() string { return r.local }
+func (r *testRepo) Remote() string { return r.remote }
+func (r *testRepo) Update() error { return r.err }
+func (r *testRepo) Get() error { return r.err }
+func (r *testRepo) IsReference(string) bool { return false }
+func (r *testRepo) Tags() ([]string, error) { return r.tags, r.err }
+func (r *testRepo) Branches() ([]string, error) { return r.branches, r.err }
+func (r *testRepo) UpdateVersion(version string) error {
+ r.current = version
+ return r.err
+}
+
+func TestVCSInstaller(t *testing.T) {
+ ensure.HelmHome(t)
+
+ if err := os.MkdirAll(helmpath.DataPath("plugins"), 0755); err != nil {
+ t.Fatalf("Could not create %s: %s", helmpath.DataPath("plugins"), err)
+ }
+
+ source := "https://github.com/adamreese/helm-env"
+ testRepoPath, _ := filepath.Abs("../testdata/plugdir/good/echo-v1")
+ repo := &testRepo{
+ local: testRepoPath,
+ tags: []string{"0.1.0", "0.1.1"},
+ }
+
+ i, err := NewForSource(source, "~0.1.0")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // ensure a VCSInstaller was returned
+ vcsInstaller, ok := i.(*VCSInstaller)
+ if !ok {
+ t.Fatal("expected a VCSInstaller")
+ }
+
+ // set the testRepo in the VCSInstaller
+ vcsInstaller.Repo = repo
+
+ if err := Install(i); err != nil {
+ t.Fatal(err)
+ }
+ if repo.current != "0.1.1" {
+ t.Fatalf("expected version '0.1.1', got %q", repo.current)
+ }
+ expectedPath := helmpath.DataPath("plugins", "helm-env")
+ if i.Path() != expectedPath {
+ t.Fatalf("expected path %q, got %q", expectedPath, i.Path())
+ }
+
+ // Install again to test plugin exists error
+ if err := Install(i); err == nil {
+ t.Fatalf("expected error for plugin exists, got none")
+ } else if err.Error() != "plugin already exists" {
+ t.Fatalf("expected error for plugin exists, got (%v)", err)
+ }
+
+ // Testing FindSource method, expect error because plugin code is not a cloned repository
+ if _, err := FindSource(i.Path()); err == nil {
+ t.Fatalf("expected error for inability to find plugin source, got none")
+ } else if err.Error() != "cannot get information about plugin source" {
+ t.Fatalf("expected error for inability to find plugin source, got (%v)", err)
+ }
+}
+
+func TestVCSInstallerNonExistentVersion(t *testing.T) {
+ ensure.HelmHome(t)
+
+ source := "https://github.com/adamreese/helm-env"
+ version := "0.2.0"
+
+ i, err := NewForSource(source, version)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // ensure a VCSInstaller was returned
+ if _, ok := i.(*VCSInstaller); !ok {
+ t.Fatal("expected a VCSInstaller")
+ }
+
+ if err := Install(i); err == nil {
+ t.Fatalf("expected error for version does not exists, got none")
+ } else if strings.Contains(err.Error(), "Could not resolve host: github.com") {
+ t.Skip("Unable to run test without Internet access")
+ } else if err.Error() != fmt.Sprintf("requested version %q does not exist for plugin %q", version, source) {
+ t.Fatalf("expected error for version does not exists, got (%v)", err)
+ }
+}
+func TestVCSInstallerUpdate(t *testing.T) {
+ ensure.HelmHome(t)
+
+ source := "https://github.com/adamreese/helm-env"
+
+ i, err := NewForSource(source, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // ensure a VCSInstaller was returned
+ if _, ok := i.(*VCSInstaller); !ok {
+ t.Fatal("expected a VCSInstaller")
+ }
+
+ if err := Update(i); err == nil {
+ t.Fatal("expected error for plugin does not exist, got none")
+ } else if err.Error() != "plugin does not exist" {
+ t.Fatalf("expected error for plugin does not exist, got (%v)", err)
+ }
+
+ // Install plugin before update
+ if err := Install(i); err != nil {
+ if strings.Contains(err.Error(), "Could not resolve host: github.com") {
+ t.Skip("Unable to run test without Internet access")
+ } else {
+ t.Fatal(err)
+ }
+ }
+
+ // Test FindSource method for positive result
+ pluginInfo, err := FindSource(i.Path())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ vcsInstaller := pluginInfo.(*VCSInstaller)
+
+ repoRemote := vcsInstaller.Repo.Remote()
+ if repoRemote != source {
+ t.Fatalf("invalid source found, expected %q got %q", source, repoRemote)
+ }
+
+ // Update plugin
+ if err := Update(i); err != nil {
+ t.Fatal(err)
+ }
+
+ // Test update failure
+ if err := os.Remove(filepath.Join(vcsInstaller.Repo.LocalPath(), "plugin.yaml")); err != nil {
+ t.Fatal(err)
+ }
+ // Testing update for error
+ if err := Update(vcsInstaller); err == nil {
+ t.Fatalf("expected error for plugin modified, got none")
+ } else if err.Error() != "plugin repo was modified" {
+ t.Fatalf("expected error for plugin modified, got (%v)", err)
+ }
+
+}
diff --git a/helm/internal/plugin/installer/verification_test.go b/helm/internal/plugin/installer/verification_test.go
new file mode 100644
index 000000000..22f0a8308
--- /dev/null
+++ b/helm/internal/plugin/installer/verification_test.go
@@ -0,0 +1,421 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package installer
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+func TestInstallWithOptions_VerifyMissingProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball without .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Capture stderr to check warning message
+ oldStderr := os.Stderr
+ r, w, _ := os.Pipe()
+ os.Stderr = w
+
+ // Install with verification enabled (should warn but succeed)
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"})
+
+ // Restore stderr and read captured output
+ w.Close()
+ os.Stderr = oldStderr
+ var buf bytes.Buffer
+ io.Copy(&buf, r)
+ output := buf.String()
+
+ // Should succeed with nil result (no verification performed)
+ if err != nil {
+ t.Fatalf("Expected installation to succeed despite missing .prov file, got error: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when .prov file is missing, got: %+v", result)
+ }
+
+ // Should contain warning message
+ expectedWarning := "WARNING: No provenance file found for plugin"
+ if !strings.Contains(output, expectedWarning) {
+ t.Errorf("Expected warning message '%s' in output, got: %s", expectedWarning, output)
+ }
+
+ // Plugin should be installed
+ if _, err := os.Stat(installer.Path()); os.IsNotExist(err) {
+ t.Errorf("Plugin should be installed at %s", installer.Path())
+ }
+}
+
+func TestInstallWithOptions_VerifyWithValidProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball with valid .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+
+ provFile := pluginTgz + ".prov"
+ createProvFile(t, provFile, pluginTgz, "")
+ defer os.Remove(provFile)
+
+ // Create keyring with test key (empty for testing)
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification enabled
+ // This will fail signature verification but pass hash validation
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail due to invalid signature (empty keyring) but we test that it gets past the hash check
+ if err == nil {
+ t.Fatalf("Expected installation to fail with empty keyring")
+ }
+ if !strings.Contains(err.Error(), "plugin verification failed") {
+ t.Errorf("Expected plugin verification failed error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+
+ // Plugin should not be installed due to verification failure
+ if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) {
+ t.Errorf("Plugin should not be installed when verification fails")
+ }
+}
+
+func TestInstallWithOptions_VerifyWithInvalidProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball with invalid .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ provFile := pluginTgz + ".prov"
+ createProvFileInvalidFormat(t, provFile)
+ defer os.Remove(provFile)
+
+ // Create keyring with test key
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification enabled (should fail)
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail with verification error
+ if err == nil {
+ t.Fatalf("Expected installation with invalid .prov file to fail")
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+
+ // Should contain verification failure message
+ expectedError := "plugin verification failed"
+ if !strings.Contains(err.Error(), expectedError) {
+ t.Errorf("Expected error message '%s', got: %s", expectedError, err.Error())
+ }
+
+ // Plugin should not be installed
+ if _, err := os.Stat(installer.Path()); !os.IsNotExist(err) {
+ t.Errorf("Plugin should not be installed when verification fails")
+ }
+}
+
+func TestInstallWithOptions_NoVerifyRequested(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a temporary plugin tarball without .prov file
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install without verification (should succeed without any verification)
+ result, err := InstallWithOptions(installer, Options{Verify: false})
+
+ // Should succeed with no verification
+ if err != nil {
+ t.Fatalf("Expected installation without verification to succeed, got error: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification is disabled, got: %+v", result)
+ }
+
+ // Plugin should be installed
+ if _, err := os.Stat(installer.Path()); os.IsNotExist(err) {
+ t.Errorf("Plugin should be installed at %s", installer.Path())
+ }
+}
+
+func TestInstallWithOptions_VerifyDirectoryNotSupported(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a directory-based plugin (not an archive)
+ pluginDir := createTestPluginDir(t)
+
+ // Create local installer for directory
+ installer, err := NewLocalInstaller(pluginDir)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification should fail (directories don't support verification)
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: "dummy"})
+
+ // Should fail with verification not supported error
+ if err == nil {
+ t.Fatalf("Expected installation to fail with verification not supported error")
+ }
+ if !strings.Contains(err.Error(), "--verify is only supported for plugin tarballs") {
+ t.Errorf("Expected verification not supported error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+}
+
+func TestInstallWithOptions_VerifyMismatchedProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create plugin tarball
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ provFile := pluginTgz + ".prov"
+ // Create provenance file with wrong hash (for a different file)
+ createProvFile(t, provFile, pluginTgz, "sha256:wronghash")
+ defer os.Remove(provFile)
+
+ // Create keyring with test key
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification should fail due to hash mismatch
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail with verification error
+ if err == nil {
+ t.Fatalf("Expected installation to fail with hash mismatch")
+ }
+ if !strings.Contains(err.Error(), "plugin verification failed") {
+ t.Errorf("Expected plugin verification failed error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+}
+
+func TestInstallWithOptions_VerifyProvenanceAccessError(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create plugin tarball
+ pluginDir := createTestPluginDir(t)
+ pluginTgz := createTarballFromPluginDir(t, pluginDir)
+ defer os.Remove(pluginTgz)
+
+ // Create a .prov file but make it inaccessible (simulate permission error)
+ provFile := pluginTgz + ".prov"
+ if err := os.WriteFile(provFile, []byte("test"), 0000); err != nil {
+ t.Fatalf("Failed to create inaccessible provenance file: %v", err)
+ }
+ defer os.Remove(provFile)
+
+ // Create keyring
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ // Create local installer
+ installer, err := NewLocalInstaller(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to create installer: %v", err)
+ }
+ defer os.RemoveAll(installer.Path())
+
+ // Install with verification should fail due to access error
+ result, err := InstallWithOptions(installer, Options{Verify: true, Keyring: keyring})
+
+ // Should fail with access error (either at stat level or during verification)
+ if err == nil {
+ t.Fatalf("Expected installation to fail with provenance file access error")
+ }
+ // The error could be either "failed to access provenance file" or "plugin verification failed"
+ // depending on when the permission error occurs
+ if !strings.Contains(err.Error(), "failed to access provenance file") &&
+ !strings.Contains(err.Error(), "plugin verification failed") {
+ t.Errorf("Expected provenance file access or verification error, got: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil verification result when verification fails, got: %+v", result)
+ }
+}
+
+// Helper functions for test setup
+
+func createTestPluginDir(t *testing.T) string {
+ t.Helper()
+
+ // Create temporary directory with plugin structure
+ tmpDir := t.TempDir()
+ pluginDir := filepath.Join(tmpDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatalf("Failed to create plugin directory: %v", err)
+ }
+
+ // Create plugin.yaml using the standardized v1 format
+ pluginYaml := `apiVersion: v1
+name: test-plugin
+type: cli/v1
+runtime: subprocess
+version: 1.0.0
+runtimeConfig:
+ platformCommand:
+ - command: echo`
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYaml), 0644); err != nil {
+ t.Fatalf("Failed to create plugin.yaml: %v", err)
+ }
+
+ return pluginDir
+}
+
+func createTarballFromPluginDir(t *testing.T, pluginDir string) string {
+ t.Helper()
+
+ // Create tarball using the plugin package helper
+ tmpDir := filepath.Dir(pluginDir)
+ tgzPath := filepath.Join(tmpDir, "test-plugin-1.0.0.tgz")
+ tarFile, err := os.Create(tgzPath)
+ if err != nil {
+ t.Fatalf("Failed to create tarball file: %v", err)
+ }
+ defer tarFile.Close()
+
+ if err := plugin.CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ t.Fatalf("Failed to create tarball: %v", err)
+ }
+
+ return tgzPath
+}
+
+func createProvFile(t *testing.T, provFile, pluginTgz, hash string) {
+ t.Helper()
+
+ var hashStr string
+ if hash == "" {
+ // Calculate actual hash of the tarball for realistic testing
+ data, err := os.ReadFile(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to read tarball for hashing: %v", err)
+ }
+ hashSum := sha256.Sum256(data)
+ hashStr = fmt.Sprintf("sha256:%x", hashSum)
+ } else {
+ // Use provided hash (could be wrong for testing)
+ hashStr = hash
+ }
+
+ // Create properly formatted provenance file with specified hash
+ provContent := fmt.Sprintf(`-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA256
+
+name: test-plugin
+version: 1.0.0
+description: Test plugin for verification
+files:
+ test-plugin-1.0.0.tgz: %s
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQEcBAEBCAAGBQJktest...
+-----END PGP SIGNATURE-----
+`, hashStr)
+ if err := os.WriteFile(provFile, []byte(provContent), 0644); err != nil {
+ t.Fatalf("Failed to create provenance file: %v", err)
+ }
+}
+
+func createProvFileInvalidFormat(t *testing.T, provFile string) {
+ t.Helper()
+
+ // Create an invalid provenance file (not PGP signed format)
+ invalidProv := "This is not a valid PGP signed message"
+ if err := os.WriteFile(provFile, []byte(invalidProv), 0644); err != nil {
+ t.Fatalf("Failed to create invalid provenance file: %v", err)
+ }
+}
+
+func createTestKeyring(t *testing.T) string {
+ t.Helper()
+
+ // Create a temporary keyring file
+ tmpDir := t.TempDir()
+ keyringPath := filepath.Join(tmpDir, "pubring.gpg")
+
+ // Create empty keyring for testing
+ if err := os.WriteFile(keyringPath, []byte{}, 0644); err != nil {
+ t.Fatalf("Failed to create test keyring: %v", err)
+ }
+
+ return keyringPath
+}
diff --git a/helm/internal/plugin/loader.go b/helm/internal/plugin/loader.go
new file mode 100644
index 000000000..2f051b99e
--- /dev/null
+++ b/helm/internal/plugin/loader.go
@@ -0,0 +1,268 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ extism "github.com/extism/go-sdk"
+ "github.com/tetratelabs/wazero"
+ "go.yaml.in/yaml/v3"
+
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+func peekAPIVersion(r io.Reader) (string, error) {
+ type apiVersion struct {
+ APIVersion string `yaml:"apiVersion"`
+ }
+
+ var v apiVersion
+ d := yaml.NewDecoder(r)
+ if err := d.Decode(&v); err != nil {
+ return "", err
+ }
+
+ return v.APIVersion, nil
+}
+
+func loadMetadataLegacy(metadataData []byte) (*Metadata, error) {
+
+ var ml MetadataLegacy
+ d := yaml.NewDecoder(bytes.NewReader(metadataData))
+ // NOTE: No strict unmarshalling for legacy plugins - maintain backwards compatibility
+ if err := d.Decode(&ml); err != nil {
+ return nil, err
+ }
+
+ if err := ml.Validate(); err != nil {
+ return nil, err
+ }
+
+ m := fromMetadataLegacy(ml)
+ if err := m.Validate(); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func loadMetadataV1(metadataData []byte) (*Metadata, error) {
+
+ var mv1 MetadataV1
+ d := yaml.NewDecoder(bytes.NewReader(metadataData))
+ d.KnownFields(true)
+ if err := d.Decode(&mv1); err != nil {
+ return nil, err
+ }
+
+ if err := mv1.Validate(); err != nil {
+ return nil, err
+ }
+
+ m, err := fromMetadataV1(mv1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert MetadataV1 to Metadata: %w", err)
+ }
+
+ if err := m.Validate(); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func loadMetadata(metadataData []byte) (*Metadata, error) {
+ apiVersion, err := peekAPIVersion(bytes.NewReader(metadataData))
+ if err != nil {
+ return nil, fmt.Errorf("failed to peek %s API version: %w", PluginFileName, err)
+ }
+
+ switch apiVersion {
+ case "": // legacy
+ return loadMetadataLegacy(metadataData)
+ case "v1":
+ return loadMetadataV1(metadataData)
+ }
+
+ return nil, fmt.Errorf("invalid plugin apiVersion: %q", apiVersion)
+}
+
+type prototypePluginManager struct {
+ runtimes map[string]Runtime
+}
+
+func newPrototypePluginManager() (*prototypePluginManager, error) {
+
+ cc, err := wazero.NewCompilationCacheWithDir(helmpath.CachePath("wazero-build"))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create wazero compilation cache: %w", err)
+ }
+
+ return &prototypePluginManager{
+ runtimes: map[string]Runtime{
+ "subprocess": &RuntimeSubprocess{},
+ "extism/v1": &RuntimeExtismV1{
+ HostFunctions: map[string]extism.HostFunction{},
+ CompilationCache: cc,
+ },
+ },
+ }, nil
+}
+
+func (pm *prototypePluginManager) RegisterRuntime(runtimeName string, runtime Runtime) {
+ pm.runtimes[runtimeName] = runtime
+}
+
+func (pm *prototypePluginManager) CreatePlugin(pluginPath string, metadata *Metadata) (Plugin, error) {
+ rt, ok := pm.runtimes[metadata.Runtime]
+ if !ok {
+ return nil, fmt.Errorf("unsupported plugin runtime type: %q", metadata.Runtime)
+ }
+
+ return rt.CreatePlugin(pluginPath, metadata)
+}
+
+// LoadDir loads a plugin from the given directory.
+func LoadDir(dirname string) (Plugin, error) {
+ pluginfile := filepath.Join(dirname, PluginFileName)
+ metadataData, err := os.ReadFile(pluginfile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read plugin at %q: %w", pluginfile, err)
+ }
+
+ m, err := loadMetadata(metadataData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load plugin %q: %w", dirname, err)
+ }
+
+ pm, err := newPrototypePluginManager()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create plugin manager: %w", err)
+ }
+ return pm.CreatePlugin(dirname, m)
+}
+
+// LoadAll loads all plugins found beneath the base directory.
+//
+// This scans only one directory level.
+func LoadAll(basedir string) ([]Plugin, error) {
+ var plugins []Plugin
+ // We want basedir/*/plugin.yaml
+ scanpath := filepath.Join(basedir, "*", PluginFileName)
+ matches, err := filepath.Glob(scanpath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to search for plugins in %q: %w", scanpath, err)
+ }
+
+ // empty dir should load
+ if len(matches) == 0 {
+ return plugins, nil
+ }
+
+ for _, yamlFile := range matches {
+ dir := filepath.Dir(yamlFile)
+ p, err := LoadDir(dir)
+ if err != nil {
+ return plugins, err
+ }
+ plugins = append(plugins, p)
+ }
+ return plugins, detectDuplicates(plugins)
+}
+
+// findFunc is a function that finds plugins in a directory
+type findFunc func(pluginsDir string) ([]Plugin, error)
+
+// filterFunc is a function that filters plugins
+type filterFunc func(Plugin) bool
+
+// FindPlugins returns a list of plugins that match the descriptor
+func FindPlugins(pluginsDirs []string, descriptor Descriptor) ([]Plugin, error) {
+ return findPlugins(pluginsDirs, LoadAll, makeDescriptorFilter(descriptor))
+}
+
+// findPlugins is the internal implementation that uses the find and filter functions
+func findPlugins(pluginsDirs []string, findFn findFunc, filterFn filterFunc) ([]Plugin, error) {
+ var found []Plugin
+ for _, pluginsDir := range pluginsDirs {
+ ps, err := findFn(pluginsDir)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for _, p := range ps {
+ if filterFn(p) {
+ found = append(found, p)
+ }
+ }
+
+ }
+
+ return found, nil
+}
+
+// makeDescriptorFilter creates a filter function from a descriptor
+// Additional plugin filter criteria we wish to support can be added here
+func makeDescriptorFilter(descriptor Descriptor) filterFunc {
+ return func(p Plugin) bool {
+ // If name is specified, it must match
+ if descriptor.Name != "" && p.Metadata().Name != descriptor.Name {
+ return false
+
+ }
+ // If type is specified, it must match
+ if descriptor.Type != "" && p.Metadata().Type != descriptor.Type {
+ return false
+ }
+ return true
+ }
+}
+
+// FindPlugin returns a single plugin that matches the descriptor
+func FindPlugin(dirs []string, descriptor Descriptor) (Plugin, error) {
+ plugins, err := FindPlugins(dirs, descriptor)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(plugins) > 0 {
+ return plugins[0], nil
+ }
+
+ return nil, fmt.Errorf("plugin: %+v not found", descriptor)
+}
+
+func detectDuplicates(plugs []Plugin) error {
+ names := map[string]string{}
+
+ for _, plug := range plugs {
+ if oldpath, ok := names[plug.Metadata().Name]; ok {
+ return fmt.Errorf(
+ "two plugins claim the name %q at %q and %q",
+ plug.Metadata().Name,
+ oldpath,
+ plug.Dir(),
+ )
+ }
+ names[plug.Metadata().Name] = plug.Dir()
+ }
+
+ return nil
+}
diff --git a/helm/internal/plugin/loader_test.go b/helm/internal/plugin/loader_test.go
new file mode 100644
index 000000000..e84905248
--- /dev/null
+++ b/helm/internal/plugin/loader_test.go
@@ -0,0 +1,363 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestPeekAPIVersion(t *testing.T) {
+ testCases := map[string]struct {
+ data []byte
+ expected string
+ }{
+ "v1": {
+ data: []byte(`---
+apiVersion: v1
+name: "test-plugin"
+`),
+ expected: "v1",
+ },
+ "legacy": { // No apiVersion field
+ data: []byte(`---
+name: "test-plugin"
+`),
+ expected: "",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ version, err := peekAPIVersion(bytes.NewReader(tc.data))
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected, version)
+ })
+ }
+
+ // invalid yaml
+ {
+ data := []byte(`bad yaml`)
+ _, err := peekAPIVersion(bytes.NewReader(data))
+ assert.Error(t, err)
+ }
+}
+
+func TestLoadDir(t *testing.T) {
+
+ makeMetadata := func(apiVersion string) Metadata {
+ usage := "hello [params]..."
+ if apiVersion == "legacy" {
+ usage = "" // Legacy plugins don't have Usage field for command syntax
+ }
+ return Metadata{
+ APIVersion: apiVersion,
+ Name: fmt.Sprintf("hello-%s", apiVersion),
+ Version: "0.1.0",
+ Type: "cli/v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{
+ Usage: usage,
+ ShortHelp: "echo hello message",
+ LongHelp: "description",
+ IgnoreFlags: true,
+ },
+ RuntimeConfig: &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.sh"}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "${HELM_PLUGIN_DIR}/hello.ps1"}},
+ },
+ PlatformHooks: map[string][]PlatformCommand{
+ Install: {
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}},
+ },
+ },
+ expandHookArgs: apiVersion == "legacy",
+ },
+ }
+ }
+
+ testCases := map[string]struct {
+ dirname string
+ apiVersion string
+ expect Metadata
+ }{
+ "legacy": {
+ dirname: "testdata/plugdir/good/hello-legacy",
+ apiVersion: "legacy",
+ expect: makeMetadata("legacy"),
+ },
+ "v1": {
+ dirname: "testdata/plugdir/good/hello-v1",
+ apiVersion: "v1",
+ expect: makeMetadata("v1"),
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ plug, err := LoadDir(tc.dirname)
+ require.NoError(t, err, "error loading plugin from %s", tc.dirname)
+
+ assert.Equal(t, tc.dirname, plug.Dir())
+ assert.EqualValues(t, tc.expect, plug.Metadata())
+ })
+ }
+}
+
+func TestLoadDirDuplicateEntries(t *testing.T) {
+ testCases := map[string]string{
+ "legacy": "testdata/plugdir/bad/duplicate-entries-legacy",
+ "v1": "testdata/plugdir/bad/duplicate-entries-v1",
+ }
+ for name, dirname := range testCases {
+ t.Run(name, func(t *testing.T) {
+ _, err := LoadDir(dirname)
+ assert.Error(t, err)
+ })
+ }
+}
+
+func TestLoadDirGetter(t *testing.T) {
+ dirname := "testdata/plugdir/good/getter"
+
+ expect := Metadata{
+ Name: "getter",
+ Version: "1.2.3",
+ Type: "getter/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigGetterV1{
+ Protocols: []string{"myprotocol", "myprotocols"},
+ },
+ RuntimeConfig: &RuntimeConfigSubprocess{
+ ProtocolCommands: []SubprocessProtocolCommand{
+ {
+ Protocols: []string{"myprotocol", "myprotocols"},
+ PlatformCommand: []PlatformCommand{{Command: "echo getter"}},
+ },
+ },
+ },
+ }
+
+ plug, err := LoadDir(dirname)
+ require.NoError(t, err)
+ assert.Equal(t, dirname, plug.Dir())
+ assert.Equal(t, expect, plug.Metadata())
+}
+
+func TestPostRenderer(t *testing.T) {
+ dirname := "testdata/plugdir/good/postrenderer-v1"
+
+ expect := Metadata{
+ Name: "postrenderer-v1",
+ Version: "1.2.3",
+ Type: "postrenderer/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigPostRendererV1{},
+ RuntimeConfig: &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {
+ Command: "${HELM_PLUGIN_DIR}/sed-test.sh",
+ },
+ },
+ },
+ }
+
+ plug, err := LoadDir(dirname)
+ require.NoError(t, err)
+ assert.Equal(t, dirname, plug.Dir())
+ assert.Equal(t, expect, plug.Metadata())
+}
+
+func TestDetectDuplicates(t *testing.T) {
+ plugs := []Plugin{
+ mockSubprocessCLIPlugin(t, "foo"),
+ mockSubprocessCLIPlugin(t, "bar"),
+ }
+ if err := detectDuplicates(plugs); err != nil {
+ t.Error("no duplicates in the first set")
+ }
+ plugs = append(plugs, mockSubprocessCLIPlugin(t, "foo"))
+ if err := detectDuplicates(plugs); err == nil {
+ t.Error("duplicates in the second set")
+ }
+}
+
+func TestLoadAll(t *testing.T) {
+ // Verify that empty dir loads:
+ {
+ plugs, err := LoadAll("testdata")
+ require.NoError(t, err)
+ assert.Len(t, plugs, 0)
+ }
+
+ basedir := "testdata/plugdir/good"
+ plugs, err := LoadAll(basedir)
+ require.NoError(t, err)
+ require.NotEmpty(t, plugs, "expected plugins to be loaded from %s", basedir)
+
+ plugsMap := map[string]Plugin{}
+ for _, p := range plugs {
+ plugsMap[p.Metadata().Name] = p
+ }
+
+ assert.Len(t, plugsMap, 7)
+ assert.Contains(t, plugsMap, "downloader")
+ assert.Contains(t, plugsMap, "echo-legacy")
+ assert.Contains(t, plugsMap, "echo-v1")
+ assert.Contains(t, plugsMap, "getter")
+ assert.Contains(t, plugsMap, "hello-legacy")
+ assert.Contains(t, plugsMap, "hello-v1")
+ assert.Contains(t, plugsMap, "postrenderer-v1")
+}
+
+func TestFindPlugins(t *testing.T) {
+ cases := []struct {
+ name string
+ plugdirs string
+ expected int
+ }{
+ {
+ name: "plugdirs is empty",
+ plugdirs: "",
+ expected: 0,
+ },
+ {
+ name: "plugdirs isn't dir",
+ plugdirs: "./plugin_test.go",
+ expected: 0,
+ },
+ {
+ name: "plugdirs doesn't have plugin",
+ plugdirs: ".",
+ expected: 0,
+ },
+ {
+ name: "normal",
+ plugdirs: "./testdata/plugdir/good",
+ expected: 7,
+ },
+ }
+ for _, c := range cases {
+ t.Run(t.Name(), func(t *testing.T) {
+ plugin, err := LoadAll(c.plugdirs)
+ require.NoError(t, err)
+ assert.Len(t, plugin, c.expected, "expected %d plugins, got %d", c.expected, len(plugin))
+ })
+ }
+}
+
+func TestLoadMetadataLegacy(t *testing.T) {
+ testCases := map[string]struct {
+ yaml string
+ expectError bool
+ errorContains string
+ expectedName string
+ logNote string
+ }{
+ "capital name field": {
+ yaml: `Name: my-plugin
+version: 1.0.0
+usage: test plugin
+description: test description
+command: echo test`,
+ expectError: true,
+ errorContains: `invalid plugin name "": must contain only a-z, A-Z, 0-9, _ and -`,
+ // Legacy plugins: No strict unmarshalling (backwards compatibility)
+ // YAML decoder silently ignores "Name:", then validation catches empty name
+ logNote: "NOTE: V1 plugins use strict unmarshalling and would get: yaml: field Name not found",
+ },
+ "correct name field": {
+ yaml: `name: my-plugin
+version: 1.0.0
+usage: test plugin
+description: test description
+command: echo test`,
+ expectError: false,
+ expectedName: "my-plugin",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ m, err := loadMetadataLegacy([]byte(tc.yaml))
+
+ if tc.expectError {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), tc.errorContains)
+ t.Logf("Legacy error (validation catches empty name): %v", err)
+ if tc.logNote != "" {
+ t.Log(tc.logNote)
+ }
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, tc.expectedName, m.Name)
+ }
+ })
+ }
+}
+
+func TestLoadMetadataV1(t *testing.T) {
+ testCases := map[string]struct {
+ yaml string
+ expectError bool
+ errorContains string
+ expectedName string
+ }{
+ "capital name field": {
+ yaml: `apiVersion: v1
+Name: my-plugin
+type: cli/v1
+runtime: subprocess
+`,
+ expectError: true,
+ errorContains: "field Name not found in type plugin.MetadataV1",
+ },
+ "correct name field": {
+ yaml: `apiVersion: v1
+name: my-plugin
+type: cli/v1
+runtime: subprocess
+`,
+ expectError: false,
+ expectedName: "my-plugin",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ m, err := loadMetadataV1([]byte(tc.yaml))
+
+ if tc.expectError {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), tc.errorContains)
+ t.Logf("V1 error (strict unmarshalling): %v", err)
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, tc.expectedName, m.Name)
+ }
+ })
+ }
+}
diff --git a/helm/internal/plugin/metadata.go b/helm/internal/plugin/metadata.go
new file mode 100644
index 000000000..4e019f0b3
--- /dev/null
+++ b/helm/internal/plugin/metadata.go
@@ -0,0 +1,216 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "errors"
+ "fmt"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+// Metadata of a plugin, converted from the "on-disk" legacy or v1 plugin.yaml
+// Specifically, Config and RuntimeConfig are converted to their respective types based on the plugin type and runtime
+type Metadata struct {
+ // APIVersion specifies the plugin API version
+ APIVersion string
+
+ // Name is the name of the plugin
+ Name string
+
+ // Type of plugin (eg, cli/v1, getter/v1, postrenderer/v1)
+ Type string
+
+ // Runtime specifies the runtime type (subprocess, wasm)
+ Runtime string
+
+ // Version is the SemVer 2 version of the plugin.
+ Version string
+
+ // SourceURL is the URL where this plugin can be found
+ SourceURL string
+
+ // Config contains the type-specific configuration for this plugin
+ Config Config
+
+ // RuntimeConfig contains the runtime-specific configuration
+ RuntimeConfig RuntimeConfig
+}
+
+func (m Metadata) Validate() error {
+ var errs []error
+
+ if !validPluginName.MatchString(m.Name) {
+ errs = append(errs, fmt.Errorf("invalid plugin name %q: must contain only a-z, A-Z, 0-9, _ and -", m.Name))
+ }
+
+ if m.APIVersion == "" {
+ errs = append(errs, fmt.Errorf("empty APIVersion"))
+ }
+
+ if m.Type == "" {
+ errs = append(errs, fmt.Errorf("empty type field"))
+ }
+
+ if m.Runtime == "" {
+ errs = append(errs, fmt.Errorf("empty runtime field"))
+ }
+
+ if m.Config == nil {
+ errs = append(errs, fmt.Errorf("missing config field"))
+ }
+
+ if m.RuntimeConfig == nil {
+ errs = append(errs, fmt.Errorf("missing runtimeConfig field"))
+ }
+
+ // Validate the config itself
+ if m.Config != nil {
+ if err := m.Config.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("config validation failed: %w", err))
+ }
+ }
+
+ // Validate the runtime config itself
+ if m.RuntimeConfig != nil {
+ if err := m.RuntimeConfig.Validate(); err != nil {
+ errs = append(errs, fmt.Errorf("runtime config validation failed: %w", err))
+ }
+ }
+
+ if len(errs) > 0 {
+ return errors.Join(errs...)
+ }
+
+ return nil
+}
+
+func fromMetadataLegacy(m MetadataLegacy) *Metadata {
+ pluginType := "cli/v1"
+
+ if len(m.Downloaders) > 0 {
+ pluginType = "getter/v1"
+ }
+
+ return &Metadata{
+ APIVersion: "legacy",
+ Name: m.Name,
+ Version: m.Version,
+ Type: pluginType,
+ Runtime: "subprocess",
+ Config: buildLegacyConfig(m, pluginType),
+ RuntimeConfig: buildLegacyRuntimeConfig(m),
+ }
+}
+
+func buildLegacyConfig(m MetadataLegacy, pluginType string) Config {
+ switch pluginType {
+ case "getter/v1":
+ var protocols []string
+ for _, d := range m.Downloaders {
+ protocols = append(protocols, d.Protocols...)
+ }
+ return &schema.ConfigGetterV1{
+ Protocols: protocols,
+ }
+ case "cli/v1":
+ return &schema.ConfigCLIV1{
+ Usage: "", // Legacy plugins don't have Usage field for command syntax
+ ShortHelp: m.Usage, // Map legacy usage to shortHelp
+ LongHelp: m.Description, // Map legacy description to longHelp
+ IgnoreFlags: m.IgnoreFlags,
+ }
+ default:
+ return nil
+ }
+}
+
+func buildLegacyRuntimeConfig(m MetadataLegacy) RuntimeConfig {
+ var protocolCommands []SubprocessProtocolCommand
+ if len(m.Downloaders) > 0 {
+ protocolCommands =
+ make([]SubprocessProtocolCommand, 0, len(m.Downloaders))
+ for _, d := range m.Downloaders {
+ protocolCommands = append(protocolCommands, SubprocessProtocolCommand{
+ Protocols: d.Protocols,
+ PlatformCommand: []PlatformCommand{{Command: d.Command}},
+ })
+ }
+ }
+
+ platformCommand := m.PlatformCommand
+ if len(platformCommand) == 0 && len(m.Command) > 0 {
+ platformCommand = []PlatformCommand{{Command: m.Command}}
+ }
+
+ platformHooks := m.PlatformHooks
+ expandHookArgs := true
+ if len(platformHooks) == 0 && len(m.Hooks) > 0 {
+ platformHooks = make(PlatformHooks, len(m.Hooks))
+ for hookName, hookCommand := range m.Hooks {
+ platformHooks[hookName] = []PlatformCommand{{Command: "sh", Args: []string{"-c", hookCommand}}}
+ expandHookArgs = false
+ }
+ }
+ return &RuntimeConfigSubprocess{
+ PlatformCommand: platformCommand,
+ PlatformHooks: platformHooks,
+ ProtocolCommands: protocolCommands,
+ expandHookArgs: expandHookArgs,
+ }
+}
+
+func fromMetadataV1(mv1 MetadataV1) (*Metadata, error) {
+ config, err := unmarshalConfig(mv1.Type, mv1.Config)
+ if err != nil {
+ return nil, err
+ }
+
+ runtimeConfig, err := convertMetadataRuntimeConfig(mv1.Runtime, mv1.RuntimeConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Metadata{
+ APIVersion: mv1.APIVersion,
+ Name: mv1.Name,
+ Type: mv1.Type,
+ Runtime: mv1.Runtime,
+ Version: mv1.Version,
+ SourceURL: mv1.SourceURL,
+ Config: config,
+ RuntimeConfig: runtimeConfig,
+ }, nil
+}
+
+func convertMetadataRuntimeConfig(runtimeType string, runtimeConfigRaw map[string]any) (RuntimeConfig, error) {
+ var runtimeConfig RuntimeConfig
+ var err error
+
+ switch runtimeType {
+ case "subprocess":
+ runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigSubprocess](runtimeConfigRaw)
+ case "extism/v1":
+ runtimeConfig, err = remarshalRuntimeConfig[*RuntimeConfigExtismV1](runtimeConfigRaw)
+ default:
+ return nil, fmt.Errorf("unsupported plugin runtime type: %q", runtimeType)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal runtimeConfig for %s runtime: %w", runtimeType, err)
+ }
+ return runtimeConfig, nil
+}
diff --git a/helm/internal/plugin/metadata_legacy.go b/helm/internal/plugin/metadata_legacy.go
new file mode 100644
index 000000000..3cd1a50cd
--- /dev/null
+++ b/helm/internal/plugin/metadata_legacy.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+// Downloaders represents the plugins capability if it can retrieve
+// charts from special sources
+type Downloaders struct {
+ // Protocols are the list of schemes from the charts URL.
+ Protocols []string `yaml:"protocols"`
+ // Command is the executable path with which the plugin performs
+ // the actual download for the corresponding Protocols
+ Command string `yaml:"command"`
+}
+
+// MetadataLegacy is the legacy plugin.yaml format
+type MetadataLegacy struct {
+ // Name is the name of the plugin
+ Name string `yaml:"name"`
+
+ // Version is a SemVer 2 version of the plugin.
+ Version string `yaml:"version"`
+
+ // Usage is the single-line usage text shown in help
+ Usage string `yaml:"usage"`
+
+ // Description is a long description shown in places like `helm help`
+ Description string `yaml:"description"`
+
+ // PlatformCommand is the plugin command, with a platform selector and support for args.
+ PlatformCommand []PlatformCommand `yaml:"platformCommand"`
+
+ // Command is the plugin command, as a single string.
+ // DEPRECATED: Use PlatformCommand instead. Removed in subprocess/v1 plugins.
+ Command string `yaml:"command"`
+
+ // IgnoreFlags ignores any flags passed in from Helm
+ IgnoreFlags bool `yaml:"ignoreFlags"`
+
+ // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
+ PlatformHooks PlatformHooks `yaml:"platformHooks"`
+
+ // Hooks are commands that will run on plugin events, as a single string.
+ // DEPRECATED: Use PlatformHooks instead. Removed in subprocess/v1 plugins.
+ Hooks Hooks `yaml:"hooks"`
+
+ // Downloaders field is used if the plugin supply downloader mechanism
+ // for special protocols.
+ Downloaders []Downloaders `yaml:"downloaders"`
+}
+
+func (m *MetadataLegacy) Validate() error {
+ if !validPluginName.MatchString(m.Name) {
+ return fmt.Errorf("invalid plugin name %q: must contain only a-z, A-Z, 0-9, _ and -", m.Name)
+ }
+ m.Usage = sanitizeString(m.Usage)
+
+ if len(m.PlatformCommand) > 0 && len(m.Command) > 0 {
+ return fmt.Errorf("both platformCommand and command are set")
+ }
+
+ if len(m.PlatformHooks) > 0 && len(m.Hooks) > 0 {
+ return fmt.Errorf("both platformHooks and hooks are set")
+ }
+
+ // Validate downloader plugins
+ for i, downloader := range m.Downloaders {
+ if downloader.Command == "" {
+ return fmt.Errorf("downloader %d has empty command", i)
+ }
+ if len(downloader.Protocols) == 0 {
+ return fmt.Errorf("downloader %d has no protocols", i)
+ }
+ for j, protocol := range downloader.Protocols {
+ if protocol == "" {
+ return fmt.Errorf("downloader %d has empty protocol at index %d", i, j)
+ }
+ }
+ }
+
+ return nil
+}
+
+// sanitizeString normalize spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+ return strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return ' '
+ }
+ if unicode.IsPrint(r) {
+ return r
+ }
+ return -1
+ }, str)
+}
diff --git a/helm/internal/plugin/metadata_legacy_test.go b/helm/internal/plugin/metadata_legacy_test.go
new file mode 100644
index 000000000..9421e98b5
--- /dev/null
+++ b/helm/internal/plugin/metadata_legacy_test.go
@@ -0,0 +1,126 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMetadataLegacyValidate(t *testing.T) {
+ testsValid := map[string]MetadataLegacy{
+ "valid metadata": {
+ Name: "myplugin",
+ },
+ "valid with command": {
+ Name: "myplugin",
+ Command: "echo hello",
+ },
+ "valid with platformCommand": {
+ Name: "myplugin",
+ PlatformCommand: []PlatformCommand{
+ {OperatingSystem: "linux", Architecture: "amd64", Command: "echo hello"},
+ },
+ },
+ "valid with hooks": {
+ Name: "myplugin",
+ Hooks: Hooks{
+ "install": "echo install",
+ },
+ },
+ "valid with platformHooks": {
+ Name: "myplugin",
+ PlatformHooks: PlatformHooks{
+ "install": []PlatformCommand{
+ {OperatingSystem: "linux", Architecture: "amd64", Command: "echo install"},
+ },
+ },
+ },
+ "valid with downloaders": {
+ Name: "myplugin",
+ Downloaders: []Downloaders{
+ {
+ Protocols: []string{"myproto"},
+ Command: "echo download",
+ },
+ },
+ },
+ }
+
+ for testName, metadata := range testsValid {
+ t.Run(testName, func(t *testing.T) {
+ assert.NoError(t, metadata.Validate())
+ })
+ }
+
+ testsInvalid := map[string]MetadataLegacy{
+ "invalid name": {
+ Name: "my plugin", // further tested in TestValidPluginName
+ },
+ "both command and platformCommand": {
+ Name: "myplugin",
+ Command: "echo hello",
+ PlatformCommand: []PlatformCommand{
+ {OperatingSystem: "linux", Architecture: "amd64", Command: "echo hello"},
+ },
+ },
+ "both hooks and platformHooks": {
+ Name: "myplugin",
+ Hooks: Hooks{
+ "install": "echo install",
+ },
+ PlatformHooks: PlatformHooks{
+ "install": []PlatformCommand{
+ {OperatingSystem: "linux", Architecture: "amd64", Command: "echo install"},
+ },
+ },
+ },
+ "downloader with empty command": {
+ Name: "myplugin",
+ Downloaders: []Downloaders{
+ {
+ Protocols: []string{"myproto"},
+ Command: "",
+ },
+ },
+ },
+ "downloader with no protocols": {
+ Name: "myplugin",
+ Downloaders: []Downloaders{
+ {
+ Protocols: []string{},
+ Command: "echo download",
+ },
+ },
+ },
+ "downloader with empty protocol": {
+ Name: "myplugin",
+ Downloaders: []Downloaders{
+ {
+ Protocols: []string{""},
+ Command: "echo download",
+ },
+ },
+ },
+ }
+
+ for testName, metadata := range testsInvalid {
+ t.Run(testName, func(t *testing.T) {
+ assert.Error(t, metadata.Validate())
+ })
+ }
+}
diff --git a/helm/internal/plugin/metadata_test.go b/helm/internal/plugin/metadata_test.go
new file mode 100644
index 000000000..145ef5101
--- /dev/null
+++ b/helm/internal/plugin/metadata_test.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestValidatePluginData(t *testing.T) {
+
+ // A mock plugin with no commands
+ mockNoCommand := mockSubprocessCLIPlugin(t, "foo")
+ mockNoCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{},
+ PlatformHooks: map[string][]PlatformCommand{},
+ }
+
+ // A mock plugin with legacy commands
+ mockLegacyCommand := mockSubprocessCLIPlugin(t, "foo")
+ mockLegacyCommand.metadata.RuntimeConfig = &RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {
+ Command: "echo \"mock plugin\"",
+ },
+ },
+ PlatformHooks: map[string][]PlatformCommand{
+ Install: {
+ PlatformCommand{
+ Command: "echo installing...",
+ },
+ },
+ },
+ }
+
+ for i, item := range []struct {
+ pass bool
+ plug Plugin
+ errString string
+ }{
+ {true, mockSubprocessCLIPlugin(t, "abcdefghijklmnopqrstuvwxyz0123456789_-ABC"), ""},
+ {true, mockSubprocessCLIPlugin(t, "foo-bar-FOO-BAR_1234"), ""},
+ {false, mockSubprocessCLIPlugin(t, "foo -bar"), "invalid plugin name"},
+ {false, mockSubprocessCLIPlugin(t, "$foo -bar"), "invalid plugin name"}, // Test leading chars
+ {false, mockSubprocessCLIPlugin(t, "foo -bar "), "invalid plugin name"}, // Test trailing chars
+ {false, mockSubprocessCLIPlugin(t, "foo\nbar"), "invalid plugin name"}, // Test newline
+ {true, mockNoCommand, ""}, // Test no command metadata works
+ {true, mockLegacyCommand, ""}, // Test legacy command metadata works
+ } {
+ err := item.plug.Metadata().Validate()
+ if item.pass && err != nil {
+ t.Errorf("failed to validate case %d: %s", i, err)
+ } else if !item.pass && err == nil {
+ t.Errorf("expected case %d to fail", i)
+ }
+ if !item.pass && !strings.Contains(err.Error(), item.errString) {
+ t.Errorf("index [%d]: expected error to contain: %s, but got: %s", i, item.errString, err.Error())
+ }
+ }
+}
+
+func TestMetadataValidateMultipleErrors(t *testing.T) {
+ // Create metadata with multiple validation issues
+ metadata := Metadata{
+ Name: "invalid name with spaces", // Invalid name
+ APIVersion: "", // Empty API version
+ Type: "", // Empty type
+ Runtime: "", // Empty runtime
+ Config: nil, // Missing config
+ RuntimeConfig: nil, // Missing runtime config
+ }
+
+ err := metadata.Validate()
+ if err == nil {
+ t.Fatal("expected validation to fail with multiple errors")
+ }
+
+ errStr := err.Error()
+
+ // Check that all expected errors are present in the joined error
+ expectedErrors := []string{
+ "invalid plugin name",
+ "empty APIVersion",
+ "empty type field",
+ "empty runtime field",
+ "missing config field",
+ "missing runtimeConfig field",
+ }
+
+ for _, expectedErr := range expectedErrors {
+ if !strings.Contains(errStr, expectedErr) {
+ t.Errorf("expected error to contain %q, but got: %v", expectedErr, errStr)
+ }
+ }
+
+ // Verify that the error contains the correct number of error messages
+ errorCount := 0
+ for _, expectedErr := range expectedErrors {
+ if strings.Contains(errStr, expectedErr) {
+ errorCount++
+ }
+ }
+
+ if errorCount < len(expectedErrors) {
+ t.Errorf("expected %d errors, but only found %d in: %v", len(expectedErrors), errorCount, errStr)
+ }
+}
diff --git a/helm/internal/plugin/metadata_v1.go b/helm/internal/plugin/metadata_v1.go
new file mode 100644
index 000000000..81dbc2e20
--- /dev/null
+++ b/helm/internal/plugin/metadata_v1.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+)
+
+// MetadataV1 is the APIVersion V1 plugin.yaml format
+type MetadataV1 struct {
+ // APIVersion specifies the plugin API version
+ APIVersion string `yaml:"apiVersion"`
+
+ // Name is the name of the plugin
+ Name string `yaml:"name"`
+
+ // Type of plugin (eg, cli/v1, getter/v1, postrenderer/v1)
+ Type string `yaml:"type"`
+
+ // Runtime specifies the runtime type (subprocess, wasm)
+ Runtime string `yaml:"runtime"`
+
+ // Version is a SemVer 2 version of the plugin.
+ Version string `yaml:"version"`
+
+ // SourceURL is the URL where this plugin can be found
+ SourceURL string `yaml:"sourceURL,omitempty"`
+
+ // Config contains the type-specific configuration for this plugin
+ Config map[string]any `yaml:"config"`
+
+ // RuntimeConfig contains the runtime-specific configuration
+ RuntimeConfig map[string]any `yaml:"runtimeConfig"`
+}
+
+func (m *MetadataV1) Validate() error {
+ if !validPluginName.MatchString(m.Name) {
+ return fmt.Errorf("invalid plugin `name`")
+ }
+
+ if m.APIVersion != "v1" {
+ return fmt.Errorf("invalid `apiVersion`: %q", m.APIVersion)
+ }
+
+ if m.Type == "" {
+ return fmt.Errorf("`type` missing")
+ }
+
+ if m.Runtime == "" {
+ return fmt.Errorf("`runtime` missing")
+ }
+
+ return nil
+}
diff --git a/helm/internal/plugin/plugin.go b/helm/internal/plugin/plugin.go
new file mode 100644
index 000000000..132b1739e
--- /dev/null
+++ b/helm/internal/plugin/plugin.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v4/internal/plugin"
+
+import (
+ "context"
+ "io"
+ "regexp"
+)
+
+const PluginFileName = "plugin.yaml"
+
+// Plugin defines a plugin instance. The client (Helm codebase) facing type that can be used to introspect and invoke a plugin
+type Plugin interface {
+ // Dir return the plugin directory (as an absolute path) on the filesystem
+ Dir() string
+
+ // Metadata describes the plugin's type, version, etc.
+ // (This metadata type is the converted and plugin version independented in-memory representation of the plugin.yaml file)
+ Metadata() Metadata
+
+ // Invoke takes the given input, and dispatches the contents to plugin instance
+ // The input is expected to be a JSON-serializable object, which the plugin will interpret according to its type
+ // The plugin is expected to return a JSON-serializable object, which the invoker
+ // will interpret according to the plugin's type
+ //
+ // Invoke can be thought of as a request/response mechanism. Similar to e.g. http.RoundTripper
+ //
+ // If plugin's execution fails with a non-zero "return code" (this is plugin runtime implementation specific)
+ // an InvokeExecError is returned
+ Invoke(ctx context.Context, input *Input) (*Output, error)
+}
+
+// PluginHook allows plugins to implement hooks that are invoked on plugin management events (install, upgrade, etc)
+type PluginHook interface { //nolint:revive
+ InvokeHook(event string) error
+}
+
+// Input defines the input message and parameters to be passed to the plugin
+type Input struct {
+ // Message represents the type-elided value to be passed to the plugin.
+ // The plugin is expected to interpret the message according to its type
+ // The message object must be JSON-serializable
+ Message any
+
+ // Optional: Reader to be consumed plugin's "stdin"
+ Stdin io.Reader
+
+ // Optional: Writers to consume the plugin's "stdout" and "stderr"
+ Stdout, Stderr io.Writer
+
+ // Optional: Env represents the environment as a list of "key=value" strings
+ // see os.Environ
+ Env []string
+}
+
+// Output defines the output message and parameters the passed from the plugin
+type Output struct {
+ // Message represents the type-elided value returned from the plugin
+ // The invoker is expected to interpret the message according to the plugin's type
+ // The message object must be JSON-serializable
+ Message any
+}
+
+// validPluginName is a regular expression that validates plugin names.
+//
+// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, _ and -.
+var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$")
diff --git a/helm/internal/plugin/plugin_test.go b/helm/internal/plugin/plugin_test.go
new file mode 100644
index 000000000..ae0b343f3
--- /dev/null
+++ b/helm/internal/plugin/plugin_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestValidPluginName(t *testing.T) {
+ validNames := map[string]string{
+ "lowercase": "myplugin",
+ "uppercase": "MYPLUGIN",
+ "mixed case": "MyPlugin",
+ "with digits": "plugin123",
+ "with hyphen": "my-plugin",
+ "with underscore": "my_plugin",
+ "mixed chars": "my-awesome_plugin_123",
+ }
+
+ for name, pluginName := range validNames {
+ t.Run("valid/"+name, func(t *testing.T) {
+ if !validPluginName.MatchString(pluginName) {
+ t.Errorf("expected %q to match validPluginName regex", pluginName)
+ }
+ })
+ }
+
+ invalidNames := map[string]string{
+ "empty": "",
+ "space": "my plugin",
+ "colon": "plugin:",
+ "period": "my.plugin",
+ "slash": "my/plugin",
+ "dollar": "$plugin",
+ "unicode": "plügîn",
+ }
+
+ for name, pluginName := range invalidNames {
+ t.Run("invalid/"+name, func(t *testing.T) {
+ if validPluginName.MatchString(pluginName) {
+ t.Errorf("expected %q to not match validPluginName regex", pluginName)
+ }
+ })
+ }
+}
+
+func mockSubprocessCLIPlugin(t *testing.T, pluginName string) *SubprocessPluginRuntime {
+ t.Helper()
+
+ rc := RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {OperatingSystem: "darwin", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}},
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"mock plugin\""}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"mock plugin\""}},
+ },
+ PlatformHooks: map[string][]PlatformCommand{
+ Install: {
+ {OperatingSystem: "darwin", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
+ {OperatingSystem: "linux", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"installing...\""}},
+ {OperatingSystem: "windows", Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"installing...\""}},
+ },
+ },
+ }
+
+ pluginDir := t.TempDir()
+
+ return &SubprocessPluginRuntime{
+ metadata: Metadata{
+ Name: pluginName,
+ Version: "v0.1.2",
+ Type: "cli/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{
+ Usage: "Mock plugin",
+ ShortHelp: "Mock plugin",
+ LongHelp: "Mock plugin for testing",
+ IgnoreFlags: false,
+ },
+ RuntimeConfig: &rc,
+ },
+ pluginDir: pluginDir, // NOTE: dir is empty (ie. plugin.yaml is not present)
+ RuntimeConfig: rc,
+ }
+}
diff --git a/helm/internal/plugin/plugin_type_registry.go b/helm/internal/plugin/plugin_type_registry.go
new file mode 100644
index 000000000..5138422bd
--- /dev/null
+++ b/helm/internal/plugin/plugin_type_registry.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+This file contains a "registry" of supported plugin types.
+
+It enables "dyanmic" operations on the go type associated with a given plugin type (see: `helm.sh/helm/v4/internal/plugin/schema` package)
+
+Examples:
+
+```
+
+ // Create a new instance of the output message type for a given plugin type:
+
+ pluginType := "cli/v1" // for example
+ ptm, ok := pluginTypesIndex[pluginType]
+ if !ok {
+ return fmt.Errorf("unknown plugin type %q", pluginType)
+ }
+
+ outputMessageType := reflect.Zero(ptm.outputType).Interface()
+
+```
+
+```
+// Create a new instance of the config type for a given plugin type
+
+ pluginType := "cli/v1" // for example
+ ptm, ok := pluginTypesIndex[pluginType]
+ if !ok {
+ return nil
+ }
+
+ config := reflect.New(ptm.configType).Interface().(Config) // `config` is variable of type `Config`, with
+
+ // validate
+ err := config.Validate()
+ if err != nil { // handle error }
+
+ // assert to concrete type if needed
+ cliConfig := config.(*schema.ConfigCLIV1)
+
+```
+*/
+
+package plugin
+
+import (
+ "reflect"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+type pluginTypeMeta struct {
+ pluginType string
+ inputType reflect.Type
+ outputType reflect.Type
+ configType reflect.Type
+}
+
+var pluginTypes = []pluginTypeMeta{
+ {
+ pluginType: "test/v1",
+ inputType: reflect.TypeFor[schema.InputMessageTestV1](),
+ outputType: reflect.TypeFor[schema.OutputMessageTestV1](),
+ configType: reflect.TypeFor[schema.ConfigTestV1](),
+ },
+ {
+ pluginType: "cli/v1",
+ inputType: reflect.TypeFor[schema.InputMessageCLIV1](),
+ outputType: reflect.TypeFor[schema.OutputMessageCLIV1](),
+ configType: reflect.TypeFor[schema.ConfigCLIV1](),
+ },
+ {
+ pluginType: "getter/v1",
+ inputType: reflect.TypeFor[schema.InputMessageGetterV1](),
+ outputType: reflect.TypeFor[schema.OutputMessageGetterV1](),
+ configType: reflect.TypeFor[schema.ConfigGetterV1](),
+ },
+ {
+ pluginType: "postrenderer/v1",
+ inputType: reflect.TypeFor[schema.InputMessagePostRendererV1](),
+ outputType: reflect.TypeFor[schema.OutputMessagePostRendererV1](),
+ configType: reflect.TypeFor[schema.ConfigPostRendererV1](),
+ },
+}
+
+var pluginTypesIndex = func() map[string]*pluginTypeMeta {
+ result := make(map[string]*pluginTypeMeta, len(pluginTypes))
+ for _, m := range pluginTypes {
+ result[m.pluginType] = &m
+ }
+ return result
+}()
diff --git a/helm/internal/plugin/plugin_type_registry_test.go b/helm/internal/plugin/plugin_type_registry_test.go
new file mode 100644
index 000000000..22f26262d
--- /dev/null
+++ b/helm/internal/plugin/plugin_type_registry_test.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func TestMakeOutputMessage(t *testing.T) {
+ ptm := pluginTypesIndex["getter/v1"]
+ outputType := reflect.Zero(ptm.outputType).Interface()
+ assert.IsType(t, schema.OutputMessageGetterV1{}, outputType)
+
+}
+
+func TestMakeConfig(t *testing.T) {
+ ptm := pluginTypesIndex["getter/v1"]
+ config := reflect.New(ptm.configType).Interface().(Config)
+ assert.IsType(t, &schema.ConfigGetterV1{}, config)
+}
diff --git a/helm/internal/plugin/runtime.go b/helm/internal/plugin/runtime.go
new file mode 100644
index 000000000..7d39a9a43
--- /dev/null
+++ b/helm/internal/plugin/runtime.go
@@ -0,0 +1,86 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "strings"
+
+ "go.yaml.in/yaml/v3"
+)
+
+// Runtime represents a plugin runtime (subprocess, extism, etc) ie. how a plugin should be executed
+// Runtime is responsible for instantiating plugins that implement the runtime
+// TODO: could call this something more like "PluginRuntimeCreator"?
+type Runtime interface {
+ // CreatePlugin creates a plugin instance from the given metadata
+ CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error)
+
+ // TODO: move config unmarshalling to the runtime?
+ // UnmarshalConfig(runtimeConfigRaw map[string]any) (RuntimeConfig, error)
+}
+
+// RuntimeConfig represents the assertable type for a plugin's runtime configuration.
+// It is expected to type assert (cast) the a RuntimeConfig to its expected type
+type RuntimeConfig interface {
+ Validate() error
+}
+
+func remarshalRuntimeConfig[T RuntimeConfig](runtimeData map[string]any) (RuntimeConfig, error) {
+ data, err := yaml.Marshal(runtimeData)
+ if err != nil {
+ return nil, err
+ }
+
+ var config T
+ if err := yaml.Unmarshal(data, &config); err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
+
+// ParseEnv takes a list of "KEY=value" environment variable strings
+// and transforms the result into a map[KEY]=value
+//
+// - empty input strings are ignored
+// - input strings with no value are stored as empty strings
+// - duplicate keys overwrite earlier values
+func ParseEnv(env []string) map[string]string {
+ result := make(map[string]string, len(env))
+ for _, envVar := range env {
+ parts := strings.SplitN(envVar, "=", 2)
+ if len(parts) > 0 && parts[0] != "" {
+ key := parts[0]
+ var value string
+ if len(parts) > 1 {
+ value = parts[1]
+ }
+ result[key] = value
+ }
+ }
+ return result
+}
+
+// FormatEnv takes a map[KEY]=value and transforms it into
+// a list of "KEY=value" environment variable strings
+func FormatEnv(env map[string]string) []string {
+ result := make([]string, 0, len(env))
+ for key, value := range env {
+ result = append(result, fmt.Sprintf("%s=%s", key, value))
+ }
+ return result
+}
diff --git a/helm/internal/plugin/runtime_extismv1.go b/helm/internal/plugin/runtime_extismv1.go
new file mode 100644
index 000000000..cd9a02535
--- /dev/null
+++ b/helm/internal/plugin/runtime_extismv1.go
@@ -0,0 +1,292 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "reflect"
+
+ extism "github.com/extism/go-sdk"
+ "github.com/tetratelabs/wazero"
+)
+
+const ExtismV1WasmBinaryFilename = "plugin.wasm"
+
+// RuntimeConfigExtismV1Memory exposes the Wasm/Extism memory options for the plugin
+type RuntimeConfigExtismV1Memory struct {
+ // The max amount of pages the plugin can allocate
+ // One page is 64Kib. e.g. 16 pages would require 1MiB.
+ // Default is 4 pages (256KiB)
+ MaxPages uint32 `yaml:"maxPages,omitempty"`
+
+ // The max size of an Extism HTTP response in bytes
+ // Default is 4096 bytes (4KiB)
+ MaxHTTPResponseBytes int64 `yaml:"maxHttpResponseBytes,omitempty"`
+
+ // The max size of all Extism vars in bytes
+ // Default is 4096 bytes (4KiB)
+ MaxVarBytes int64 `yaml:"maxVarBytes,omitempty"`
+}
+
+// RuntimeConfigExtismV1FileSystem exposes filesystem options for the configuration
+// TODO: should Helm expose AllowedPaths?
+type RuntimeConfigExtismV1FileSystem struct {
+ // If specified, a temporary directory will be created and mapped to /tmp in the plugin's filesystem.
+ // Data written to the directory will be visible on the host filesystem.
+ // The directory will be removed when the plugin invocation completes.
+ CreateTempDir bool `yaml:"createTempDir,omitempty"`
+}
+
+// RuntimeConfigExtismV1 defines the user-configurable options the plugin's Extism runtime
+// The format loosely follows the Extism Manifest format: https://extism.org/docs/concepts/manifest/
+type RuntimeConfigExtismV1 struct {
+ // Describes the limits on the memory the plugin may be allocated.
+ Memory RuntimeConfigExtismV1Memory `yaml:"memory"`
+
+ // The "config" key is a free-form map that can be passed to the plugin.
+ // The plugin must interpret arbitrary data this map may contain
+ Config map[string]string `yaml:"config,omitempty"`
+
+ // An optional set of hosts this plugin can communicate with.
+ // This only has an effect if the plugin makes HTTP requests.
+ // If not specified, then no hosts are allowed.
+ AllowedHosts []string `yaml:"allowedHosts,omitempty"`
+
+ FileSystem RuntimeConfigExtismV1FileSystem `yaml:"fileSystem,omitempty"`
+
+ // The timeout in milliseconds for the plugin to execute
+ Timeout uint64 `yaml:"timeout,omitempty"`
+
+ // HostFunction names exposed in Helm the plugin may access
+ // see: https://extism.org/docs/concepts/host-functions/
+ HostFunctions []string `yaml:"hostFunctions,omitempty"`
+
+ // The name of entry function name to call in the plugin
+ // Defaults to "helm_plugin_main".
+ EntryFuncName string `yaml:"entryFuncName,omitempty"`
+}
+
+var _ RuntimeConfig = (*RuntimeConfigExtismV1)(nil)
+
+func (r *RuntimeConfigExtismV1) Validate() error {
+ // TODO
+ return nil
+}
+
+type RuntimeExtismV1 struct {
+ HostFunctions map[string]extism.HostFunction
+ CompilationCache wazero.CompilationCache
+}
+
+var _ Runtime = (*RuntimeExtismV1)(nil)
+
+func (r *RuntimeExtismV1) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) {
+
+ rc, ok := metadata.RuntimeConfig.(*RuntimeConfigExtismV1)
+ if !ok {
+ return nil, fmt.Errorf("invalid extism/v1 plugin runtime config type: %T", metadata.RuntimeConfig)
+ }
+
+ wasmFile := filepath.Join(pluginDir, ExtismV1WasmBinaryFilename)
+ if _, err := os.Stat(wasmFile); err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("wasm binary missing for extism/v1 plugin: %q", wasmFile)
+ }
+ return nil, fmt.Errorf("failed to stat extism/v1 plugin wasm binary %q: %w", wasmFile, err)
+ }
+
+ return &ExtismV1PluginRuntime{
+ metadata: *metadata,
+ dir: pluginDir,
+ rc: rc,
+ r: r,
+ }, nil
+}
+
+type ExtismV1PluginRuntime struct {
+ metadata Metadata
+ dir string
+ rc *RuntimeConfigExtismV1
+ r *RuntimeExtismV1
+}
+
+var _ Plugin = (*ExtismV1PluginRuntime)(nil)
+
+func (p *ExtismV1PluginRuntime) Metadata() Metadata {
+ return p.metadata
+}
+
+func (p *ExtismV1PluginRuntime) Dir() string {
+ return p.dir
+}
+
+func (p *ExtismV1PluginRuntime) Invoke(ctx context.Context, input *Input) (*Output, error) {
+
+ var tmpDir string
+ if p.rc.FileSystem.CreateTempDir {
+ tmpDirInner, err := os.MkdirTemp(os.TempDir(), "helm-plugin-*")
+ slog.Debug("created plugin temp dir", slog.String("dir", tmpDirInner), slog.String("plugin", p.metadata.Name))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temp dir for extism compilation cache: %w", err)
+ }
+ defer func() {
+ if err := os.RemoveAll(tmpDir); err != nil {
+ slog.Warn("failed to remove plugin temp dir", slog.String("dir", tmpDir), slog.String("plugin", p.metadata.Name), slog.String("error", err.Error()))
+ }
+ }()
+
+ tmpDir = tmpDirInner
+ }
+
+ manifest, err := buildManifest(p.dir, tmpDir, p.rc)
+ if err != nil {
+ return nil, err
+ }
+
+ config := buildPluginConfig(input, p.r)
+
+ hostFunctions, err := buildHostFunctions(p.r.HostFunctions, p.rc)
+ if err != nil {
+ return nil, err
+ }
+
+ pe, err := extism.NewPlugin(ctx, manifest, config, hostFunctions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create existing plugin: %w", err)
+ }
+
+ pe.SetLogger(func(logLevel extism.LogLevel, s string) {
+ slog.Debug(s, slog.String("level", logLevel.String()), slog.String("plugin", p.metadata.Name))
+ })
+
+ inputData, err := json.Marshal(input.Message)
+ if err != nil {
+ return nil, fmt.Errorf("failed to json marshal plugin input message: %T: %w", input.Message, err)
+ }
+
+ slog.Debug("plugin input", slog.String("plugin", p.metadata.Name), slog.String("inputData", string(inputData)))
+
+ entryFuncName := p.rc.EntryFuncName
+ if entryFuncName == "" {
+ entryFuncName = "helm_plugin_main"
+ }
+
+ exitCode, outputData, err := pe.Call(entryFuncName, inputData)
+ if err != nil {
+ return nil, fmt.Errorf("plugin error: %w", err)
+ }
+
+ if exitCode != 0 {
+ return nil, &InvokeExecError{
+ ExitCode: int(exitCode),
+ }
+ }
+
+ slog.Debug("plugin output", slog.String("plugin", p.metadata.Name), slog.Int("exitCode", int(exitCode)), slog.String("outputData", string(outputData)))
+
+ outputMessage := reflect.New(pluginTypesIndex[p.metadata.Type].outputType)
+ if err := json.Unmarshal(outputData, outputMessage.Interface()); err != nil {
+ return nil, fmt.Errorf("failed to json marshal plugin output message: %T: %w", outputMessage, err)
+ }
+
+ output := &Output{
+ Message: outputMessage.Elem().Interface(),
+ }
+
+ return output, nil
+}
+
+func buildManifest(pluginDir string, tmpDir string, rc *RuntimeConfigExtismV1) (extism.Manifest, error) {
+ wasmFile := filepath.Join(pluginDir, ExtismV1WasmBinaryFilename)
+
+ allowedHosts := rc.AllowedHosts
+ if allowedHosts == nil {
+ allowedHosts = []string{}
+ }
+
+ allowedPaths := map[string]string{}
+ if tmpDir != "" {
+ allowedPaths[tmpDir] = "/tmp"
+ }
+
+ return extism.Manifest{
+ Wasm: []extism.Wasm{
+ extism.WasmFile{
+ Path: wasmFile,
+ Name: wasmFile,
+ },
+ },
+ Memory: &extism.ManifestMemory{
+ MaxPages: rc.Memory.MaxPages,
+ MaxHttpResponseBytes: rc.Memory.MaxHTTPResponseBytes,
+ MaxVarBytes: rc.Memory.MaxVarBytes,
+ },
+ Config: rc.Config,
+ AllowedHosts: allowedHosts,
+ AllowedPaths: allowedPaths,
+ Timeout: rc.Timeout,
+ }, nil
+}
+
+func buildPluginConfig(input *Input, r *RuntimeExtismV1) extism.PluginConfig {
+ mc := wazero.NewModuleConfig().
+ WithSysWalltime()
+ if input.Stdin != nil {
+ mc = mc.WithStdin(input.Stdin)
+ }
+ if input.Stdout != nil {
+ mc = mc.WithStdout(input.Stdout)
+ }
+ if input.Stderr != nil {
+ mc = mc.WithStderr(input.Stderr)
+ }
+ if len(input.Env) > 0 {
+ env := ParseEnv(input.Env)
+ for k, v := range env {
+ mc = mc.WithEnv(k, v)
+ }
+ }
+
+ config := extism.PluginConfig{
+ ModuleConfig: mc,
+ RuntimeConfig: wazero.NewRuntimeConfigCompiler().
+ WithCloseOnContextDone(true).
+ WithCompilationCache(r.CompilationCache),
+ EnableWasi: true,
+ EnableHttpResponseHeaders: true,
+ }
+
+ return config
+}
+
+func buildHostFunctions(hostFunctions map[string]extism.HostFunction, rc *RuntimeConfigExtismV1) ([]extism.HostFunction, error) {
+ result := make([]extism.HostFunction, len(rc.HostFunctions))
+ for _, fnName := range rc.HostFunctions {
+ fn, ok := hostFunctions[fnName]
+ if !ok {
+ return nil, fmt.Errorf("plugin requested host function %q not found", fnName)
+ }
+
+ result = append(result, fn)
+ }
+
+ return result, nil
+}
diff --git a/helm/internal/plugin/runtime_extismv1_test.go b/helm/internal/plugin/runtime_extismv1_test.go
new file mode 100644
index 000000000..8d9c55195
--- /dev/null
+++ b/helm/internal/plugin/runtime_extismv1_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ extism "github.com/extism/go-sdk"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type pluginRaw struct {
+ Metadata Metadata
+ Dir string
+}
+
+func buildLoadExtismPlugin(t *testing.T, dir string) pluginRaw {
+ t.Helper()
+
+ pluginFile := filepath.Join(dir, PluginFileName)
+
+ metadataData, err := os.ReadFile(pluginFile)
+ require.NoError(t, err)
+
+ m, err := loadMetadata(metadataData)
+ require.NoError(t, err)
+ require.Equal(t, "extism/v1", m.Runtime, "expected plugin runtime to be extism/v1")
+
+ cmd := exec.Command("make", "-C", dir)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ require.NoError(t, cmd.Run(), "failed to build plugin in %q", dir)
+
+ return pluginRaw{
+ Metadata: *m,
+ Dir: dir,
+ }
+}
+
+func TestRuntimeConfigExtismV1Validate(t *testing.T) {
+ rc := RuntimeConfigExtismV1{}
+ err := rc.Validate()
+ assert.NoError(t, err, "expected no error for empty RuntimeConfigExtismV1")
+}
+
+func TestRuntimeExtismV1InvokePlugin(t *testing.T) {
+ r := RuntimeExtismV1{}
+
+ pr := buildLoadExtismPlugin(t, "testdata/src/extismv1-test")
+ require.Equal(t, "test/v1", pr.Metadata.Type)
+
+ p, err := r.CreatePlugin(pr.Dir, &pr.Metadata)
+
+ assert.NoError(t, err, "expected no error creating plugin")
+ assert.NotNil(t, p, "expected plugin to be created")
+
+ output, err := p.Invoke(t.Context(), &Input{
+ Message: schema.InputMessageTestV1{
+ Name: "Phippy",
+ },
+ })
+ require.Nil(t, err)
+
+ msg := output.Message.(schema.OutputMessageTestV1)
+ assert.Equal(t, "Hello, Phippy! (6)", msg.Greeting)
+}
+
+func TestBuildManifest(t *testing.T) {
+ rc := &RuntimeConfigExtismV1{
+ Memory: RuntimeConfigExtismV1Memory{
+ MaxPages: 8,
+ MaxHTTPResponseBytes: 81920,
+ MaxVarBytes: 8192,
+ },
+ FileSystem: RuntimeConfigExtismV1FileSystem{
+ CreateTempDir: true,
+ },
+ Config: map[string]string{"CONFIG_KEY": "config_value"},
+ AllowedHosts: []string{"example.com", "api.example.com"},
+ Timeout: 5000,
+ }
+
+ expected := extism.Manifest{
+ Wasm: []extism.Wasm{
+ extism.WasmFile{
+ Path: "/path/to/plugin/plugin.wasm",
+ Name: "/path/to/plugin/plugin.wasm",
+ },
+ },
+ Memory: &extism.ManifestMemory{
+ MaxPages: 8,
+ MaxHttpResponseBytes: 81920,
+ MaxVarBytes: 8192,
+ },
+ Config: map[string]string{"CONFIG_KEY": "config_value"},
+ AllowedHosts: []string{"example.com", "api.example.com"},
+ AllowedPaths: map[string]string{"/tmp/foo": "/tmp"},
+ Timeout: 5000,
+ }
+
+ manifest, err := buildManifest("/path/to/plugin", "/tmp/foo", rc)
+ require.NoError(t, err)
+ assert.Equal(t, expected, manifest)
+}
diff --git a/helm/internal/plugin/runtime_subprocess.go b/helm/internal/plugin/runtime_subprocess.go
new file mode 100644
index 000000000..c836c1c6d
--- /dev/null
+++ b/helm/internal/plugin/runtime_subprocess.go
@@ -0,0 +1,278 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "log/slog"
+ "maps"
+ "os"
+ "os/exec"
+ "slices"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+// SubprocessProtocolCommand maps a given protocol to the getter command used to retrieve artifacts for that protocol
+type SubprocessProtocolCommand struct {
+ // Protocols are the list of schemes from the charts URL.
+ Protocols []string `yaml:"protocols"`
+ // PlatformCommand is the platform based command which the plugin performs
+ // to download for the corresponding getter Protocols.
+ PlatformCommand []PlatformCommand `yaml:"platformCommand"`
+}
+
+// RuntimeConfigSubprocess implements RuntimeConfig for RuntimeSubprocess
+type RuntimeConfigSubprocess struct {
+ // PlatformCommand is a list containing a plugin command, with a platform selector and support for args.
+ PlatformCommand []PlatformCommand `yaml:"platformCommand"`
+ // PlatformHooks are commands that will run on plugin events, with a platform selector and support for args.
+ PlatformHooks PlatformHooks `yaml:"platformHooks"`
+ // ProtocolCommands allows the plugin to specify protocol specific commands
+ //
+ // Obsolete/deprecated: This is a compatibility hangover from the old plugin downloader mechanism, which was extended
+ // to support multiple protocols in a given plugin. The command supplied in PlatformCommand should implement protocol
+ // specific logic by inspecting the download URL
+ ProtocolCommands []SubprocessProtocolCommand `yaml:"protocolCommands,omitempty"`
+
+ expandHookArgs bool
+}
+
+var _ RuntimeConfig = (*RuntimeConfigSubprocess)(nil)
+
+func (r *RuntimeConfigSubprocess) GetType() string { return "subprocess" }
+
+func (r *RuntimeConfigSubprocess) Validate() error {
+ return nil
+}
+
+type RuntimeSubprocess struct {
+ EnvVars map[string]string
+}
+
+var _ Runtime = (*RuntimeSubprocess)(nil)
+
+// CreatePlugin implementation for Runtime
+func (r *RuntimeSubprocess) CreatePlugin(pluginDir string, metadata *Metadata) (Plugin, error) {
+ return &SubprocessPluginRuntime{
+ metadata: *metadata,
+ pluginDir: pluginDir,
+ RuntimeConfig: *(metadata.RuntimeConfig.(*RuntimeConfigSubprocess)),
+ EnvVars: maps.Clone(r.EnvVars),
+ }, nil
+}
+
+// SubprocessPluginRuntime implements the Plugin interface for subprocess execution
+type SubprocessPluginRuntime struct {
+ metadata Metadata
+ pluginDir string
+ RuntimeConfig RuntimeConfigSubprocess
+ EnvVars map[string]string
+}
+
+var _ Plugin = (*SubprocessPluginRuntime)(nil)
+
+func (r *SubprocessPluginRuntime) Dir() string {
+ return r.pluginDir
+}
+
+func (r *SubprocessPluginRuntime) Metadata() Metadata {
+ return r.metadata
+}
+
+func (r *SubprocessPluginRuntime) Invoke(_ context.Context, input *Input) (*Output, error) {
+ switch input.Message.(type) {
+ case schema.InputMessageCLIV1:
+ return r.runCLI(input)
+ case schema.InputMessageGetterV1:
+ return r.runGetter(input)
+ case schema.InputMessagePostRendererV1:
+ return r.runPostrenderer(input)
+ default:
+ return nil, fmt.Errorf("unsupported subprocess plugin type %q", r.metadata.Type)
+ }
+}
+
+// InvokeWithEnv executes a plugin command with custom environment and I/O streams
+// This method allows execution with different command/args than the plugin's default
+func (r *SubprocessPluginRuntime) InvokeWithEnv(main string, argv []string, env []string, stdin io.Reader, stdout, stderr io.Writer) error {
+ mainCmdExp := os.ExpandEnv(main)
+ cmd := exec.Command(mainCmdExp, argv...)
+ cmd.Env = slices.Clone(os.Environ())
+ cmd.Env = append(
+ cmd.Env,
+ fmt.Sprintf("HELM_PLUGIN_NAME=%s", r.metadata.Name),
+ fmt.Sprintf("HELM_PLUGIN_DIR=%s", r.pluginDir))
+ cmd.Env = append(cmd.Env, env...)
+
+ cmd.Stdin = stdin
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SubprocessPluginRuntime) InvokeHook(event string) error {
+ cmds := r.RuntimeConfig.PlatformHooks[event]
+
+ if len(cmds) == 0 {
+ return nil
+ }
+
+ env := ParseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+
+ main, argv, err := PrepareCommands(cmds, r.RuntimeConfig.expandHookArgs, []string{}, env)
+ if err != nil {
+ return err
+ }
+
+ cmd := exec.Command(main, argv...)
+ cmd.Env = FormatEnv(env)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+
+ slog.Debug("executing plugin hook command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := cmd.Run(); err != nil {
+ if eerr, ok := err.(*exec.ExitError); ok {
+ os.Stderr.Write(eerr.Stderr)
+ return fmt.Errorf("plugin %s hook for %q exited with error", event, r.metadata.Name)
+ }
+ return err
+ }
+ return nil
+}
+
+// TODO decide the best way to handle this code
+// right now we implement status and error return in 3 slightly different ways in this file
+// then replace the other three with a call to this func
+func executeCmd(prog *exec.Cmd, pluginName string) error {
+ if err := prog.Run(); err != nil {
+ if eerr, ok := err.(*exec.ExitError); ok {
+ slog.Debug(
+ "plugin execution failed",
+ slog.String("pluginName", pluginName),
+ slog.String("error", err.Error()),
+ slog.Int("exitCode", eerr.ExitCode()),
+ slog.String("stderr", string(bytes.TrimSpace(eerr.Stderr))))
+ return &InvokeExecError{
+ Err: fmt.Errorf("plugin %q exited with error", pluginName),
+ ExitCode: eerr.ExitCode(),
+ }
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (r *SubprocessPluginRuntime) runCLI(input *Input) (*Output, error) {
+ if _, ok := input.Message.(schema.InputMessageCLIV1); !ok {
+ return nil, fmt.Errorf("plugin %q input message does not implement InputMessageCLIV1", r.metadata.Name)
+ }
+
+ extraArgs := input.Message.(schema.InputMessageCLIV1).ExtraArgs
+
+ cmds := r.RuntimeConfig.PlatformCommand
+
+ env := ParseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ maps.Insert(env, maps.All(ParseEnv(input.Env)))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+
+ command, args, err := PrepareCommands(cmds, true, extraArgs, env)
+ if err != nil {
+ return nil, fmt.Errorf("failed to prepare plugin command: %w", err)
+ }
+
+ cmd := exec.Command(command, args...)
+ cmd.Env = FormatEnv(env)
+
+ cmd.Stdin = input.Stdin
+ cmd.Stdout = input.Stdout
+ cmd.Stderr = input.Stderr
+
+ slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ Message: schema.OutputMessageCLIV1{},
+ }, nil
+}
+
+func (r *SubprocessPluginRuntime) runPostrenderer(input *Input) (*Output, error) {
+ if _, ok := input.Message.(schema.InputMessagePostRendererV1); !ok {
+ return nil, fmt.Errorf("plugin %q input message does not implement InputMessagePostRendererV1", r.metadata.Name)
+ }
+
+ env := ParseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ maps.Insert(env, maps.All(ParseEnv(input.Env)))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+
+ msg := input.Message.(schema.InputMessagePostRendererV1)
+ cmds := r.RuntimeConfig.PlatformCommand
+ command, args, err := PrepareCommands(cmds, true, msg.ExtraArgs, env)
+ if err != nil {
+ return nil, fmt.Errorf("failed to prepare plugin command: %w", err)
+ }
+
+ cmd := exec.Command(
+ command,
+ args...)
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ defer stdin.Close()
+ io.Copy(stdin, msg.Manifests)
+ }()
+
+ postRendered := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+
+ cmd.Env = FormatEnv(env)
+ cmd.Stdout = postRendered
+ cmd.Stderr = stderr
+
+ slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ Message: schema.OutputMessagePostRendererV1{
+ Manifests: postRendered,
+ },
+ }, nil
+}
diff --git a/helm/internal/plugin/runtime_subprocess_getter.go b/helm/internal/plugin/runtime_subprocess_getter.go
new file mode 100644
index 000000000..fa6f470a9
--- /dev/null
+++ b/helm/internal/plugin/runtime_subprocess_getter.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "bytes"
+ "fmt"
+ "log/slog"
+ "maps"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "slices"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func getProtocolCommand(commands []SubprocessProtocolCommand, protocol string) *SubprocessProtocolCommand {
+ for _, c := range commands {
+ if slices.Contains(c.Protocols, protocol) {
+ return &c
+ }
+ }
+
+ return nil
+}
+
+// TODO can we replace a lot of this func with RuntimeSubprocess.invokeWithEnv?
+func (r *SubprocessPluginRuntime) runGetter(input *Input) (*Output, error) {
+ msg, ok := (input.Message).(schema.InputMessageGetterV1)
+ if !ok {
+ return nil, fmt.Errorf("expected input type schema.InputMessageGetterV1, got %T", input)
+ }
+
+ tmpDir, err := os.MkdirTemp(os.TempDir(), fmt.Sprintf("helm-plugin-%s-", r.metadata.Name))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temporary directory: %w", err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ d := getProtocolCommand(r.RuntimeConfig.ProtocolCommands, msg.Protocol)
+ if d == nil {
+ return nil, fmt.Errorf("no downloader found for protocol %q", msg.Protocol)
+ }
+
+ env := ParseEnv(os.Environ())
+ maps.Insert(env, maps.All(r.EnvVars))
+ maps.Insert(env, maps.All(ParseEnv(input.Env)))
+ env["HELM_PLUGIN_NAME"] = r.metadata.Name
+ env["HELM_PLUGIN_DIR"] = r.pluginDir
+ env["HELM_PLUGIN_USERNAME"] = msg.Options.Username
+ env["HELM_PLUGIN_PASSWORD"] = msg.Options.Password
+ env["HELM_PLUGIN_PASS_CREDENTIALS_ALL"] = fmt.Sprintf("%t", msg.Options.PassCredentialsAll)
+
+ command, args, err := PrepareCommands(d.PlatformCommand, false, []string{}, env)
+ if err != nil {
+ return nil, fmt.Errorf("failed to prepare commands for protocol %q: %w", msg.Protocol, err)
+ }
+
+ args = append(
+ args,
+ msg.Options.CertFile,
+ msg.Options.KeyFile,
+ msg.Options.CAFile,
+ msg.Href)
+
+ buf := bytes.Buffer{} // subprocess getters are expected to write content to stdout
+
+ pluginCommand := filepath.Join(r.pluginDir, command)
+ cmd := exec.Command(
+ pluginCommand,
+ args...)
+ cmd.Env = FormatEnv(env)
+ cmd.Stdout = &buf
+ cmd.Stderr = os.Stderr
+
+ slog.Debug("executing plugin command", slog.String("pluginName", r.metadata.Name), slog.String("command", cmd.String()))
+ if err := executeCmd(cmd, r.metadata.Name); err != nil {
+ return nil, err
+ }
+
+ return &Output{
+ Message: schema.OutputMessageGetterV1{
+ Data: buf.Bytes(),
+ },
+ }, nil
+}
diff --git a/helm/internal/plugin/runtime_subprocess_hooks.go b/helm/internal/plugin/runtime_subprocess_hooks.go
new file mode 100644
index 000000000..7b4ff5a38
--- /dev/null
+++ b/helm/internal/plugin/runtime_subprocess_hooks.go
@@ -0,0 +1,32 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin // import "helm.sh/helm/v4/internal/plugin"
+
+// Types of hooks
+const (
+ // Install is executed after the plugin is added.
+ Install = "install"
+ // Delete is executed after the plugin is removed.
+ Delete = "delete"
+ // Update is executed after the plugin is updated.
+ Update = "update"
+)
+
+// PlatformHooks is a map of events to a command for a particular operating system and architecture.
+type PlatformHooks map[string][]PlatformCommand
+
+// Hooks is a map of events to commands.
+type Hooks map[string]string
diff --git a/helm/internal/plugin/runtime_subprocess_test.go b/helm/internal/plugin/runtime_subprocess_test.go
new file mode 100644
index 000000000..ed251d28b
--- /dev/null
+++ b/helm/internal/plugin/runtime_subprocess_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.yaml.in/yaml/v3"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func mockSubprocessCLIPluginErrorExit(t *testing.T, pluginName string, exitCode uint8) *SubprocessPluginRuntime {
+ t.Helper()
+
+ rc := RuntimeConfigSubprocess{
+ PlatformCommand: []PlatformCommand{
+ {Command: "sh", Args: []string{"-c", fmt.Sprintf("echo \"mock plugin $@\"; exit %d", exitCode)}},
+ },
+ }
+
+ pluginDir := t.TempDir()
+
+ md := Metadata{
+ Name: pluginName,
+ Version: "v0.1.2",
+ Type: "cli/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{
+ Usage: "Mock plugin",
+ ShortHelp: "Mock plugin",
+ LongHelp: "Mock plugin for testing",
+ IgnoreFlags: false,
+ },
+ RuntimeConfig: &rc,
+ }
+
+ data, err := yaml.Marshal(md)
+ require.NoError(t, err)
+ os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), data, 0o644)
+
+ return &SubprocessPluginRuntime{
+ metadata: md,
+ pluginDir: pluginDir,
+ RuntimeConfig: rc,
+ }
+}
+
+func TestSubprocessPluginRuntime(t *testing.T) {
+ p := mockSubprocessCLIPluginErrorExit(t, "foo", 56)
+
+ output, err := p.Invoke(t.Context(), &Input{
+ Message: schema.InputMessageCLIV1{
+ ExtraArgs: []string{"arg1", "arg2"},
+ // Env: []string{"FOO=bar"},
+ },
+ })
+
+ require.Error(t, err)
+ ieerr, ok := err.(*InvokeExecError)
+ require.True(t, ok, "expected InvokeExecError, got %T", err)
+ assert.Equal(t, 56, ieerr.ExitCode)
+
+ assert.Nil(t, output)
+}
diff --git a/helm/internal/plugin/runtime_test.go b/helm/internal/plugin/runtime_test.go
new file mode 100644
index 000000000..5552af08e
--- /dev/null
+++ b/helm/internal/plugin/runtime_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseEnv(t *testing.T) {
+ type testCase struct {
+ env []string
+ expected map[string]string
+ }
+
+ testCases := map[string]testCase{
+ "empty": {
+ env: []string{},
+ expected: map[string]string{},
+ },
+ "single": {
+ env: []string{"KEY=value"},
+ expected: map[string]string{"KEY": "value"},
+ },
+ "multiple": {
+ env: []string{"KEY1=value1", "KEY2=value2"},
+ expected: map[string]string{"KEY1": "value1", "KEY2": "value2"},
+ },
+ "no_value": {
+ env: []string{"KEY1=value1", "KEY2="},
+ expected: map[string]string{"KEY1": "value1", "KEY2": ""},
+ },
+ "duplicate_keys": {
+ env: []string{"KEY=value1", "KEY=value2"},
+ expected: map[string]string{"KEY": "value2"}, // last value should overwrite
+ },
+ "empty_strings": {
+ env: []string{"", "KEY=value", ""},
+ expected: map[string]string{"KEY": "value"},
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ result := ParseEnv(tc.env)
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+func TestFormatEnv(t *testing.T) {
+ type testCase struct {
+ env map[string]string
+ expected []string
+ }
+
+ testCases := map[string]testCase{
+ "empty": {
+ env: map[string]string{},
+ expected: []string{},
+ },
+ "single": {
+ env: map[string]string{"KEY": "value"},
+ expected: []string{"KEY=value"},
+ },
+ "multiple": {
+ env: map[string]string{"KEY1": "value1", "KEY2": "value2"},
+ expected: []string{"KEY1=value1", "KEY2=value2"},
+ },
+ "empty_key": {
+ env: map[string]string{"": "value1", "KEY2": "value2"},
+ expected: []string{"=value1", "KEY2=value2"},
+ },
+ "empty_value": {
+ env: map[string]string{"KEY1": "value1", "KEY2": "", "KEY3": "value3"},
+ expected: []string{"KEY1=value1", "KEY2=", "KEY3=value3"},
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ result := FormatEnv(tc.env)
+ assert.ElementsMatch(t, tc.expected, result)
+ })
+ }
+}
diff --git a/helm/internal/plugin/schema/cli.go b/helm/internal/plugin/schema/cli.go
new file mode 100644
index 000000000..2282580f5
--- /dev/null
+++ b/helm/internal/plugin/schema/cli.go
@@ -0,0 +1,45 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package schema
+
+import (
+ "bytes"
+)
+
+type InputMessageCLIV1 struct {
+ ExtraArgs []string `json:"extraArgs"`
+}
+
+type OutputMessageCLIV1 struct {
+ Data *bytes.Buffer `json:"data"`
+}
+
+// ConfigCLIV1 represents the configuration for CLI plugins
+type ConfigCLIV1 struct {
+ // Usage is the single-line usage text shown in help
+ // For recommended syntax, see [spf13/cobra.command.Command] Use field comment:
+ // https://pkg.go.dev/github.com/spf13/cobra#Command
+ Usage string `yaml:"usage"`
+ // ShortHelp is the short description shown in the 'helm help' output
+ ShortHelp string `yaml:"shortHelp"`
+ // LongHelp is the long message shown in the 'helm help ' output
+ LongHelp string `yaml:"longHelp"`
+ // IgnoreFlags ignores any flags passed in from Helm
+ IgnoreFlags bool `yaml:"ignoreFlags"`
+}
+
+func (c *ConfigCLIV1) Validate() error {
+ // Config validation for CLI plugins
+ return nil
+}
diff --git a/helm/internal/plugin/schema/doc.go b/helm/internal/plugin/schema/doc.go
new file mode 100644
index 000000000..4b3fe5d49
--- /dev/null
+++ b/helm/internal/plugin/schema/doc.go
@@ -0,0 +1,18 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+
+ */
+
+package schema
diff --git a/helm/internal/plugin/schema/getter.go b/helm/internal/plugin/schema/getter.go
new file mode 100644
index 000000000..2c5e81df1
--- /dev/null
+++ b/helm/internal/plugin/schema/getter.go
@@ -0,0 +1,66 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package schema
+
+import (
+ "fmt"
+ "time"
+)
+
+// TODO: can we generate these plugin input/output messages?
+
+type GetterOptionsV1 struct {
+ URL string
+ CertFile string
+ KeyFile string
+ CAFile string
+ UNTar bool
+ InsecureSkipVerifyTLS bool
+ PlainHTTP bool
+ AcceptHeader string
+ Username string
+ Password string
+ PassCredentialsAll bool
+ UserAgent string
+ Version string
+ Timeout time.Duration
+}
+
+type InputMessageGetterV1 struct {
+ Href string `json:"href"`
+ Protocol string `json:"protocol"`
+ Options GetterOptionsV1 `json:"options"`
+}
+
+type OutputMessageGetterV1 struct {
+ Data []byte `json:"data"`
+}
+
+// ConfigGetterV1 represents the configuration for download plugins
+type ConfigGetterV1 struct {
+ // Protocols are the list of URL schemes supported by this downloader
+ Protocols []string `yaml:"protocols"`
+}
+
+func (c *ConfigGetterV1) Validate() error {
+ if len(c.Protocols) == 0 {
+ return fmt.Errorf("getter has no protocols")
+ }
+ for i, protocol := range c.Protocols {
+ if protocol == "" {
+ return fmt.Errorf("getter has empty protocol at index %d", i)
+ }
+ }
+ return nil
+}
diff --git a/helm/internal/plugin/schema/postrenderer.go b/helm/internal/plugin/schema/postrenderer.go
new file mode 100644
index 000000000..ef51a8a61
--- /dev/null
+++ b/helm/internal/plugin/schema/postrenderer.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+ "bytes"
+)
+
+// InputMessagePostRendererV1 implements Input.Message
+type InputMessagePostRendererV1 struct {
+ Manifests *bytes.Buffer `json:"manifests"`
+ // from CLI --post-renderer-args
+ ExtraArgs []string `json:"extraArgs"`
+}
+
+type OutputMessagePostRendererV1 struct {
+ Manifests *bytes.Buffer `json:"manifests"`
+}
+
+type ConfigPostRendererV1 struct{}
+
+func (c *ConfigPostRendererV1) Validate() error {
+ return nil
+}
diff --git a/helm/internal/plugin/schema/test.go b/helm/internal/plugin/schema/test.go
new file mode 100644
index 000000000..97efa0fde
--- /dev/null
+++ b/helm/internal/plugin/schema/test.go
@@ -0,0 +1,28 @@
+/*
+ Copyright The Helm Authors.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package schema
+
+type InputMessageTestV1 struct {
+ Name string
+}
+
+type OutputMessageTestV1 struct {
+ Greeting string
+}
+
+type ConfigTestV1 struct{}
+
+func (c *ConfigTestV1) Validate() error {
+ return nil
+}
diff --git a/helm/internal/plugin/sign.go b/helm/internal/plugin/sign.go
new file mode 100644
index 000000000..6ddf113a2
--- /dev/null
+++ b/helm/internal/plugin/sign.go
@@ -0,0 +1,156 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+// SignPlugin signs a plugin using the SHA256 hash of the tarball data.
+//
+// This is used when packaging and signing a plugin from tarball data.
+// It creates a signature that includes the tarball hash and plugin metadata,
+// allowing verification of the original tarball later.
+func SignPlugin(tarballData []byte, filename string, signer *provenance.Signatory) (string, error) {
+ // Extract plugin metadata from tarball data
+ pluginMeta, err := ExtractTgzPluginMetadata(bytes.NewReader(tarballData))
+ if err != nil {
+ return "", fmt.Errorf("failed to extract plugin metadata: %w", err)
+ }
+
+ // Marshal plugin metadata to YAML bytes
+ metadataBytes, err := yaml.Marshal(pluginMeta)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal plugin metadata: %w", err)
+ }
+
+ // Use the generic provenance signing function
+ return signer.ClearSign(tarballData, filename, metadataBytes)
+}
+
+// ExtractTgzPluginMetadata extracts plugin metadata from a gzipped tarball reader
+func ExtractTgzPluginMetadata(r io.Reader) (*Metadata, error) {
+ gzr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ defer gzr.Close()
+
+ tr := tar.NewReader(gzr)
+ for {
+ header, err := tr.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Look for plugin.yaml file
+ if filepath.Base(header.Name) == "plugin.yaml" {
+ data, err := io.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse the plugin metadata
+ metadata, err := loadMetadata(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return metadata, nil
+ }
+ }
+
+ return nil, errors.New("plugin.yaml not found in tarball")
+}
+
+// parsePluginMessageBlock parses a signed message block to extract plugin metadata and checksums
+func parsePluginMessageBlock(data []byte) (*Metadata, *provenance.SumCollection, error) {
+ sc := &provenance.SumCollection{}
+
+ // We only need the checksums for verification, not the full metadata
+ if err := provenance.ParseMessageBlock(data, nil, sc); err != nil {
+ return nil, sc, err
+ }
+ return nil, sc, nil
+}
+
+// CreatePluginTarball creates a gzipped tarball from a plugin directory
+func CreatePluginTarball(sourceDir, pluginName string, w io.Writer) error {
+ gzw := gzip.NewWriter(w)
+ defer gzw.Close()
+
+ tw := tar.NewWriter(gzw)
+ defer tw.Close()
+
+ // Use the plugin name as the base directory in the tarball
+ baseDir := pluginName
+
+ // Walk the directory tree
+ return filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Create header
+ header, err := tar.FileInfoHeader(info, "")
+ if err != nil {
+ return err
+ }
+
+ // Update the name to be relative to the source directory
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // Include the base directory name in the tarball
+ header.Name = filepath.Join(baseDir, relPath)
+
+ // Write header
+ if err := tw.WriteHeader(header); err != nil {
+ return err
+ }
+
+ // If it's a regular file, write its content
+ if info.Mode().IsRegular() {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ if _, err := io.Copy(tw, file); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+}
diff --git a/helm/internal/plugin/sign_test.go b/helm/internal/plugin/sign_test.go
new file mode 100644
index 000000000..fce2dbeb3
--- /dev/null
+++ b/helm/internal/plugin/sign_test.go
@@ -0,0 +1,98 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+func TestSignPlugin(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ pluginYAML := `apiVersion: v1
+name: test-plugin
+type: cli/v1
+runtime: subprocess
+version: 1.0.0
+runtimeConfig:
+ platformCommand:
+ - command: echo`
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a tarball
+ tarballPath := filepath.Join(tempDir, "test-plugin.tgz")
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ tarFile.Close()
+ t.Fatal(err)
+ }
+ tarFile.Close()
+
+ // Create a test key for signing
+ keyring := "../../pkg/cmd/testdata/helm-test-key.secret"
+ signer, err := provenance.NewFromKeyring(keyring, "helm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := signer.DecryptKey(func(_ string) ([]byte, error) {
+ return []byte(""), nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the tarball data
+ tarballData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatalf("failed to read tarball: %v", err)
+ }
+
+ // Sign the plugin tarball
+ sig, err := SignPlugin(tarballData, filepath.Base(tarballPath), signer)
+ if err != nil {
+ t.Fatalf("failed to sign plugin: %v", err)
+ }
+
+ // Verify the signature contains the expected content
+ if !strings.Contains(sig, "-----BEGIN PGP SIGNED MESSAGE-----") {
+ t.Error("signature does not contain PGP header")
+ }
+
+ // Verify the tarball hash is in the signature
+ expectedHash, err := provenance.DigestFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The signature should contain the tarball hash
+ if !strings.Contains(sig, "sha256:"+expectedHash) {
+ t.Errorf("signature does not contain expected tarball hash: sha256:%s", expectedHash)
+ }
+}
diff --git a/helm/internal/plugin/signing_info.go b/helm/internal/plugin/signing_info.go
new file mode 100644
index 000000000..61ee9cd15
--- /dev/null
+++ b/helm/internal/plugin/signing_info.go
@@ -0,0 +1,178 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ProtonMail/go-crypto/openpgp/clearsign" //nolint
+
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// SigningInfo contains information about a plugin's signing status
+type SigningInfo struct {
+ // Status can be:
+ // - "local dev": Plugin is a symlink (development mode)
+ // - "unsigned": No provenance file found
+ // - "invalid provenance": Provenance file is malformed
+ // - "mismatched provenance": Provenance file does not match the installed tarball
+ // - "signed": Valid signature exists for the installed tarball
+ Status string
+ IsSigned bool // True if plugin has a valid signature (even if not verified against keyring)
+}
+
+// GetPluginSigningInfo returns signing information for an installed plugin
+func GetPluginSigningInfo(metadata Metadata) (*SigningInfo, error) {
+ pluginName := metadata.Name
+ pluginDir := helmpath.DataPath("plugins", pluginName)
+
+ // Check if plugin directory exists
+ fi, err := os.Lstat(pluginDir)
+ if err != nil {
+ return nil, fmt.Errorf("plugin %s not found: %w", pluginName, err)
+ }
+
+ // Check if it's a symlink (local development)
+ if fi.Mode()&os.ModeSymlink != 0 {
+ return &SigningInfo{
+ Status: "local dev",
+ IsSigned: false,
+ }, nil
+ }
+
+ // Find the exact tarball file for this plugin
+ pluginsDir := helmpath.DataPath("plugins")
+ tarballPath := filepath.Join(pluginsDir, fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version))
+ if _, err := os.Stat(tarballPath); err != nil {
+ return &SigningInfo{
+ Status: "unsigned",
+ IsSigned: false,
+ }, nil
+ }
+
+ // Check for .prov file associated with the tarball
+ provFile := tarballPath + ".prov"
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &SigningInfo{
+ Status: "unsigned",
+ IsSigned: false,
+ }, nil
+ }
+ return nil, fmt.Errorf("failed to read provenance file: %w", err)
+ }
+
+ // Parse the provenance file to check validity
+ block, _ := clearsign.Decode(provData)
+ if block == nil {
+ return &SigningInfo{
+ Status: "invalid provenance",
+ IsSigned: false,
+ }, nil
+ }
+
+ // Check if provenance matches the actual tarball
+ blockContent := string(block.Plaintext)
+ if !validateProvenanceHash(blockContent, tarballPath) {
+ return &SigningInfo{
+ Status: "mismatched provenance",
+ IsSigned: false,
+ }, nil
+ }
+
+ // We have a provenance file that is valid for this plugin
+ // Without a keyring, we can't verify the signature, but we know:
+ // 1. A .prov file exists
+ // 2. It's a valid clearsigned document (cryptographically signed)
+ // 3. The provenance contains valid checksums
+ return &SigningInfo{
+ Status: "signed",
+ IsSigned: true,
+ }, nil
+}
+
+func validateProvenanceHash(blockContent string, tarballPath string) bool {
+ // Parse provenance to get the expected hash
+ _, sums, err := parsePluginMessageBlock([]byte(blockContent))
+ if err != nil {
+ return false
+ }
+
+ // Must have file checksums
+ if len(sums.Files) == 0 {
+ return false
+ }
+
+ // Calculate actual hash of the tarball
+ actualHash, err := calculateFileHash(tarballPath)
+ if err != nil {
+ return false
+ }
+
+ // Check if the actual hash matches the expected hash in the provenance
+ for filename, expectedHash := range sums.Files {
+ if strings.Contains(filename, filepath.Base(tarballPath)) && expectedHash == actualHash {
+ return true
+ }
+ }
+
+ return false
+}
+
+// calculateFileHash calculates the SHA256 hash of a file
+func calculateFileHash(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ hasher := sha256.New()
+ if _, err := io.Copy(hasher, file); err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("sha256:%x", hasher.Sum(nil)), nil
+}
+
+// GetSigningInfoForPlugins returns signing info for multiple plugins
+func GetSigningInfoForPlugins(plugins []Plugin) map[string]*SigningInfo {
+ result := make(map[string]*SigningInfo)
+
+ for _, p := range plugins {
+ m := p.Metadata()
+
+ info, err := GetPluginSigningInfo(m)
+ if err != nil {
+ // If there's an error, treat as unsigned
+ result[m.Name] = &SigningInfo{
+ Status: "unknown",
+ IsSigned: false,
+ }
+ } else {
+ result[m.Name] = info
+ }
+ }
+
+ return result
+}
diff --git a/helm/internal/plugin/subprocess_commands.go b/helm/internal/plugin/subprocess_commands.go
new file mode 100644
index 000000000..9a57ed891
--- /dev/null
+++ b/helm/internal/plugin/subprocess_commands.go
@@ -0,0 +1,114 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// PlatformCommand represents a command for a particular operating system and architecture
+type PlatformCommand struct {
+ OperatingSystem string `yaml:"os"`
+ Architecture string `yaml:"arch"`
+ Command string `yaml:"command"`
+ Args []string `yaml:"args"`
+}
+
+// Returns command and args strings based on the following rules in priority order:
+// - From the PlatformCommand where OS and Arch match the current platform
+// - From the PlatformCommand where OS matches the current platform and Arch is empty/unspecified
+// - From the PlatformCommand where OS is empty/unspecified and Arch matches the current platform
+// - From the PlatformCommand where OS and Arch are both empty/unspecified
+// - Return nil, nil
+func getPlatformCommand(cmds []PlatformCommand) ([]string, []string) {
+ var command, args []string
+ found := false
+ foundOs := false
+
+ eq := strings.EqualFold
+ for _, c := range cmds {
+ if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) {
+ // Return early for an exact match
+ return strings.Split(c.Command, " "), c.Args
+ }
+
+ if (len(c.OperatingSystem) > 0 && !eq(c.OperatingSystem, runtime.GOOS)) || len(c.Architecture) > 0 {
+ // Skip if OS is not empty and doesn't match or if arch is set as a set arch requires an OS match
+ continue
+ }
+
+ if !foundOs && len(c.OperatingSystem) > 0 && eq(c.OperatingSystem, runtime.GOOS) {
+ // First OS match with empty arch, can only be overridden by a direct match
+ command = strings.Split(c.Command, " ")
+ args = c.Args
+ found = true
+ foundOs = true
+ } else if !found {
+ // First empty match, can be overridden by a direct match or an OS match
+ command = strings.Split(c.Command, " ")
+ args = c.Args
+ found = true
+ }
+ }
+
+ return command, args
+}
+
+// PrepareCommands takes a []Plugin.PlatformCommand
+// and prepares the command and arguments for execution.
+//
+// It merges extraArgs into any arguments supplied in the plugin. It
+// returns the main command and an args array.
+//
+// The result is suitable to pass to exec.Command.
+func PrepareCommands(cmds []PlatformCommand, expandArgs bool, extraArgs []string, env map[string]string) (string, []string, error) {
+ cmdParts, args := getPlatformCommand(cmds)
+ if len(cmdParts) == 0 || cmdParts[0] == "" {
+ return "", nil, fmt.Errorf("no plugin command is applicable")
+ }
+ envMappingFunc := func(key string) string {
+ return env[key]
+ }
+
+ main := os.Expand(cmdParts[0], envMappingFunc)
+ baseArgs := []string{}
+ if len(cmdParts) > 1 {
+ for _, cmdPart := range cmdParts[1:] {
+ if expandArgs {
+ baseArgs = append(baseArgs, os.Expand(cmdPart, envMappingFunc))
+ } else {
+ baseArgs = append(baseArgs, cmdPart)
+ }
+ }
+ }
+
+ for _, arg := range args {
+ if expandArgs {
+ baseArgs = append(baseArgs, os.Expand(arg, envMappingFunc))
+ } else {
+ baseArgs = append(baseArgs, arg)
+ }
+ }
+
+ if len(extraArgs) > 0 {
+ baseArgs = append(baseArgs, extraArgs...)
+ }
+
+ return main, baseArgs, nil
+}
diff --git a/helm/internal/plugin/subprocess_commands_test.go b/helm/internal/plugin/subprocess_commands_test.go
new file mode 100644
index 000000000..8e9c1663e
--- /dev/null
+++ b/helm/internal/plugin/subprocess_commands_test.go
@@ -0,0 +1,273 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPrepareCommand(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ platformCommand := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(platformCommand, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandExtraArgs(t *testing.T) {
+
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+ platformCommand := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ extraArgs := []string{"--debug", "--foo", "bar"}
+
+ type testCaseExpected struct {
+ cmdMain string
+ args []string
+ }
+
+ testCases := map[string]struct {
+ ignoreFlags bool
+ expected testCaseExpected
+ }{
+ "ignoreFlags false": {
+ ignoreFlags: false,
+ expected: testCaseExpected{
+ cmdMain: cmdMain,
+ args: []string{"-c", "echo \"test\"", "--debug", "--foo", "bar"},
+ },
+ },
+ "ignoreFlags true": {
+ ignoreFlags: true,
+ expected: testCaseExpected{
+ cmdMain: cmdMain,
+ args: []string{"-c", "echo \"test\""},
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ // extra args are expected when ignoreFlags is unset or false
+ testExtraArgs := extraArgs
+ if tc.ignoreFlags {
+ testExtraArgs = []string{}
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(platformCommand, true, testExtraArgs, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, tc.expected.cmdMain, cmd, "Expected command to match")
+ assert.Equal(t, tc.expected.args, args, "Expected args to match")
+ })
+ }
+}
+
+func TestPrepareCommands(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: cmdMain, Args: cmdArgs},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandsExtraArgs(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+ extraArgs := []string{"--debug", "--foo", "bar"}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ expectedArgs := append(cmdArgs, extraArgs...)
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, extraArgs, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, expectedArgs) {
+ t.Fatalf("Expected %v, got %v", expectedArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoArch(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoOsNoArch(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"test\""}
+
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ {OperatingSystem: "", Architecture: "", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "pwsh", Args: []string{"-c", "echo \"error\""}},
+ }
+
+ env := map[string]string{}
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoMatch(t *testing.T) {
+ cmds := []PlatformCommand{
+ {OperatingSystem: "no-os", Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: runtime.GOOS, Architecture: "no-arch", Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ {OperatingSystem: "no-os", Architecture: runtime.GOARCH, Command: "sh", Args: []string{"-c", "echo \"test\""}},
+ }
+
+ env := map[string]string{}
+ if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil {
+ t.Fatalf("Expected error to be returned")
+ }
+}
+
+func TestPrepareCommandsNoCommands(t *testing.T) {
+ cmds := []PlatformCommand{}
+
+ env := map[string]string{}
+ if _, _, err := PrepareCommands(cmds, true, []string{}, env); err == nil {
+ t.Fatalf("Expected error to be returned")
+ }
+}
+
+func TestPrepareCommandsExpand(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"${TESTX}${TESTY}\""}
+ cmds := []PlatformCommand{
+ {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs},
+ }
+
+ expectedArgs := []string{"-c", "echo \"testxtesty\""}
+
+ env := map[string]string{
+ "TESTX": "testx",
+ "TESTY": "testy",
+ }
+
+ cmd, args, err := PrepareCommands(cmds, true, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, expectedArgs) {
+ t.Fatalf("Expected %v, got %v", expectedArgs, args)
+ }
+}
+
+func TestPrepareCommandsNoExpand(t *testing.T) {
+ cmdMain := "sh"
+ cmdArgs := []string{"-c", "echo \"${TEST}\""}
+ cmds := []PlatformCommand{
+ {OperatingSystem: "", Architecture: "", Command: cmdMain, Args: cmdArgs},
+ }
+
+ env := map[string]string{
+ "TEST": "test",
+ }
+
+ cmd, args, err := PrepareCommands(cmds, false, []string{}, env)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmd != cmdMain {
+ t.Fatalf("Expected %q, got %q", cmdMain, cmd)
+ }
+ if !reflect.DeepEqual(args, cmdArgs) {
+ t.Fatalf("Expected %v, got %v", cmdArgs, args)
+ }
+}
diff --git a/helm/internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml b/helm/internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml
new file mode 100644
index 000000000..66498be96
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/bad/duplicate-entries-legacy/plugin.yaml
@@ -0,0 +1,11 @@
+name: "duplicate-entries"
+version: "0.1.0"
+usage: "usage"
+description: |-
+ description
+command: "echo hello"
+ignoreFlags: true
+hooks:
+ install: "echo installing..."
+hooks:
+ install: "echo installing something different"
diff --git a/helm/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml b/helm/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml
new file mode 100644
index 000000000..344141121
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/bad/duplicate-entries-v1/plugin.yaml
@@ -0,0 +1,19 @@
+name: "duplicate-entries"
+version: "0.1.0"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "test duplicate entries"
+ longHelp: |-
+ description
+ ignoreFlags: true
+runtimeConfig:
+ platformCommand:
+ - command: "echo hello"
+ platformHooks:
+ install:
+ - command: "echo installing..."
+ platformHooks:
+ install:
+ - command: "echo installing something different"
diff --git a/helm/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml
new file mode 100644
index 000000000..4e85f1f79
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/downloader/plugin.yaml
@@ -0,0 +1,12 @@
+---
+name: "downloader"
+version: "1.2.3"
+usage: "usage"
+description: |-
+ download something
+command: "echo Hello"
+downloaders:
+ - protocols:
+ - "myprotocol"
+ - "myprotocols"
+ command: "echo Download"
diff --git a/helm/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml
new file mode 100644
index 000000000..ef84a4d8f
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/echo-legacy/plugin.yaml
@@ -0,0 +1,9 @@
+---
+name: "echo-legacy"
+version: "1.2.3"
+usage: "echo something"
+description: |-
+ This is a testing fixture.
+command: "echo Hello"
+hooks:
+ install: "echo Installing"
diff --git a/helm/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml
new file mode 100644
index 000000000..8bbef9c0f
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/echo-v1/plugin.yaml
@@ -0,0 +1,15 @@
+---
+name: "echo-v1"
+version: "1.2.3"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "echo something"
+ longHelp: |-
+ This is a testing fixture.
+ ignoreFlags: false
+runtimeConfig:
+ command: "echo Hello"
+ hooks:
+ install: "echo Installing"
diff --git a/helm/internal/plugin/testdata/plugdir/good/getter/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/getter/plugin.yaml
new file mode 100644
index 000000000..7bdee9bde
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/getter/plugin.yaml
@@ -0,0 +1,17 @@
+---
+name: "getter"
+version: "1.2.3"
+type: getter/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ protocols:
+ - "myprotocol"
+ - "myprotocols"
+runtimeConfig:
+ protocolCommands:
+ - platformCommand:
+ - command: "echo getter"
+ protocols:
+ - "myprotocol"
+ - "myprotocols"
diff --git a/helm/internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1 b/helm/internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1
new file mode 100644
index 000000000..bee61f27d
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/hello-legacy/hello.ps1
@@ -0,0 +1,3 @@
+#!/usr/bin/env pwsh
+
+Write-Host "Hello, world!"
diff --git a/helm/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh b/helm/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh
new file mode 100755
index 000000000..4f20796ef
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/hello-legacy/hello.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+
+echo "Hello from a Helm plugin"
+
+echo "PARAMS"
+echo "$@"
+
+$HELM_BIN ls --all
+
diff --git a/helm/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml
new file mode 100644
index 000000000..bf37e0626
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/hello-legacy/plugin.yaml
@@ -0,0 +1,22 @@
+---
+name: "hello-legacy"
+version: "0.1.0"
+usage: "echo hello message"
+description: |-
+ description
+platformCommand:
+ - os: linux
+ command: "sh"
+ args: ["-c", "${HELM_PLUGIN_DIR}/hello.sh"]
+ - os: windows
+ command: "pwsh"
+ args: ["-c", "${HELM_PLUGIN_DIR}/hello.ps1"]
+ignoreFlags: true
+platformHooks:
+ install:
+ - os: linux
+ command: "sh"
+ args: ["-c", 'echo "installing..."']
+ - os: windows
+ command: "pwsh"
+ args: ["-c", 'echo "installing..."']
diff --git a/helm/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1 b/helm/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1
new file mode 100644
index 000000000..bee61f27d
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/hello-v1/hello.ps1
@@ -0,0 +1,3 @@
+#!/usr/bin/env pwsh
+
+Write-Host "Hello, world!"
diff --git a/helm/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh b/helm/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh
new file mode 100755
index 000000000..4f20796ef
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/hello-v1/hello.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+
+echo "Hello from a Helm plugin"
+
+echo "PARAMS"
+echo "$@"
+
+$HELM_BIN ls --all
+
diff --git a/helm/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml
new file mode 100644
index 000000000..044a3476d
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/hello-v1/plugin.yaml
@@ -0,0 +1,32 @@
+---
+name: "hello-v1"
+version: "0.1.0"
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ usage: hello [params]...
+ shortHelp: "echo hello message"
+ longHelp: |-
+ description
+ ignoreFlags: true
+runtimeConfig:
+ platformCommand:
+ - os: linux
+ arch:
+ command: "sh"
+ args: ["-c", "${HELM_PLUGIN_DIR}/hello.sh"]
+ - os: windows
+ arch:
+ command: "pwsh"
+ args: ["-c", "${HELM_PLUGIN_DIR}/hello.ps1"]
+ platformHooks:
+ install:
+ - os: linux
+ arch: ""
+ command: "sh"
+ args: ["-c", 'echo "installing..."']
+ - os: windows
+ arch: ""
+ command: "pwsh"
+ args: ["-c", 'echo "installing..."']
diff --git a/helm/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml b/helm/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml
new file mode 100644
index 000000000..30f1599b4
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/postrenderer-v1/plugin.yaml
@@ -0,0 +1,8 @@
+name: "postrenderer-v1"
+version: "1.2.3"
+type: postrenderer/v1
+apiVersion: v1
+runtime: subprocess
+runtimeConfig:
+ platformCommand:
+ - command: "${HELM_PLUGIN_DIR}/sed-test.sh"
diff --git a/helm/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh b/helm/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh
new file mode 100755
index 000000000..a016e398f
--- /dev/null
+++ b/helm/internal/plugin/testdata/plugdir/good/postrenderer-v1/sed-test.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+if [ $# -eq 0 ]; then
+ sed s/FOOTEST/BARTEST/g <&0
+else
+ sed s/FOOTEST/"$*"/g <&0
+fi
diff --git a/helm/internal/plugin/testdata/src/extismv1-test/.gitignore b/helm/internal/plugin/testdata/src/extismv1-test/.gitignore
new file mode 100644
index 000000000..ef7d91fbb
--- /dev/null
+++ b/helm/internal/plugin/testdata/src/extismv1-test/.gitignore
@@ -0,0 +1 @@
+plugin.wasm
diff --git a/helm/internal/plugin/testdata/src/extismv1-test/Makefile b/helm/internal/plugin/testdata/src/extismv1-test/Makefile
new file mode 100644
index 000000000..24da1f371
--- /dev/null
+++ b/helm/internal/plugin/testdata/src/extismv1-test/Makefile
@@ -0,0 +1,12 @@
+
+.DEFAULT: build
+.PHONY: build test vet
+
+.PHONY: plugin.wasm
+plugin.wasm:
+ GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugin.wasm .
+
+build: plugin.wasm
+
+vet:
+ GOOS=wasip1 GOARCH=wasm go vet ./...
diff --git a/helm/internal/plugin/testdata/src/extismv1-test/go.mod b/helm/internal/plugin/testdata/src/extismv1-test/go.mod
new file mode 100644
index 000000000..baed75fab
--- /dev/null
+++ b/helm/internal/plugin/testdata/src/extismv1-test/go.mod
@@ -0,0 +1,5 @@
+module helm.sh/helm/v4/internal/plugin/src/extismv1-test
+
+go 1.25.0
+
+require github.com/extism/go-pdk v1.1.3
diff --git a/helm/internal/plugin/testdata/src/extismv1-test/go.sum b/helm/internal/plugin/testdata/src/extismv1-test/go.sum
new file mode 100644
index 000000000..c15d38292
--- /dev/null
+++ b/helm/internal/plugin/testdata/src/extismv1-test/go.sum
@@ -0,0 +1,2 @@
+github.com/extism/go-pdk v1.1.3 h1:hfViMPWrqjN6u67cIYRALZTZLk/enSPpNKa+rZ9X2SQ=
+github.com/extism/go-pdk v1.1.3/go.mod h1:Gz+LIU/YCKnKXhgge8yo5Yu1F/lbv7KtKFkiCSzW/P4=
diff --git a/helm/internal/plugin/testdata/src/extismv1-test/main.go b/helm/internal/plugin/testdata/src/extismv1-test/main.go
new file mode 100644
index 000000000..31c739a5b
--- /dev/null
+++ b/helm/internal/plugin/testdata/src/extismv1-test/main.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ _ "embed"
+ "fmt"
+ "os"
+
+ pdk "github.com/extism/go-pdk"
+)
+
+type InputMessageTestV1 struct {
+ Name string
+}
+
+type OutputMessageTestV1 struct {
+ Greeting string
+}
+
+type ConfigTestV1 struct{}
+
+func runGetterPluginImpl(input InputMessageTestV1) (*OutputMessageTestV1, error) {
+ name := input.Name
+
+ greeting := fmt.Sprintf("Hello, %s! (%d)", name, len(name))
+ err := os.WriteFile("/tmp/greeting.txt", []byte(greeting), 0o600)
+ if err != nil {
+ return nil, fmt.Errorf("failed to write temp file: %w", err)
+ }
+ return &OutputMessageTestV1{
+ Greeting: greeting,
+ }, nil
+}
+
+func RunGetterPlugin() error {
+ var input InputMessageTestV1
+ if err := pdk.InputJSON(&input); err != nil {
+ return fmt.Errorf("failed to parse input json: %w", err)
+ }
+
+ pdk.Log(pdk.LogDebug, fmt.Sprintf("Received input: %+v", input))
+ output, err := runGetterPluginImpl(input)
+ if err != nil {
+ pdk.Log(pdk.LogError, fmt.Sprintf("failed: %s", err.Error()))
+ return err
+ }
+
+ pdk.Log(pdk.LogDebug, fmt.Sprintf("Sending output: %+v", output))
+ if err := pdk.OutputJSON(output); err != nil {
+ return fmt.Errorf("failed to write output json: %w", err)
+ }
+
+ return nil
+}
+
+//go:wasmexport helm_plugin_main
+func HelmPlugin() uint32 {
+ pdk.Log(pdk.LogDebug, "running example-extism-getter plugin")
+
+ if err := RunGetterPlugin(); err != nil {
+ pdk.Log(pdk.LogError, err.Error())
+ pdk.SetError(err)
+ return 1
+ }
+
+ return 0
+}
+
+func main() {}
diff --git a/helm/internal/plugin/testdata/src/extismv1-test/plugin.yaml b/helm/internal/plugin/testdata/src/extismv1-test/plugin.yaml
new file mode 100644
index 000000000..fea1e3f66
--- /dev/null
+++ b/helm/internal/plugin/testdata/src/extismv1-test/plugin.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+type: test/v1
+name: extismv1-test
+version: 0.1.0
+runtime: extism/v1
+runtimeConfig:
+ fileSystem:
+ createTempDir: true
\ No newline at end of file
diff --git a/helm/internal/plugin/verify.go b/helm/internal/plugin/verify.go
new file mode 100644
index 000000000..760a56e67
--- /dev/null
+++ b/helm/internal/plugin/verify.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+// VerifyPlugin verifies plugin data against a signature using data in memory.
+func VerifyPlugin(archiveData, provData []byte, filename, keyring string) (*provenance.Verification, error) {
+ // Create signatory from keyring
+ sig, err := provenance.NewFromKeyring(keyring, "")
+ if err != nil {
+ return nil, err
+ }
+
+ // Use the new VerifyData method directly
+ return sig.Verify(archiveData, provData, filename)
+}
+
+// isTarball checks if a file has a tarball extension
+func IsTarball(filename string) bool {
+ return filepath.Ext(filename) == ".gz" || filepath.Ext(filename) == ".tgz"
+}
diff --git a/helm/internal/plugin/verify_test.go b/helm/internal/plugin/verify_test.go
new file mode 100644
index 000000000..9c907788f
--- /dev/null
+++ b/helm/internal/plugin/verify_test.go
@@ -0,0 +1,214 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+const testKeyFile = "../../pkg/cmd/testdata/helm-test-key.secret"
+const testPubFile = "../../pkg/cmd/testdata/helm-test-key.pub"
+
+const testPluginYAML = `apiVersion: v1
+name: test-plugin
+type: cli/v1
+runtime: subprocess
+version: 1.0.0
+runtimeConfig:
+ platformCommand:
+ - command: echo`
+
+func TestVerifyPlugin(t *testing.T) {
+ // Create a test plugin and sign it
+ tempDir := t.TempDir()
+
+ // Create plugin directory
+ pluginDir := filepath.Join(tempDir, "verify-test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create tarball
+ tarballPath := filepath.Join(tempDir, "verify-test-plugin.tar.gz")
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ tarFile.Close()
+ t.Fatal(err)
+ }
+ tarFile.Close()
+
+ // Sign the plugin with source directory
+ signer, err := provenance.NewFromKeyring(testKeyFile, "helm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := signer.DecryptKey(func(_ string) ([]byte, error) {
+ return []byte(""), nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the tarball data
+ tarballData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := SignPlugin(tarballData, filepath.Base(tarballPath), signer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Write the signature to .prov file
+ provFile := tarballPath + ".prov"
+ if err := os.WriteFile(provFile, []byte(sig), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the files for verification
+ archiveData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Now verify the plugin
+ verification, err := VerifyPlugin(archiveData, provData, filepath.Base(tarballPath), testPubFile)
+ if err != nil {
+ t.Fatalf("Failed to verify plugin: %v", err)
+ }
+
+ // Check verification results
+ if verification.SignedBy == nil {
+ t.Error("SignedBy is nil")
+ }
+
+ if verification.FileName != "verify-test-plugin.tar.gz" {
+ t.Errorf("Expected filename 'verify-test-plugin.tar.gz', got %s", verification.FileName)
+ }
+
+ if verification.FileHash == "" {
+ t.Error("FileHash is empty")
+ }
+}
+
+func TestVerifyPluginBadSignature(t *testing.T) {
+ tempDir := t.TempDir()
+
+ // Create a plugin tarball
+ pluginDir := filepath.Join(tempDir, "bad-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ tarballPath := filepath.Join(tempDir, "bad-plugin.tar.gz")
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ tarFile.Close()
+ t.Fatal(err)
+ }
+ tarFile.Close()
+
+ // Create a bad signature (just some text)
+ badSig := `-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+This is not a real signature
+-----BEGIN PGP SIGNATURE-----
+
+InvalidSignatureData
+
+-----END PGP SIGNATURE-----`
+
+ provFile := tarballPath + ".prov"
+ if err := os.WriteFile(provFile, []byte(badSig), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the files
+ archiveData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to verify - should fail
+ _, err = VerifyPlugin(archiveData, provData, filepath.Base(tarballPath), testPubFile)
+ if err == nil {
+ t.Error("Expected verification to fail with bad signature")
+ }
+}
+
+func TestVerifyPluginMissingProvenance(t *testing.T) {
+ tempDir := t.TempDir()
+ tarballPath := filepath.Join(tempDir, "no-prov.tar.gz")
+
+ // Create a minimal tarball
+ if err := os.WriteFile(tarballPath, []byte("dummy"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the tarball data
+ archiveData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to verify with empty provenance data
+ _, err = VerifyPlugin(archiveData, nil, filepath.Base(tarballPath), testPubFile)
+ if err == nil {
+ t.Error("Expected verification to fail with empty provenance data")
+ }
+}
+
+func TestVerifyPluginMalformedData(t *testing.T) {
+ // Test with malformed tarball data - should fail
+ malformedData := []byte("not a tarball")
+ provData := []byte("fake provenance")
+
+ _, err := VerifyPlugin(malformedData, provData, "malformed.tar.gz", testPubFile)
+ if err == nil {
+ t.Error("Expected malformed data verification to fail, but it succeeded")
+ }
+}
diff --git a/helm/internal/resolver/resolver.go b/helm/internal/resolver/resolver.go
new file mode 100644
index 000000000..3efe94f10
--- /dev/null
+++ b/helm/internal/resolver/resolver.go
@@ -0,0 +1,263 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resolver
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/Masterminds/semver/v3"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/provenance"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// Resolver resolves dependencies from semantic version ranges to a particular version.
+type Resolver struct {
+ chartpath string
+ cachepath string
+ registryClient *registry.Client
+}
+
+// New creates a new resolver for a given chart, helm home and registry client.
+func New(chartpath, cachepath string, registryClient *registry.Client) *Resolver {
+ return &Resolver{
+ chartpath: chartpath,
+ cachepath: cachepath,
+ registryClient: registryClient,
+ }
+}
+
+// Resolve resolves dependencies and returns a lock file with the resolution.
+func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) {
+
+ // Now we clone the dependencies, locking as we go.
+ locked := make([]*chart.Dependency, len(reqs))
+ missing := []string{}
+ for i, d := range reqs {
+ constraint, err := semver.NewConstraint(d.Version)
+ if err != nil {
+ return nil, fmt.Errorf("dependency %q has an invalid version/constraint format: %w", d.Name, err)
+ }
+
+ if d.Repository == "" {
+ // Local chart subfolder
+ if _, err := GetLocalPath(filepath.Join("charts", d.Name), r.chartpath); err != nil {
+ return nil, err
+ }
+
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: "",
+ Version: d.Version,
+ }
+ continue
+ }
+ if strings.HasPrefix(d.Repository, "file://") {
+ chartpath, err := GetLocalPath(d.Repository, r.chartpath)
+ if err != nil {
+ return nil, err
+ }
+
+ ch, err := loader.LoadDir(chartpath)
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := semver.NewVersion(ch.Metadata.Version)
+ if err != nil {
+ // Not a legit entry.
+ continue
+ }
+
+ if !constraint.Check(v) {
+ missing = append(missing, fmt.Sprintf("%q (repository %q, version %q)", d.Name, d.Repository, d.Version))
+ continue
+ }
+
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: d.Repository,
+ Version: ch.Metadata.Version,
+ }
+ continue
+ }
+
+ repoName := repoNames[d.Name]
+ // if the repository was not defined, but the dependency defines a repository url, bypass the cache
+ if repoName == "" && d.Repository != "" {
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: d.Repository,
+ Version: d.Version,
+ }
+ continue
+ }
+
+ var vs repo.ChartVersions
+ var version string
+ var ok bool
+ found := true
+ if !registry.IsOCI(d.Repository) {
+ repoIndex, err := repo.LoadIndexFile(filepath.Join(r.cachepath, helmpath.CacheIndexFile(repoName)))
+ if err != nil {
+ return nil, fmt.Errorf("no cached repository for %s found. (try 'helm repo update'): %w", repoName, err)
+ }
+
+ vs, ok = repoIndex.Entries[d.Name]
+ if !ok {
+ return nil, fmt.Errorf("%s chart not found in repo %s", d.Name, d.Repository)
+ }
+ found = false
+ } else {
+ version = d.Version
+
+ // Check to see if an explicit version has been provided
+ _, err := semver.NewVersion(version)
+
+ // Use an explicit version, otherwise search for tags
+ if err == nil {
+ vs = []*repo.ChartVersion{{
+ Metadata: &chart.Metadata{
+ Version: version,
+ },
+ }}
+
+ } else {
+ // Retrieve list of tags for repository
+ ref := fmt.Sprintf("%s/%s", strings.TrimPrefix(d.Repository, fmt.Sprintf("%s://", registry.OCIScheme)), d.Name)
+ tags, err := r.registryClient.Tags(ref)
+ if err != nil {
+ return nil, fmt.Errorf("could not retrieve list of tags for repository %s: %w", d.Repository, err)
+ }
+
+ vs = make(repo.ChartVersions, len(tags))
+ for ti, t := range tags {
+ // Mock chart version objects
+ version := &repo.ChartVersion{
+ Metadata: &chart.Metadata{
+ Version: t,
+ },
+ }
+ vs[ti] = version
+ }
+ }
+ }
+
+ locked[i] = &chart.Dependency{
+ Name: d.Name,
+ Repository: d.Repository,
+ Version: version,
+ }
+ // The versions are already sorted and hence the first one to satisfy the constraint is used
+ for _, ver := range vs {
+ v, err := semver.NewVersion(ver.Version)
+ // OCI does not need URLs
+ if err != nil || (!registry.IsOCI(d.Repository) && len(ver.URLs) == 0) {
+ // Not a legit entry.
+ continue
+ }
+ if constraint.Check(v) {
+ found = true
+ locked[i].Version = v.Original()
+ break
+ }
+ }
+
+ if !found {
+ missing = append(missing, fmt.Sprintf("%q (repository %q, version %q)", d.Name, d.Repository, d.Version))
+ }
+ }
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("can't get a valid version for %d subchart(s): %s. Make sure a matching chart version exists in the repo, or change the version constraint in Chart.yaml", len(missing), strings.Join(missing, ", "))
+ }
+
+ digest, err := HashReq(reqs, locked)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chart.Lock{
+ Generated: time.Now(),
+ Digest: digest,
+ Dependencies: locked,
+ }, nil
+}
+
+// HashReq generates a hash of the dependencies.
+//
+// This should be used only to compare against another hash generated by this
+// function.
+func HashReq(req, lock []*chart.Dependency) (string, error) {
+ data, err := json.Marshal([2][]*chart.Dependency{req, lock})
+ if err != nil {
+ return "", err
+ }
+ s, err := provenance.Digest(bytes.NewBuffer(data))
+ return "sha256:" + s, err
+}
+
+// HashV2Req generates a hash of requirements generated in Helm v2.
+//
+// This should be used only to compare against another hash generated by the
+// Helm v2 hash function. It is to handle issue:
+// https://github.com/helm/helm/issues/7233
+func HashV2Req(req []*chart.Dependency) (string, error) {
+ dep := make(map[string][]*chart.Dependency)
+ dep["dependencies"] = req
+ data, err := json.Marshal(dep)
+ if err != nil {
+ return "", err
+ }
+ s, err := provenance.Digest(bytes.NewBuffer(data))
+ return "sha256:" + s, err
+}
+
+// GetLocalPath generates absolute local path when use
+// "file://" in repository of dependencies
+func GetLocalPath(repo, chartpath string) (string, error) {
+ var depPath string
+ var err error
+ p := strings.TrimPrefix(repo, "file://")
+
+ // root path is absolute
+ if strings.HasPrefix(p, "/") {
+ if depPath, err = filepath.Abs(p); err != nil {
+ return "", err
+ }
+ } else {
+ depPath = filepath.Join(chartpath, p)
+ }
+
+ if _, err = os.Stat(depPath); errors.Is(err, fs.ErrNotExist) {
+ return "", fmt.Errorf("directory %s not found", depPath)
+ } else if err != nil {
+ return "", err
+ }
+
+ return depPath, nil
+}
diff --git a/helm/internal/resolver/resolver_test.go b/helm/internal/resolver/resolver_test.go
new file mode 100644
index 000000000..1e33837a9
--- /dev/null
+++ b/helm/internal/resolver/resolver_test.go
@@ -0,0 +1,310 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resolver
+
+import (
+ "runtime"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+func TestResolve(t *testing.T) {
+ tests := []struct {
+ name string
+ req []*chart.Dependency
+ expect *chart.Lock
+ err bool
+ }{
+ {
+ name: "repo from invalid version",
+ req: []*chart.Dependency{
+ {Name: "base", Repository: "file://base", Version: "1.1.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "base", Repository: "file://base", Version: "0.1.0"},
+ },
+ },
+ err: true,
+ },
+ {
+ name: "version failure",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "http://example.com", Version: ">a1"},
+ },
+ err: true,
+ },
+ {
+ name: "cache index failure",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "http://example.com", Version: "1.0.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "http://example.com", Version: "1.0.0"},
+ },
+ },
+ },
+ {
+ name: "chart not found failure",
+ req: []*chart.Dependency{
+ {Name: "redis", Repository: "http://example.com", Version: "1.0.0"},
+ },
+ err: true,
+ },
+ {
+ name: "constraint not satisfied failure",
+ req: []*chart.Dependency{
+ {Name: "alpine", Repository: "http://example.com", Version: ">=1.0.0"},
+ },
+ err: true,
+ },
+ {
+ name: "valid lock",
+ req: []*chart.Dependency{
+ {Name: "alpine", Repository: "http://example.com", Version: ">=0.1.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "alpine", Repository: "http://example.com", Version: "0.2.0"},
+ },
+ },
+ },
+ {
+ name: "repo from valid local path",
+ req: []*chart.Dependency{
+ {Name: "base", Repository: "file://base", Version: "0.1.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "base", Repository: "file://base", Version: "0.1.0"},
+ },
+ },
+ },
+ {
+ name: "repo from valid local path with range resolution",
+ req: []*chart.Dependency{
+ {Name: "base", Repository: "file://base", Version: "^0.1.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "base", Repository: "file://base", Version: "0.1.0"},
+ },
+ },
+ },
+ {
+ name: "repo from invalid local path",
+ req: []*chart.Dependency{
+ {Name: "nonexistent", Repository: "file://testdata/nonexistent", Version: "0.1.0"},
+ },
+ err: true,
+ },
+ {
+ name: "repo from valid path under charts path",
+ req: []*chart.Dependency{
+ {Name: "localdependency", Repository: "", Version: "0.1.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "localdependency", Repository: "", Version: "0.1.0"},
+ },
+ },
+ },
+ {
+ name: "repo from invalid path under charts path",
+ req: []*chart.Dependency{
+ {Name: "nonexistentdependency", Repository: "", Version: "0.1.0"},
+ },
+ expect: &chart.Lock{
+ Dependencies: []*chart.Dependency{
+ {Name: "nonexistentlocaldependency", Repository: "", Version: "0.1.0"},
+ },
+ },
+ err: true,
+ },
+ }
+
+ repoNames := map[string]string{"alpine": "kubernetes-charts", "redis": "kubernetes-charts"}
+ registryClient, _ := registry.NewClient()
+ r := New("testdata/chartpath", "testdata/repository", registryClient)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ l, err := r.Resolve(tt.req, repoNames)
+ if err != nil {
+ if tt.err {
+ return
+ }
+ t.Fatal(err)
+ }
+
+ if tt.err {
+ t.Fatalf("Expected error in test %q", tt.name)
+ }
+
+ if h, err := HashReq(tt.req, tt.expect.Dependencies); err != nil {
+ t.Fatal(err)
+ } else if h != l.Digest {
+ t.Errorf("%q: hashes don't match.", tt.name)
+ }
+
+ // Check fields.
+ if len(l.Dependencies) != len(tt.req) {
+ t.Errorf("%s: wrong number of dependencies in lock", tt.name)
+ }
+ d0 := l.Dependencies[0]
+ e0 := tt.expect.Dependencies[0]
+ if d0.Name != e0.Name {
+ t.Errorf("%s: expected name %s, got %s", tt.name, e0.Name, d0.Name)
+ }
+ if d0.Repository != e0.Repository {
+ t.Errorf("%s: expected repo %s, got %s", tt.name, e0.Repository, d0.Repository)
+ }
+ if d0.Version != e0.Version {
+ t.Errorf("%s: expected version %s, got %s", tt.name, e0.Version, d0.Version)
+ }
+ })
+ }
+}
+
+func TestHashReq(t *testing.T) {
+ expect := "sha256:fb239e836325c5fa14b29d1540a13b7d3ba13151b67fe719f820e0ef6d66aaaf"
+
+ tests := []struct {
+ name string
+ chartVersion string
+ lockVersion string
+ wantError bool
+ }{
+ {
+ name: "chart with the expected digest",
+ chartVersion: "0.1.0",
+ lockVersion: "0.1.0",
+ wantError: false,
+ },
+ {
+ name: "ranged version but same resolved lock version",
+ chartVersion: "^0.1.0",
+ lockVersion: "0.1.0",
+ wantError: true,
+ },
+ {
+ name: "ranged version resolved as higher version",
+ chartVersion: "^0.1.0",
+ lockVersion: "0.1.2",
+ wantError: true,
+ },
+ {
+ name: "different version",
+ chartVersion: "0.1.2",
+ lockVersion: "0.1.2",
+ wantError: true,
+ },
+ {
+ name: "different version with a range",
+ chartVersion: "^0.1.2",
+ lockVersion: "0.1.2",
+ wantError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ req := []*chart.Dependency{
+ {Name: "alpine", Version: tt.chartVersion, Repository: "http://localhost:8879/charts"},
+ }
+ lock := []*chart.Dependency{
+ {Name: "alpine", Version: tt.lockVersion, Repository: "http://localhost:8879/charts"},
+ }
+ h, err := HashReq(req, lock)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !tt.wantError && expect != h {
+ t.Errorf("Expected %q, got %q", expect, h)
+ } else if tt.wantError && expect == h {
+ t.Errorf("Expected not %q, but same", expect)
+ }
+ })
+ }
+}
+
+func TestGetLocalPath(t *testing.T) {
+ tests := []struct {
+ name string
+ repo string
+ chartpath string
+ expect string
+ winExpect string
+ err bool
+ }{
+ {
+ name: "absolute path",
+ repo: "file:////",
+ expect: "/",
+ winExpect: "\\",
+ },
+ {
+ name: "relative path",
+ repo: "file://../../testdata/chartpath/base",
+ chartpath: "foo/bar",
+ expect: "testdata/chartpath/base",
+ winExpect: "testdata\\chartpath\\base",
+ },
+ {
+ name: "current directory path",
+ repo: "../charts/localdependency",
+ chartpath: "testdata/chartpath/charts",
+ expect: "testdata/chartpath/charts/localdependency",
+ winExpect: "testdata\\chartpath\\charts\\localdependency",
+ },
+ {
+ name: "invalid local path",
+ repo: "file://testdata/nonexistent",
+ chartpath: "testdata/chartpath",
+ err: true,
+ },
+ {
+ name: "invalid path under current directory",
+ repo: "charts/nonexistentdependency",
+ chartpath: "testdata/chartpath/charts",
+ err: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p, err := GetLocalPath(tt.repo, tt.chartpath)
+ if err != nil {
+ if tt.err {
+ return
+ }
+ t.Fatal(err)
+ }
+ if tt.err {
+ t.Fatalf("Expected error in test %q", tt.name)
+ }
+ expect := tt.expect
+ if runtime.GOOS == "windows" {
+ expect = tt.winExpect
+ }
+ if p != expect {
+ t.Errorf("%q: expected %q, got %q", tt.name, expect, p)
+ }
+ })
+ }
+}
diff --git a/helm/internal/resolver/testdata/chartpath/base/Chart.yaml b/helm/internal/resolver/testdata/chartpath/base/Chart.yaml
new file mode 100644
index 000000000..860b09091
--- /dev/null
+++ b/helm/internal/resolver/testdata/chartpath/base/Chart.yaml
@@ -0,0 +1,3 @@
+apiVersion: v2
+name: base
+version: 0.1.0
diff --git a/helm/internal/resolver/testdata/chartpath/charts/localdependency/Chart.yaml b/helm/internal/resolver/testdata/chartpath/charts/localdependency/Chart.yaml
new file mode 100644
index 000000000..083c51ee5
--- /dev/null
+++ b/helm/internal/resolver/testdata/chartpath/charts/localdependency/Chart.yaml
@@ -0,0 +1,3 @@
+description: A Helm chart for Kubernetes
+name: localdependency
+version: 0.1.0
diff --git a/helm/internal/resolver/testdata/repository/kubernetes-charts-index.yaml b/helm/internal/resolver/testdata/repository/kubernetes-charts-index.yaml
new file mode 100644
index 000000000..c6b7962a1
--- /dev/null
+++ b/helm/internal/resolver/testdata/repository/kubernetes-charts-index.yaml
@@ -0,0 +1,49 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ urls:
+ - https://charts.helm.sh/stable/alpine-0.1.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.2.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ urls:
+ - https://charts.helm.sh/stable/alpine-0.2.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.1.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ mariadb:
+ - name: mariadb
+ urls:
+ - https://charts.helm.sh/stable/mariadb-0.3.0.tgz
+ checksum: 65229f6de44a2be9f215d11dbff311673fc8ba56
+ home: https://mariadb.org
+ sources:
+ - https://github.com/bitnami/bitnami-docker-mariadb
+ version: 0.3.0
+ description: Chart for MariaDB
+ keywords:
+ - mariadb
+ - mysql
+ - database
+ - sql
+ maintainers:
+ - name: Bitnami
+ email: containers@bitnami.com
+ icon: ""
+ apiVersion: v2
diff --git a/helm/internal/statusreaders/job_status_reader.go b/helm/internal/statusreaders/job_status_reader.go
new file mode 100644
index 000000000..3cd9ac7ac
--- /dev/null
+++ b/helm/internal/statusreaders/job_status_reader.go
@@ -0,0 +1,121 @@
+/*
+Copyright The Helm Authors.
+This file was initially copied and modified from
+ https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "context"
+ "fmt"
+
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/object"
+)
+
+type customJobStatusReader struct {
+ genericStatusReader engine.StatusReader
+}
+
+func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader {
+ genericStatusReader := statusreaders.NewGenericStatusReader(mapper, jobConditions)
+ return &customJobStatusReader{
+ genericStatusReader: genericStatusReader,
+ }
+}
+
+func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool {
+ return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind()
+}
+
+func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatus(ctx, reader, resource)
+}
+
+func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
+}
+
+// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go
+// Modified to return Current status only when the Job has completed as opposed to when it's in progress.
+func jobConditions(u *unstructured.Unstructured) (*status.Result, error) {
+ obj := u.UnstructuredContent()
+
+ parallelism := status.GetIntField(obj, ".spec.parallelism", 1)
+ completions := status.GetIntField(obj, ".spec.completions", parallelism)
+ succeeded := status.GetIntField(obj, ".status.succeeded", 0)
+ failed := status.GetIntField(obj, ".status.failed", 0)
+
+ // Conditions
+ // https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24
+ objc, err := status.GetObjectWithConditions(obj)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range objc.Status.Conditions {
+ switch c.Type {
+ case "Complete":
+ if c.Status == corev1.ConditionTrue {
+ message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions)
+ return &status.Result{
+ Status: status.CurrentStatus,
+ Message: message,
+ Conditions: []status.Condition{},
+ }, nil
+ }
+ case "Failed":
+ message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)
+ if c.Status == corev1.ConditionTrue {
+ return &status.Result{
+ Status: status.FailedStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionStalled,
+ Status: corev1.ConditionTrue,
+ Reason: "JobFailed",
+ Message: message,
+ },
+ },
+ }, nil
+ }
+ }
+ }
+
+ message := "Job in progress"
+ return &status.Result{
+ Status: status.InProgressStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionReconciling,
+ Status: corev1.ConditionTrue,
+ Reason: "JobInProgress",
+ Message: message,
+ },
+ },
+ }, nil
+}
diff --git a/helm/internal/statusreaders/job_status_reader_test.go b/helm/internal/statusreaders/job_status_reader_test.go
new file mode 100644
index 000000000..6e9ed5a79
--- /dev/null
+++ b/helm/internal/statusreaders/job_status_reader_test.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Helm Authors.
+This file was initially copied and modified from
+ https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job_test.go
+Copyright 2022 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+)
+
+func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) {
+ t.Helper()
+ // If the incoming object is already unstructured, perform a deep copy first
+ // otherwise DefaultUnstructuredConverter ends up returning the inner map without
+ // making a copy.
+ if _, ok := obj.(runtime.Unstructured); ok {
+ obj = obj.DeepCopyObject()
+ }
+ rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+ if err != nil {
+ return nil, err
+ }
+ return &unstructured.Unstructured{Object: rawMap}, nil
+}
+
+func TestJobConditions(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ job *batchv1.Job
+ expectedStatus status.Status
+ }{
+ {
+ name: "job without Complete condition returns InProgress status",
+ job: &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "job-no-condition",
+ },
+ Spec: batchv1.JobSpec{},
+ Status: batchv1.JobStatus{},
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "job with Complete condition as True returns Current status",
+ job: &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "job-complete",
+ },
+ Spec: batchv1.JobSpec{},
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobComplete,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
+ },
+ expectedStatus: status.CurrentStatus,
+ },
+ {
+ name: "job with Failed condition as True returns Failed status",
+ job: &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "job-failed",
+ },
+ Spec: batchv1.JobSpec{},
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobFailed,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
+ },
+ expectedStatus: status.FailedStatus,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ us, err := toUnstructured(t, tc.job)
+ assert.NoError(t, err)
+ result, err := jobConditions(us)
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedStatus, result.Status)
+ })
+ }
+}
diff --git a/helm/internal/statusreaders/pod_status_reader.go b/helm/internal/statusreaders/pod_status_reader.go
new file mode 100644
index 000000000..bf633c0dd
--- /dev/null
+++ b/helm/internal/statusreaders/pod_status_reader.go
@@ -0,0 +1,104 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/object"
+)
+
+type customPodStatusReader struct {
+ genericStatusReader engine.StatusReader
+}
+
+func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader {
+ genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions)
+ return &customPodStatusReader{
+ genericStatusReader: genericStatusReader,
+ }
+}
+
+func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool {
+ return gk == corev1.SchemeGroupVersion.WithKind("Pod").GroupKind()
+}
+
+func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatus(ctx, reader, resource)
+}
+
+func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
+ return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
+}
+
+func podConditions(u *unstructured.Unstructured) (*status.Result, error) {
+ obj := u.UnstructuredContent()
+ phase := status.GetStringField(obj, ".status.phase", "")
+ switch corev1.PodPhase(phase) {
+ case corev1.PodSucceeded:
+ message := fmt.Sprintf("pod %s succeeded", u.GetName())
+ return &status.Result{
+ Status: status.CurrentStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionStalled,
+ Status: corev1.ConditionTrue,
+ Message: message,
+ },
+ },
+ }, nil
+ case corev1.PodFailed:
+ message := fmt.Sprintf("pod %s failed", u.GetName())
+ return &status.Result{
+ Status: status.FailedStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionStalled,
+ Status: corev1.ConditionTrue,
+ Reason: "PodFailed",
+ Message: message,
+ },
+ },
+ }, nil
+ default:
+ message := "Pod in progress"
+ return &status.Result{
+ Status: status.InProgressStatus,
+ Message: message,
+ Conditions: []status.Condition{
+ {
+ Type: status.ConditionReconciling,
+ Status: corev1.ConditionTrue,
+ Reason: "PodInProgress",
+ Message: message,
+ },
+ },
+ }, nil
+ }
+}
diff --git a/helm/internal/statusreaders/pod_status_reader_test.go b/helm/internal/statusreaders/pod_status_reader_test.go
new file mode 100644
index 000000000..ba0d1f1bb
--- /dev/null
+++ b/helm/internal/statusreaders/pod_status_reader_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package statusreaders
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+)
+
+func TestPodConditions(t *testing.T) {
+ tests := []struct {
+ name string
+ pod *v1.Pod
+ expectedStatus status.Status
+ }{
+ {
+ name: "pod without status returns in progress",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-no-status"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{},
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "pod succeeded returns current status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-succeeded"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodSucceeded,
+ },
+ },
+ expectedStatus: status.CurrentStatus,
+ },
+ {
+ name: "pod failed returns failed status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-failed"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodFailed,
+ },
+ },
+ expectedStatus: status.FailedStatus,
+ },
+ {
+ name: "pod pending returns in progress status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-pending"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodPending,
+ },
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "pod running returns in progress status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-running"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodRunning,
+ },
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ {
+ name: "pod with unknown phase returns in progress status",
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-unknown"},
+ Spec: v1.PodSpec{},
+ Status: v1.PodStatus{
+ Phase: v1.PodUnknown,
+ },
+ },
+ expectedStatus: status.InProgressStatus,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ us, err := toUnstructured(t, tc.pod)
+ assert.NoError(t, err)
+ result, err := podConditions(us)
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedStatus, result.Status)
+ })
+ }
+}
diff --git a/helm/internal/sympath/walk.go b/helm/internal/sympath/walk.go
new file mode 100644
index 000000000..812bb68ce
--- /dev/null
+++ b/helm/internal/sympath/walk.go
@@ -0,0 +1,119 @@
+/*
+Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are
+provided under the BSD license.
+
+https://github.com/golang/go/blob/master/LICENSE
+
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sympath
+
+import (
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or directory
+// in the tree, including root. All errors that arise visiting files and directories
+// are filtered by walkFn. The files are walked in lexical order, which makes the
+// output deterministic but means that for very large directories Walk can be
+// inefficient. Walk follows symbolic links.
+func Walk(root string, walkFn filepath.WalkFunc) error {
+ info, err := os.Lstat(root)
+ if err != nil {
+ err = walkFn(root, nil, err)
+ } else {
+ err = symwalk(root, info, walkFn)
+ }
+ if err == filepath.SkipDir {
+ return nil
+ }
+ return err
+}
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+func readDirNames(dirname string) ([]string, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// symwalk recursively descends path, calling walkFn.
+func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ // Recursively walk symlinked directories.
+ if IsSymlink(info) {
+ resolved, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return fmt.Errorf("error evaluating symlink %s: %w", path, err)
+ }
+ // This log message is to highlight a symlink that is being used within a chart, symlinks can be used for nefarious reasons.
+ slog.Info("found symbolic link in path. Contents of linked file included and used", "path", path, "resolved", resolved)
+ if info, err = os.Lstat(resolved); err != nil {
+ return err
+ }
+ if err := symwalk(path, info, walkFn); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ return nil
+ }
+
+ if err := walkFn(path, info, nil); err != nil {
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := os.Lstat(filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = symwalk(filename, fileInfo, walkFn)
+ if err != nil {
+ if (!fileInfo.IsDir() && !IsSymlink(fileInfo)) || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// IsSymlink is used to determine if the fileinfo is a symbolic link.
+func IsSymlink(fi os.FileInfo) bool {
+ return fi.Mode()&os.ModeSymlink != 0
+}
diff --git a/helm/internal/sympath/walk_test.go b/helm/internal/sympath/walk_test.go
new file mode 100644
index 000000000..1eba8b996
--- /dev/null
+++ b/helm/internal/sympath/walk_test.go
@@ -0,0 +1,152 @@
+/*
+Copyright (c) for portions of walk_test.go are held by The Go Authors, 2009 and are
+provided under the BSD license.
+
+https://github.com/golang/go/blob/master/LICENSE
+
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sympath
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+type Node struct {
+ name string
+ entries []*Node // nil if the entry is a file
+ marks int
+ expectedMarks int
+ symLinkedTo string
+}
+
+var tree = &Node{
+ "testdata",
+ []*Node{
+ {"a", nil, 0, 1, ""},
+ {"b", []*Node{}, 0, 1, ""},
+ {"c", nil, 0, 2, ""},
+ {"d", nil, 0, 0, "c"},
+ {
+ "e",
+ []*Node{
+ {"x", nil, 0, 1, ""},
+ {"y", []*Node{}, 0, 1, ""},
+ {
+ "z",
+ []*Node{
+ {"u", nil, 0, 1, ""},
+ {"v", nil, 0, 1, ""},
+ {"w", nil, 0, 1, ""},
+ },
+ 0,
+ 1,
+ "",
+ },
+ },
+ 0,
+ 1,
+ "",
+ },
+ },
+ 0,
+ 1,
+ "",
+}
+
+func walkTree(n *Node, path string, f func(path string, n *Node)) {
+ f(path, n)
+ for _, e := range n.entries {
+ walkTree(e, filepath.Join(path, e.name), f)
+ }
+}
+
+func makeTree(t *testing.T) {
+ t.Helper()
+ walkTree(tree, tree.name, func(path string, n *Node) {
+ if n.entries == nil {
+ if n.symLinkedTo != "" {
+ if err := os.Symlink(n.symLinkedTo, path); err != nil {
+ t.Fatalf("makeTree: %v", err)
+ }
+ } else {
+ fd, err := os.Create(path)
+ if err != nil {
+ t.Fatalf("makeTree: %v", err)
+ return
+ }
+ fd.Close()
+ }
+ } else {
+ if err := os.Mkdir(path, 0770); err != nil {
+ t.Fatalf("makeTree: %v", err)
+ }
+ }
+ })
+}
+
+func checkMarks(t *testing.T, report bool) {
+ t.Helper()
+ walkTree(tree, tree.name, func(path string, n *Node) {
+ if n.marks != n.expectedMarks && report {
+ t.Errorf("node %s mark = %d; expected %d", path, n.marks, n.expectedMarks)
+ }
+ n.marks = 0
+ })
+}
+
+// Assumes that each node name is unique. Good enough for a test.
+// If clearIncomingError is true, any incoming error is cleared before
+// return. The errors are always accumulated, though.
+func mark(info os.FileInfo, err error, errors *[]error, clearIncomingError bool) error {
+ if err != nil {
+ *errors = append(*errors, err)
+ if clearIncomingError {
+ return nil
+ }
+ return err
+ }
+ name := info.Name()
+ walkTree(tree, tree.name, func(_ string, n *Node) {
+ if n.name == name {
+ n.marks++
+ }
+ })
+ return nil
+}
+
+func TestWalk(t *testing.T) {
+ makeTree(t)
+ errors := make([]error, 0, 10)
+ markFn := func(_ string, info os.FileInfo, err error) error {
+ return mark(info, err, &errors, true)
+ }
+ // Expect no errors.
+ err := Walk(tree.name, markFn)
+ if err != nil {
+ t.Fatalf("no error expected, found: %s", err)
+ }
+ if len(errors) != 0 {
+ t.Fatalf("unexpected errors: %s", errors)
+ }
+ checkMarks(t, true)
+
+ // cleanup
+ if err := os.RemoveAll(tree.name); err != nil {
+ t.Errorf("removeTree: %v", err)
+ }
+}
diff --git a/helm/internal/test/ensure/ensure.go b/helm/internal/test/ensure/ensure.go
new file mode 100644
index 000000000..a72f48c2d
--- /dev/null
+++ b/helm/internal/test/ensure/ensure.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ensure
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+// HelmHome sets up a Helm Home in a temp dir.
+func HelmHome(t *testing.T) {
+ t.Helper()
+ base := t.TempDir()
+ t.Setenv(xdg.CacheHomeEnvVar, base)
+ t.Setenv(xdg.ConfigHomeEnvVar, base)
+ t.Setenv(xdg.DataHomeEnvVar, base)
+ t.Setenv(helmpath.CacheHomeEnvVar, "")
+ t.Setenv(helmpath.ConfigHomeEnvVar, "")
+ t.Setenv(helmpath.DataHomeEnvVar, "")
+}
+
+// TempFile ensures a temp file for unit testing purposes.
+//
+// It returns the path to the directory (to which you will still need to join the filename)
+//
+// The returned directory is automatically removed when the test and all its subtests complete.
+//
+// tempdir := TempFile(t, "foo", []byte("bar"))
+// filename := filepath.Join(tempdir, "foo")
+func TempFile(t *testing.T, name string, data []byte) string {
+ t.Helper()
+ path := t.TempDir()
+ filename := filepath.Join(path, name)
+ if err := os.WriteFile(filename, data, 0o755); err != nil {
+ t.Fatal(err)
+ }
+ return path
+}
diff --git a/helm/internal/test/test.go b/helm/internal/test/test.go
new file mode 100644
index 000000000..632bc72fd
--- /dev/null
+++ b/helm/internal/test/test.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// UpdateGolden writes out the golden files with the latest values, rather than failing the test.
+var updateGolden = flag.Bool("update", false, "update golden files")
+
+// TestingT describes a testing object compatible with the critical functions from the testing.T type
+type TestingT interface {
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ HelperT
+}
+
+// HelperT describes a test with a helper function
+type HelperT interface {
+ Helper()
+}
+
+// AssertGoldenString asserts that the given string matches the contents of the given file.
+func AssertGoldenString(t TestingT, actual, filename string) {
+ t.Helper()
+
+ if err := compare([]byte(actual), path(filename)); err != nil {
+ t.Fatalf("%v\n", err)
+ }
+}
+
+// AssertGoldenFile asserts that the content of the actual file matches the contents of the expected file
+func AssertGoldenFile(t TestingT, actualFileName string, expectedFilename string) {
+ t.Helper()
+
+ actual, err := os.ReadFile(actualFileName)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ AssertGoldenString(t, string(actual), expectedFilename)
+}
+
+func path(filename string) string {
+ if filepath.IsAbs(filename) {
+ return filename
+ }
+ return filepath.Join("testdata", filename)
+}
+
+func compare(actual []byte, filename string) error {
+ actual = normalize(actual)
+ if err := update(filename, actual); err != nil {
+ return err
+ }
+
+ expected, err := os.ReadFile(filename)
+ if err != nil {
+ return fmt.Errorf("unable to read testdata %s: %w", filename, err)
+ }
+ expected = normalize(expected)
+ if !bytes.Equal(expected, actual) {
+ return fmt.Errorf("does not match golden file %s\n\nWANT:\n'%s'\n\nGOT:\n'%s'", filename, expected, actual)
+ }
+ return nil
+}
+
+func update(filename string, in []byte) error {
+ if !*updateGolden {
+ return nil
+ }
+ return os.WriteFile(filename, normalize(in), 0666)
+}
+
+func normalize(in []byte) []byte {
+ return bytes.ReplaceAll(in, []byte("\r\n"), []byte("\n"))
+}
diff --git a/helm/internal/third_party/dep/fs/fs.go b/helm/internal/third_party/dep/fs/fs.go
new file mode 100644
index 000000000..6e2720f3b
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/fs.go
@@ -0,0 +1,377 @@
+/*
+Copyright (c) for portions of fs.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+ "syscall"
+)
+
+// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep.
+// This code is copied from https://github.com/golang/dep/blob/37d6c560cdf407be7b6cd035b23dba89df9275cf/internal/fs/fs.go
+// No changes to the code were made other than removing some unused functions
+
+// RenameWithFallback attempts to rename a file or directory, but falls back to
+// copying in the event of a cross-device link error. If the fallback copy
+// succeeds, src is still removed, emulating normal rename behavior.
+func RenameWithFallback(src, dst string) error {
+ _, err := os.Stat(src)
+ if err != nil {
+ return fmt.Errorf("cannot stat %s: %w", src, err)
+ }
+
+ err = os.Rename(src, dst)
+ if err == nil {
+ return nil
+ }
+
+ return renameFallback(err, src, dst)
+}
+
+// renameByCopy attempts to rename a file or directory by copying it to the
+// destination and then removing the src thus emulating the rename behavior.
+func renameByCopy(src, dst string) error {
+ var cerr error
+ if dir, _ := IsDir(src); dir {
+ cerr = CopyDir(src, dst)
+ if cerr != nil {
+ cerr = fmt.Errorf("copying directory failed: %w", cerr)
+ }
+ } else {
+ cerr = CopyFile(src, dst)
+ if cerr != nil {
+ cerr = fmt.Errorf("copying file failed: %w", cerr)
+ }
+ }
+
+ if cerr != nil {
+ return fmt.Errorf("rename fallback failed: cannot rename %s to %s: %w", src, dst, cerr)
+ }
+
+ if err := os.RemoveAll(src); err != nil {
+ return fmt.Errorf("cannot delete %s: %w", src, err)
+ }
+
+ return nil
+}
+
+var (
+ errSrcNotDir = errors.New("source is not a directory")
+ errDstExist = errors.New("destination already exists")
+)
+
+// CopyDir recursively copies a directory tree, attempting to preserve permissions.
+// Source directory must exist, destination directory must *not* exist.
+func CopyDir(src, dst string) error {
+ src = filepath.Clean(src)
+ dst = filepath.Clean(dst)
+
+ // We use os.Lstat() here to ensure we don't fall in a loop where a symlink
+ // actually links to a one of its parent directories.
+ fi, err := os.Lstat(src)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errSrcNotDir
+ }
+
+ _, err = os.Stat(dst)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return err
+ }
+ if err == nil {
+ return errDstExist
+ }
+
+ if err = os.MkdirAll(dst, fi.Mode()); err != nil {
+ return fmt.Errorf("cannot mkdir %s: %w", dst, err)
+ }
+
+ entries, err := os.ReadDir(src)
+ if err != nil {
+ return fmt.Errorf("cannot read directory %s: %w", dst, err)
+ }
+
+ for _, entry := range entries {
+ srcPath := filepath.Join(src, entry.Name())
+ dstPath := filepath.Join(dst, entry.Name())
+
+ if entry.IsDir() {
+ if err = CopyDir(srcPath, dstPath); err != nil {
+ return fmt.Errorf("copying directory failed: %w", err)
+ }
+ } else {
+ // This will include symlinks, which is what we want when
+ // copying things.
+ if err = CopyFile(srcPath, dstPath); err != nil {
+ return fmt.Errorf("copying file failed: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// CopyFile copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file. The file mode will be copied from the source.
+func CopyFile(src, dst string) (err error) {
+ if sym, err := IsSymlink(src); err != nil {
+ return fmt.Errorf("symlink check failed: %w", err)
+ } else if sym {
+ if err := cloneSymlink(src, dst); err != nil {
+ if runtime.GOOS == "windows" {
+ // If cloning the symlink fails on Windows because the user
+ // does not have the required privileges, ignore the error and
+ // fall back to copying the file contents.
+ //
+ // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522):
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx
+ if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) {
+ return err
+ }
+ } else {
+ return err
+ }
+ } else {
+ return nil
+ }
+ }
+
+ in, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+
+ if _, err = io.Copy(out, in); err != nil {
+ out.Close()
+ return err
+ }
+
+ // Check for write errors on Close
+ if err = out.Close(); err != nil {
+ return err
+ }
+
+ si, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ // Temporary fix for Go < 1.9
+ //
+ // See: https://github.com/golang/dep/issues/774
+ // and https://github.com/golang/go/issues/20829
+ if runtime.GOOS == "windows" {
+ dst = fixLongPath(dst)
+ }
+ err = os.Chmod(dst, si.Mode())
+
+ return err
+}
+
+// cloneSymlink will create a new symlink that points to the resolved path of sl.
+// If sl is a relative symlink, dst will also be a relative symlink.
+func cloneSymlink(sl, dst string) error {
+ resolved, err := os.Readlink(sl)
+ if err != nil {
+ return err
+ }
+
+ return os.Symlink(resolved, dst)
+}
+
+// IsDir determines is the path given is a directory or not.
+func IsDir(name string) (bool, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return false, err
+ }
+ if !fi.IsDir() {
+ return false, fmt.Errorf("%q is not a directory", name)
+ }
+ return true, nil
+}
+
+// IsSymlink determines if the given path is a symbolic link.
+func IsSymlink(path string) (bool, error) {
+ l, err := os.Lstat(path)
+ if err != nil {
+ return false, err
+ }
+
+ return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+ // Do nothing (and don't allocate) if the path is "short".
+ // Empirically (at least on the Windows Server 2013 builder),
+ // the kernel is arbitrarily okay with < 248 bytes. That
+ // matches what the docs above say:
+ // "When using an API to create a directory, the specified
+ // path cannot be so long that you cannot append an 8.3 file
+ // name (that is, the directory name cannot exceed MAX_PATH
+ // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+ //
+ // The MSDN docs appear to say that a normal path that is 248 bytes long
+ // will work; empirically the path must be less than 248 bytes long.
+ if len(path) < 248 {
+ // Don't fix. (This is how Go 1.7 and earlier worked,
+ // not automatically generating the \\?\ form)
+ return path
+ }
+
+ // The extended form begins with \\?\, as in
+ // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+ // The extended form disables evaluation of . and .. path
+ // elements and disables the interpretation of / as equivalent
+ // to \. The conversion here rewrites / to \ and elides
+ // . elements as well as trailing or duplicate separators. For
+ // simplicity it avoids the conversion entirely for relative
+ // paths or paths containing .. elements. For now,
+ // \\server\share paths are not converted to
+ // \\?\UNC\server\share paths because the rules for doing so
+ // are less well-specified.
+ if len(path) >= 2 && path[:2] == `\\` {
+ // Don't canonicalize UNC paths.
+ return path
+ }
+ if !isAbs(path) {
+ // Relative path
+ return path
+ }
+
+ const prefix = `\\?`
+
+ pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
+ copy(pathbuf, prefix)
+ n := len(path)
+ r, w := 0, len(prefix)
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty block
+ r++
+ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // /./
+ r++
+ case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // /../ is currently unhandled
+ return path
+ default:
+ pathbuf[w] = '\\'
+ w++
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ pathbuf[w] = path[r]
+ w++
+ }
+ }
+ }
+ // A drive's root directory needs a trailing \
+ if w == len(`\\?\c:`) {
+ pathbuf[w] = '\\'
+ w++
+ }
+ return string(pathbuf[:w])
+}
+
+func isAbs(path string) (b bool) {
+ v := volumeName(path)
+ if v == "" {
+ return false
+ }
+ path = path[len(v):]
+ if path == "" {
+ return false
+ }
+ return os.IsPathSeparator(path[0])
+}
+
+func volumeName(path string) (v string) {
+ if len(path) < 2 {
+ return ""
+ }
+ // with drive letter
+ c := path[0]
+ if path[1] == ':' &&
+ ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' ||
+ 'A' <= c && c <= 'Z') {
+ return path[:2]
+ }
+ // is it UNC
+ if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) &&
+ !os.IsPathSeparator(path[2]) && path[2] != '.' {
+ // first, leading `\\` and next shouldn't be `\`. its server name.
+ for n := 3; n < l-1; n++ {
+ // second, next '\' shouldn't be repeated.
+ if os.IsPathSeparator(path[n]) {
+ n++
+ // third, following something characters. its share name.
+ if !os.IsPathSeparator(path[n]) {
+ if path[n] == '.' {
+ break
+ }
+ for ; n < l; n++ {
+ if os.IsPathSeparator(path[n]) {
+ break
+ }
+ }
+ return path[:n]
+ }
+ break
+ }
+ }
+ }
+ return ""
+}
diff --git a/helm/internal/third_party/dep/fs/fs_test.go b/helm/internal/third_party/dep/fs/fs_test.go
new file mode 100644
index 000000000..610771bc3
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/fs_test.go
@@ -0,0 +1,624 @@
+/*
+Copyright (c) for portions of fs_test.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+func TestRenameWithFallback(t *testing.T) {
+ dir := t.TempDir()
+
+ if err := RenameWithFallback(filepath.Join(dir, "does_not_exists"), filepath.Join(dir, "dst")); err == nil {
+ t.Fatal("expected an error for non existing file, but got nil")
+ }
+
+ srcpath := filepath.Join(dir, "src")
+
+ if srcf, err := os.Create(srcpath); err != nil {
+ t.Fatal(err)
+ } else {
+ srcf.Close()
+ }
+
+ if err := RenameWithFallback(srcpath, filepath.Join(dir, "dst")); err != nil {
+ t.Fatal(err)
+ }
+
+ srcpath = filepath.Join(dir, "a")
+ if err := os.MkdirAll(srcpath, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ dstpath := filepath.Join(dir, "b")
+ if err := os.MkdirAll(dstpath, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := RenameWithFallback(srcpath, dstpath); err == nil {
+ t.Fatal("expected an error if dst is an existing directory, but got nil")
+ }
+}
+
+func TestCopyDir(t *testing.T) {
+ dir := t.TempDir()
+
+ srcdir := filepath.Join(dir, "src")
+ if err := os.MkdirAll(srcdir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ files := []struct {
+ path string
+ contents string
+ fi os.FileInfo
+ }{
+ {path: "myfile", contents: "hello world"},
+ {path: filepath.Join("subdir", "file"), contents: "subdir file"},
+ }
+
+ // Create structure indicated in 'files'
+ for i, file := range files {
+ fn := filepath.Join(srcdir, file.path)
+ dn := filepath.Dir(fn)
+ if err := os.MkdirAll(dn, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ fh, err := os.Create(fn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = fh.Write([]byte(file.contents)); err != nil {
+ t.Fatal(err)
+ }
+ fh.Close()
+
+ files[i].fi, err = os.Stat(fn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ destdir := filepath.Join(dir, "dest")
+ if err := CopyDir(srcdir, destdir); err != nil {
+ t.Fatal(err)
+ }
+
+ // Compare copy against structure indicated in 'files'
+ for _, file := range files {
+ fn := filepath.Join(srcdir, file.path)
+ dn := filepath.Dir(fn)
+ dirOK, err := IsDir(dn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !dirOK {
+ t.Fatalf("expected %s to be a directory", dn)
+ }
+
+ got, err := os.ReadFile(fn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if file.contents != string(got) {
+ t.Fatalf("expected: %s, got: %s", file.contents, string(got))
+ }
+
+ gotinfo, err := os.Stat(fn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if file.fi.Mode() != gotinfo.Mode() {
+ t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v",
+ file.path, file.fi.Mode(), fn, gotinfo.Mode())
+ }
+ }
+}
+
+func TestCopyDirFail_SrcInaccessible(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // XXX: setting permissions works differently in
+ // Microsoft Windows. Skipping this until a
+ // compatible implementation is provided.
+ t.Skip("skipping on windows")
+ }
+
+ var currentUID = os.Getuid()
+
+ if currentUID == 0 {
+ // Skipping if root, because all files are accessible
+ t.Skip("Skipping for root user")
+ }
+
+ var srcdir, dstdir string
+
+ cleanup := setupInaccessibleDir(t, func(dir string) error {
+ srcdir = filepath.Join(dir, "src")
+ return os.MkdirAll(srcdir, 0755)
+ })
+ defer cleanup()
+
+ dir := t.TempDir()
+
+ dstdir = filepath.Join(dir, "dst")
+ if err := CopyDir(srcdir, dstdir); err == nil {
+ t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir)
+ }
+}
+
+func TestCopyDirFail_DstInaccessible(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // XXX: setting permissions works differently in
+ // Microsoft Windows. Skipping this until a
+ // compatible implementation is provided.
+ t.Skip("skipping on windows")
+ }
+
+ var currentUID = os.Getuid()
+
+ if currentUID == 0 {
+ // Skipping if root, because all files are accessible
+ t.Skip("Skipping for root user")
+ }
+
+ var srcdir, dstdir string
+
+ dir := t.TempDir()
+
+ srcdir = filepath.Join(dir, "src")
+ if err := os.MkdirAll(srcdir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ cleanup := setupInaccessibleDir(t, func(dir string) error {
+ dstdir = filepath.Join(dir, "dst")
+ return nil
+ })
+ defer cleanup()
+
+ if err := CopyDir(srcdir, dstdir); err == nil {
+ t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir)
+ }
+}
+
+func TestCopyDirFail_SrcIsNotDir(t *testing.T) {
+ var srcdir, dstdir string
+ var err error
+
+ dir := t.TempDir()
+
+ srcdir = filepath.Join(dir, "src")
+ if _, err = os.Create(srcdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dstdir = filepath.Join(dir, "dst")
+
+ if err = CopyDir(srcdir, dstdir); err == nil {
+ t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir)
+ }
+
+ if err != errSrcNotDir {
+ t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err)
+ }
+
+}
+
+func TestCopyDirFail_DstExists(t *testing.T) {
+ var srcdir, dstdir string
+ var err error
+
+ dir := t.TempDir()
+
+ srcdir = filepath.Join(dir, "src")
+ if err = os.MkdirAll(srcdir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ dstdir = filepath.Join(dir, "dst")
+ if err = os.MkdirAll(dstdir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = CopyDir(srcdir, dstdir); err == nil {
+ t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir)
+ }
+
+ if err != errDstExist {
+ t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err)
+ }
+}
+
+func TestCopyDirFailOpen(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // XXX: setting permissions works differently in
+ // Microsoft Windows. os.Chmod(..., 0222) below is not
+ // enough for the file to be readonly, and os.Chmod(...,
+ // 0000) returns an invalid argument error. Skipping
+ // this until a compatible implementation is
+ // provided.
+ t.Skip("skipping on windows")
+ }
+
+ var currentUID = os.Getuid()
+
+ if currentUID == 0 {
+ // Skipping if root, because all files are accessible
+ t.Skip("Skipping for root user")
+ }
+
+ var srcdir, dstdir string
+
+ dir := t.TempDir()
+
+ srcdir = filepath.Join(dir, "src")
+ if err := os.MkdirAll(srcdir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ srcfn := filepath.Join(srcdir, "file")
+ srcf, err := os.Create(srcfn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ srcf.Close()
+
+ // setup source file so that it cannot be read
+ if err = os.Chmod(srcfn, 0222); err != nil {
+ t.Fatal(err)
+ }
+
+ dstdir = filepath.Join(dir, "dst")
+
+ if err = CopyDir(srcdir, dstdir); err == nil {
+ t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir)
+ }
+}
+
+func TestCopyFile(t *testing.T) {
+ dir := t.TempDir()
+
+ srcf, err := os.Create(filepath.Join(dir, "srcfile"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := "hello world"
+ if _, err := srcf.Write([]byte(want)); err != nil {
+ t.Fatal(err)
+ }
+ srcf.Close()
+
+ destf := filepath.Join(dir, "destf")
+ if err := CopyFile(srcf.Name(), destf); err != nil {
+ t.Fatal(err)
+ }
+
+ got, err := os.ReadFile(destf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want != string(got) {
+ t.Fatalf("expected: %s, got: %s", want, string(got))
+ }
+
+ wantinfo, err := os.Stat(srcf.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gotinfo, err := os.Stat(destf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if wantinfo.Mode() != gotinfo.Mode() {
+ t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode())
+ }
+}
+
+func TestCopyFileSymlink(t *testing.T) {
+ tempdir := t.TempDir()
+
+ testcases := map[string]string{
+ filepath.Join("./testdata/symlinks/file-symlink"): filepath.Join(tempdir, "dst-file"),
+ filepath.Join("./testdata/symlinks/windows-file-symlink"): filepath.Join(tempdir, "windows-dst-file"),
+ filepath.Join("./testdata/symlinks/invalid-symlink"): filepath.Join(tempdir, "invalid-symlink"),
+ }
+
+ for symlink, dst := range testcases {
+ t.Run(symlink, func(t *testing.T) {
+ var err error
+ if err = CopyFile(symlink, dst); err != nil {
+ t.Fatalf("failed to copy symlink: %s", err)
+ }
+
+ var want, got string
+
+ if runtime.GOOS == "windows" {
+ // Creating symlinks on Windows require an additional permission
+ // regular users aren't granted usually. So we copy the file
+ // content as a fall back instead of creating a real symlink.
+ srcb, err := os.ReadFile(symlink)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+ dstb, err := os.ReadFile(dst)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+
+ want = string(srcb)
+ got = string(dstb)
+ } else {
+ want, err = os.Readlink(symlink)
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+
+ got, err = os.Readlink(dst)
+ if err != nil {
+ t.Fatalf("could not resolve symlink: %s", err)
+ }
+ }
+
+ if want != got {
+ t.Fatalf("resolved path is incorrect. expected %s, got %s", want, got)
+ }
+ })
+ }
+}
+
+func TestCopyFileFail(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // XXX: setting permissions works differently in
+ // Microsoft Windows. Skipping this until a
+ // compatible implementation is provided.
+ t.Skip("skipping on windows")
+ }
+
+ var currentUID = os.Getuid()
+
+ if currentUID == 0 {
+ // Skipping if root, because all files are accessible
+ t.Skip("Skipping for root user")
+ }
+
+ dir := t.TempDir()
+
+ srcf, err := os.Create(filepath.Join(dir, "srcfile"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ srcf.Close()
+
+ var dstdir string
+
+ cleanup := setupInaccessibleDir(t, func(dir string) error {
+ dstdir = filepath.Join(dir, "dir")
+ return os.Mkdir(dstdir, 0777)
+ })
+ defer cleanup()
+
+ fn := filepath.Join(dstdir, "file")
+ if err := CopyFile(srcf.Name(), fn); err == nil {
+ t.Fatalf("expected error for %s, got none", fn)
+ }
+}
+
+// setupInaccessibleDir creates a temporary location with a single
+// directory in it, in such a way that directory is not accessible
+// after this function returns.
+//
+// op is called with the directory as argument, so that it can create
+// files or other test artifacts.
+//
+// If setupInaccessibleDir fails in its preparation, or op fails, t.Fatal
+// will be invoked.
+//
+// This function returns a cleanup function that removes all the temporary
+// files this function creates. It is the caller's responsibility to call
+// this function before the test is done running, whether there's an error or not.
+func setupInaccessibleDir(t *testing.T, op func(dir string) error) func() {
+ t.Helper()
+ dir := t.TempDir()
+
+ subdir := filepath.Join(dir, "dir")
+
+ cleanup := func() {
+ if err := os.Chmod(subdir, 0777); err != nil {
+ t.Error(err)
+ }
+ }
+
+ if err := os.Mkdir(subdir, 0777); err != nil {
+ cleanup()
+ t.Fatal(err)
+ return nil
+ }
+
+ if err := op(subdir); err != nil {
+ cleanup()
+ t.Fatal(err)
+ return nil
+ }
+
+ if err := os.Chmod(subdir, 0666); err != nil {
+ cleanup()
+ t.Fatal(err)
+ return nil
+ }
+
+ return cleanup
+}
+
+func TestIsDir(t *testing.T) {
+
+ var currentUID = os.Getuid()
+
+ if currentUID == 0 {
+ // Skipping if root, because all files are accessible
+ t.Skip("Skipping for root user")
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var dn string
+
+ cleanup := setupInaccessibleDir(t, func(dir string) error {
+ dn = filepath.Join(dir, "dir")
+ return os.Mkdir(dn, 0777)
+ })
+ defer cleanup()
+
+ tests := map[string]struct {
+ exists bool
+ err bool
+ }{
+ wd: {true, false},
+ filepath.Join(wd, "testdata"): {true, false},
+ filepath.Join(wd, "main.go"): {false, true},
+ filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true},
+ dn: {false, true},
+ }
+
+ if runtime.GOOS == "windows" {
+ // This test doesn't work on Microsoft Windows because
+ // of the differences in how file permissions are
+ // implemented. For this to work, the directory where
+ // the directory exists should be inaccessible.
+ delete(tests, dn)
+ }
+
+ for f, want := range tests {
+ got, err := IsDir(f)
+ if err != nil && !want.err {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if got != want.exists {
+ t.Fatalf("expected %t for %s, got %t", want.exists, f, got)
+ }
+ }
+}
+
+func TestIsSymlink(t *testing.T) {
+
+ var currentUID = os.Getuid()
+
+ if currentUID == 0 {
+ // Skipping if root, because all files are accessible
+ t.Skip("Skipping for root user")
+ }
+
+ dir := t.TempDir()
+
+ dirPath := filepath.Join(dir, "directory")
+ if err := os.MkdirAll(dirPath, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ filePath := filepath.Join(dir, "file")
+ f, err := os.Create(filePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ dirSymlink := filepath.Join(dir, "dirSymlink")
+ fileSymlink := filepath.Join(dir, "fileSymlink")
+
+ if err = os.Symlink(dirPath, dirSymlink); err != nil {
+ t.Fatal(err)
+ }
+ if err = os.Symlink(filePath, fileSymlink); err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ inaccessibleFile string
+ inaccessibleSymlink string
+ )
+
+ cleanup := setupInaccessibleDir(t, func(dir string) error {
+ inaccessibleFile = filepath.Join(dir, "file")
+ if fh, err := os.Create(inaccessibleFile); err != nil {
+ return err
+ } else if err = fh.Close(); err != nil {
+ return err
+ }
+
+ inaccessibleSymlink = filepath.Join(dir, "symlink")
+ return os.Symlink(inaccessibleFile, inaccessibleSymlink)
+ })
+ defer cleanup()
+
+ tests := map[string]struct{ expected, err bool }{
+ dirPath: {false, false},
+ filePath: {false, false},
+ dirSymlink: {true, false},
+ fileSymlink: {true, false},
+ inaccessibleFile: {false, true},
+ inaccessibleSymlink: {false, true},
+ }
+
+ if runtime.GOOS == "windows" {
+ // XXX: setting permissions works differently in Windows. Skipping
+ // these cases until a compatible implementation is provided.
+ delete(tests, inaccessibleFile)
+ delete(tests, inaccessibleSymlink)
+ }
+
+ for path, want := range tests {
+ got, err := IsSymlink(path)
+ if err != nil {
+ if !want.err {
+ t.Errorf("expected no error, got %v", err)
+ }
+ }
+
+ if got != want.expected {
+ t.Errorf("expected %t for %s, got %t", want.expected, path, got)
+ }
+ }
+}
diff --git a/helm/internal/third_party/dep/fs/rename.go b/helm/internal/third_party/dep/fs/rename.go
new file mode 100644
index 000000000..5f13b1ca3
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/rename.go
@@ -0,0 +1,57 @@
+//go:build !windows
+
+/*
+Copyright (c) for portions of rename.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// renameFallback attempts to determine the appropriate fallback to failed rename
+// operation depending on the resulting error.
+func renameFallback(err error, src, dst string) error {
+ // Rename may fail if src and dst are on different devices; fall back to
+ // copy if we detect that case. syscall.EXDEV is the common name for the
+ // cross device link error which has varying output text across different
+ // operating systems.
+ terr, ok := err.(*os.LinkError)
+ if !ok {
+ return err
+ } else if terr.Err != syscall.EXDEV {
+ return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr)
+ }
+
+ return renameByCopy(src, dst)
+}
diff --git a/helm/internal/third_party/dep/fs/rename_windows.go b/helm/internal/third_party/dep/fs/rename_windows.go
new file mode 100644
index 000000000..566f695d3
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/rename_windows.go
@@ -0,0 +1,68 @@
+//go:build windows
+
+/*
+Copyright (c) for portions of rename_windows.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package fs
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// renameFallback attempts to determine the appropriate fallback to failed rename
+// operation depending on the resulting error.
+func renameFallback(err error, src, dst string) error {
+ // Rename may fail if src and dst are on different devices; fall back to
+ // copy if we detect that case. syscall.EXDEV is the common name for the
+ // cross device link error which has varying output text across different
+ // operating systems.
+ terr, ok := err.(*os.LinkError)
+ if !ok {
+ return err
+ }
+
+ if terr.Err != syscall.EXDEV {
+ // In windows it can drop down to an operating system call that
+ // returns an operating system error with a different number and
+ // message. Checking for that as a fall back.
+ noerr, ok := terr.Err.(syscall.Errno)
+
+ // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
+ // See https://msdn.microsoft.com/en-us/library/cc231199.aspx
+ if ok && noerr != 0x11 {
+ return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr)
+ }
+ }
+
+ return renameByCopy(src, dst)
+}
diff --git a/helm/internal/third_party/dep/fs/testdata/symlinks/file-symlink b/helm/internal/third_party/dep/fs/testdata/symlinks/file-symlink
new file mode 120000
index 000000000..4c52274de
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/testdata/symlinks/file-symlink
@@ -0,0 +1 @@
+../test.file
\ No newline at end of file
diff --git a/helm/internal/third_party/dep/fs/testdata/symlinks/invalid-symlink b/helm/internal/third_party/dep/fs/testdata/symlinks/invalid-symlink
new file mode 120000
index 000000000..0edf4f301
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/testdata/symlinks/invalid-symlink
@@ -0,0 +1 @@
+/non/existing/file
\ No newline at end of file
diff --git a/helm/internal/third_party/dep/fs/testdata/symlinks/windows-file-symlink b/helm/internal/third_party/dep/fs/testdata/symlinks/windows-file-symlink
new file mode 120000
index 000000000..af1d6c8f5
--- /dev/null
+++ b/helm/internal/third_party/dep/fs/testdata/symlinks/windows-file-symlink
@@ -0,0 +1 @@
+C:/Users/ibrahim/go/src/github.com/golang/dep/internal/fs/testdata/test.file
\ No newline at end of file
diff --git a/helm/internal/third_party/dep/fs/testdata/test.file b/helm/internal/third_party/dep/fs/testdata/test.file
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go b/helm/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go
new file mode 100644
index 000000000..ae62d0e6f
--- /dev/null
+++ b/helm/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "context"
+ "sort"
+
+ apps "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/core/v1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ intstrutil "k8s.io/apimachinery/pkg/util/intstr"
+ appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
+)
+
+// deploymentutil contains a copy of a few functions from Kubernetes controller code to avoid a dependency on k8s.io/kubernetes.
+// This code is copied from https://github.com/kubernetes/kubernetes/blob/e856613dd5bb00bcfaca6974431151b5c06cbed5/pkg/controller/deployment/util/deployment_util.go
+// No changes to the code were made other than removing some unused functions
+
+// RsListFunc returns the ReplicaSet from the ReplicaSet namespace and the List metav1.ListOptions.
+type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
+
+// ListReplicaSets returns a slice of RSes the given deployment targets.
+// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
+// because only the controller itself should do that.
+// However, it does filter out anything whose ControllerRef doesn't match.
+func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
+ // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
+ // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
+ namespace := deployment.Namespace
+ selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
+ if err != nil {
+ return nil, err
+ }
+ options := metav1.ListOptions{LabelSelector: selector.String()}
+ all, err := getRSList(namespace, options)
+ if err != nil {
+ return nil, err
+ }
+ // Only include those whose ControllerRef matches the Deployment.
+ owned := make([]*apps.ReplicaSet, 0, len(all))
+ for _, rs := range all {
+ if metav1.IsControlledBy(rs, deployment) {
+ owned = append(owned, rs)
+ }
+ }
+ return owned, nil
+}
+
+// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
+type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet
+
+func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
+func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
+ if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
+ return o[i].Name < o[j].Name
+ }
+ return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
+}
+
+// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
+func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
+ sort.Sort(ReplicaSetsByCreationTimestamp(rsList))
+ for i := range rsList {
+ if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
+ // In rare cases, such as after cluster upgrades, Deployment may end up with
+ // having more than one new ReplicaSets that have the same template as its template,
+ // see https://github.com/kubernetes/kubernetes/issues/40415
+ // We deterministically choose the oldest new ReplicaSet.
+ return rsList[i]
+ }
+ }
+ // new ReplicaSet does not exist.
+ return nil
+}
+
+// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
+// We ignore pod-template-hash because:
+// 1. The hash result would be different upon podTemplateSpec API changes
+// (e.g. the addition of a new field will cause the hash code to change)
+// 2. The deployment template won't have hash labels
+func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
+ t1Copy := template1.DeepCopy()
+ t2Copy := template2.DeepCopy()
+ // Remove hash labels from template.Labels before comparing
+ delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+ delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+ return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
+}
+
+// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
+// Returns nil if the new replica set doesn't exist yet.
+func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) {
+ rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
+ if err != nil {
+ return nil, err
+ }
+ return FindNewReplicaSet(deployment, rsList), nil
+}
+
+// RsListFromClient returns an rsListFunc that wraps the given client.
+func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
+ return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
+ rsList, err := c.ReplicaSets(namespace).List(context.Background(), options)
+ if err != nil {
+ return nil, err
+ }
+ var ret []*apps.ReplicaSet
+ for i := range rsList.Items {
+ ret = append(ret, &rsList.Items[i])
+ }
+ return ret, err
+ }
+}
+
+// IsRollingUpdate returns true if the strategy type is a rolling update.
+func IsRollingUpdate(deployment *apps.Deployment) bool {
+ return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
+}
+
+// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
+func MaxUnavailable(deployment apps.Deployment) int32 {
+ if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
+ return int32(0)
+ }
+ // Error caught by validation
+ _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
+ if maxUnavailable > *deployment.Spec.Replicas {
+ return *deployment.Spec.Replicas
+ }
+ return maxUnavailable
+}
+
+// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
+// step. For example:
+//
+// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
+// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
+// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
+// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
+func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
+ surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
+ if err != nil {
+ return 0, 0, err
+ }
+ unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ if surge == 0 && unavailable == 0 {
+ // Validation should never allow the user to explicitly use zero values for both maxSurge
+ // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
+ // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
+ // theory that surge might not work due to quota.
+ unavailable = 1
+ }
+
+ return int32(surge), int32(unavailable), nil
+}
diff --git a/helm/internal/tlsutil/tls.go b/helm/internal/tlsutil/tls.go
new file mode 100644
index 000000000..88f26d47b
--- /dev/null
+++ b/helm/internal/tlsutil/tls.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "os"
+
+ "errors"
+)
+
+type TLSConfigOptions struct {
+ insecureSkipTLSVerify bool
+ certPEMBlock, keyPEMBlock []byte
+ caPEMBlock []byte
+}
+
+type TLSConfigOption func(options *TLSConfigOptions) error
+
+func WithInsecureSkipVerify(insecureSkipTLSVerify bool) TLSConfigOption {
+ return func(options *TLSConfigOptions) error {
+ options.insecureSkipTLSVerify = insecureSkipTLSVerify
+
+ return nil
+ }
+}
+
+func WithCertKeyPairFiles(certFile, keyFile string) TLSConfigOption {
+ return func(options *TLSConfigOptions) error {
+ if certFile == "" && keyFile == "" {
+ return nil
+ }
+
+ certPEMBlock, err := os.ReadFile(certFile)
+ if err != nil {
+ return fmt.Errorf("unable to read cert file: %q: %w", certFile, err)
+ }
+
+ keyPEMBlock, err := os.ReadFile(keyFile)
+ if err != nil {
+ return fmt.Errorf("unable to read key file: %q: %w", keyFile, err)
+ }
+
+ options.certPEMBlock = certPEMBlock
+ options.keyPEMBlock = keyPEMBlock
+
+ return nil
+ }
+}
+
+func WithCAFile(caFile string) TLSConfigOption {
+ return func(options *TLSConfigOptions) error {
+ if caFile == "" {
+ return nil
+ }
+
+ caPEMBlock, err := os.ReadFile(caFile)
+ if err != nil {
+ return fmt.Errorf("can't read CA file: %q: %w", caFile, err)
+ }
+
+ options.caPEMBlock = caPEMBlock
+
+ return nil
+ }
+}
+
+func NewTLSConfig(options ...TLSConfigOption) (*tls.Config, error) {
+ to := TLSConfigOptions{}
+
+ errs := []error{}
+ for _, option := range options {
+ err := option(&to)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, errors.Join(errs...)
+ }
+
+ config := tls.Config{
+ InsecureSkipVerify: to.insecureSkipTLSVerify,
+ }
+
+ if len(to.certPEMBlock) > 0 && len(to.keyPEMBlock) > 0 {
+ cert, err := tls.X509KeyPair(to.certPEMBlock, to.keyPEMBlock)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load cert from key pair: %w", err)
+ }
+
+ config.Certificates = []tls.Certificate{cert}
+ }
+
+ if len(to.caPEMBlock) > 0 {
+ cp := x509.NewCertPool()
+ if !cp.AppendCertsFromPEM(to.caPEMBlock) {
+ return nil, fmt.Errorf("failed to append certificates from pem block")
+ }
+
+ config.RootCAs = cp
+ }
+
+ return &config, nil
+}
diff --git a/helm/internal/tlsutil/tls_test.go b/helm/internal/tlsutil/tls_test.go
new file mode 100644
index 000000000..f16eb218f
--- /dev/null
+++ b/helm/internal/tlsutil/tls_test.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tlsutil
+
+import (
+ "path/filepath"
+ "testing"
+)
+
+const tlsTestDir = "../../testdata"
+
+const (
+ testCaCertFile = "rootca.crt"
+ testCertFile = "crt.pem"
+ testKeyFile = "key.pem"
+)
+
+func testfile(t *testing.T, file string) (path string) {
+ t.Helper()
+ path, err := filepath.Abs(filepath.Join(tlsTestDir, file))
+ if err != nil {
+ t.Fatalf("error getting absolute path to test file %q: %v", file, err)
+ }
+ return path
+}
+
+func TestNewTLSConfig(t *testing.T) {
+ certFile := testfile(t, testCertFile)
+ keyFile := testfile(t, testKeyFile)
+ caCertFile := testfile(t, testCaCertFile)
+ insecureSkipTLSVerify := false
+
+ {
+ cfg, err := NewTLSConfig(
+ WithInsecureSkipVerify(insecureSkipTLSVerify),
+ WithCertKeyPairFiles(certFile, keyFile),
+ WithCAFile(caCertFile),
+ )
+ if err != nil {
+ t.Error(err)
+ }
+
+ if got := len(cfg.Certificates); got != 1 {
+ t.Fatalf("expecting 1 client certificates, got %d", got)
+ }
+ if cfg.InsecureSkipVerify {
+ t.Fatalf("insecure skip verify mismatch, expecting false")
+ }
+ if cfg.RootCAs == nil {
+ t.Fatalf("mismatch tls RootCAs, expecting non-nil")
+ }
+ }
+ {
+ cfg, err := NewTLSConfig(
+ WithInsecureSkipVerify(insecureSkipTLSVerify),
+ WithCAFile(caCertFile),
+ )
+ if err != nil {
+ t.Error(err)
+ }
+
+ if got := len(cfg.Certificates); got != 0 {
+ t.Fatalf("expecting 0 client certificates, got %d", got)
+ }
+ if cfg.InsecureSkipVerify {
+ t.Fatalf("insecure skip verify mismatch, expecting false")
+ }
+ if cfg.RootCAs == nil {
+ t.Fatalf("mismatch tls RootCAs, expecting non-nil")
+ }
+ }
+
+ {
+ cfg, err := NewTLSConfig(
+ WithInsecureSkipVerify(insecureSkipTLSVerify),
+ WithCertKeyPairFiles(certFile, keyFile),
+ )
+ if err != nil {
+ t.Error(err)
+ }
+
+ if got := len(cfg.Certificates); got != 1 {
+ t.Fatalf("expecting 1 client certificates, got %d", got)
+ }
+ if cfg.InsecureSkipVerify {
+ t.Fatalf("insecure skip verify mismatch, expecting false")
+ }
+ if cfg.RootCAs != nil {
+ t.Fatalf("mismatch tls RootCAs, expecting nil")
+ }
+ }
+}
diff --git a/helm/internal/urlutil/urlutil.go b/helm/internal/urlutil/urlutil.go
new file mode 100644
index 000000000..a8cf7398c
--- /dev/null
+++ b/helm/internal/urlutil/urlutil.go
@@ -0,0 +1,73 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package urlutil
+
+import (
+ "net/url"
+ "path"
+ "path/filepath"
+)
+
+// URLJoin joins a base URL to one or more path components.
+//
+// It's like filepath.Join for URLs. If the baseURL is pathish, this will still
+// perform a join.
+//
+// If the URL is unparsable, this returns an error.
+func URLJoin(baseURL string, paths ...string) (string, error) {
+ u, err := url.Parse(baseURL)
+ if err != nil {
+ return "", err
+ }
+ // We want path instead of filepath because path always uses /.
+ all := []string{u.Path}
+ all = append(all, paths...)
+ u.Path = path.Join(all...)
+ return u.String(), nil
+}
+
+// Equal normalizes two URLs and then compares for equality.
+func Equal(a, b string) bool {
+ au, err := url.Parse(a)
+ if err != nil {
+ a = filepath.Clean(a)
+ b = filepath.Clean(b)
+ // If urls are paths, return true only if they are an exact match
+ return a == b
+ }
+ bu, err := url.Parse(b)
+ if err != nil {
+ return false
+ }
+
+ for _, u := range []*url.URL{au, bu} {
+ if u.Path == "" {
+ u.Path = "/"
+ }
+ u.Path = filepath.Clean(u.Path)
+ }
+ return au.String() == bu.String()
+}
+
+// ExtractHostname returns hostname from URL
+func ExtractHostname(addr string) (string, error) {
+ u, err := url.Parse(addr)
+ if err != nil {
+ return "", err
+ }
+ return u.Hostname(), nil
+}
diff --git a/helm/internal/urlutil/urlutil_test.go b/helm/internal/urlutil/urlutil_test.go
new file mode 100644
index 000000000..82acc40fe
--- /dev/null
+++ b/helm/internal/urlutil/urlutil_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package urlutil
+
+import "testing"
+
+func TestURLJoin(t *testing.T) {
+ tests := []struct {
+ name, url, expect string
+ paths []string
+ }{
+ {name: "URL, one path", url: "http://example.com", paths: []string{"hello"}, expect: "http://example.com/hello"},
+ {name: "Long URL, one path", url: "http://example.com/but/first", paths: []string{"slurm"}, expect: "http://example.com/but/first/slurm"},
+ {name: "URL, two paths", url: "http://example.com", paths: []string{"hello", "world"}, expect: "http://example.com/hello/world"},
+ {name: "URL, no paths", url: "http://example.com", paths: []string{}, expect: "http://example.com"},
+ {name: "basepath, two paths", url: "../example.com", paths: []string{"hello", "world"}, expect: "../example.com/hello/world"},
+ }
+
+ for _, tt := range tests {
+ if got, err := URLJoin(tt.url, tt.paths...); err != nil {
+ t.Errorf("%s: error %q", tt.name, err)
+ } else if got != tt.expect {
+ t.Errorf("%s: expected %q, got %q", tt.name, tt.expect, got)
+ }
+ }
+}
+
+func TestEqual(t *testing.T) {
+ for _, tt := range []struct {
+ a, b string
+ match bool
+ }{
+ {"http://example.com", "http://example.com", true},
+ {"http://example.com", "http://another.example.com", false},
+ {"https://example.com", "https://example.com", true},
+ {"http://example.com/", "http://example.com", true},
+ {"https://example.com", "http://example.com", false},
+ {"http://example.com/foo", "http://example.com/foo/", true},
+ {"http://example.com/foo//", "http://example.com/foo/", true},
+ {"http://example.com/./foo/", "http://example.com/foo/", true},
+ {"http://example.com/bar/../foo/", "http://example.com/foo/", true},
+ {"/foo", "/foo", true},
+ {"/foo", "/foo/", true},
+ {"/foo/.", "/foo/", true},
+ {"%/1234", "%/1234", true},
+ {"%/1234", "%/123", false},
+ {"/1234", "%/1234", false},
+ } {
+ if tt.match != Equal(tt.a, tt.b) {
+ t.Errorf("Expected %q==%q to be %t", tt.a, tt.b, tt.match)
+ }
+ }
+}
+
+func TestExtractHostname(t *testing.T) {
+ tests := map[string]string{
+ "http://example.com": "example.com",
+ "https://example.com/foo": "example.com",
+
+ "https://example.com:31337/not/with/a/bang/but/a/whimper": "example.com",
+ }
+ for start, expect := range tests {
+ if got, _ := ExtractHostname(start); got != expect {
+ t.Errorf("Got %q, expected %q", got, expect)
+ }
+ }
+}
diff --git a/helm/internal/version/clientgo.go b/helm/internal/version/clientgo.go
new file mode 100644
index 000000000..ab2a38fd5
--- /dev/null
+++ b/helm/internal/version/clientgo.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "fmt"
+ "runtime/debug"
+ "slices"
+
+ _ "k8s.io/client-go/kubernetes" // Force k8s.io/client-go to be included in the build
+)
+
+func K8sIOClientGoModVersion() (string, error) {
+ info, ok := debug.ReadBuildInfo()
+ if !ok {
+ return "", fmt.Errorf("failed to read build info")
+ }
+
+ idx := slices.IndexFunc(info.Deps, func(m *debug.Module) bool {
+ return m.Path == "k8s.io/client-go"
+ })
+
+ if idx == -1 {
+ return "", fmt.Errorf("k8s.io/client-go not found in build info")
+ }
+
+ m := info.Deps[idx]
+
+ return m.Version, nil
+}
diff --git a/helm/internal/version/clientgo_test.go b/helm/internal/version/clientgo_test.go
new file mode 100644
index 000000000..624c669af
--- /dev/null
+++ b/helm/internal/version/clientgo_test.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestK8sClientGoModVersion(t *testing.T) {
+ // Unfortunately, test builds don't include debug info / module info
+ // So we expect "K8sIOClientGoModVersion" to return error
+ _, err := K8sIOClientGoModVersion()
+ require.ErrorContains(t, err, "k8s.io/client-go not found in build info")
+}
diff --git a/helm/internal/version/version.go b/helm/internal/version/version.go
new file mode 100644
index 000000000..3daf80893
--- /dev/null
+++ b/helm/internal/version/version.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "flag"
+ "fmt"
+ "log/slog"
+ "runtime"
+ "strings"
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+var (
+ // version is the current version of Helm.
+ // Update this whenever making a new release.
+ // The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata]
+ //
+ // Increment major number for new feature additions and behavioral changes.
+ // Increment minor number for bug fixes and performance enhancements.
+ version = "v4.1"
+
+ // metadata is extra build time data
+ metadata = ""
+ // gitCommit is the git sha1
+ gitCommit = ""
+ // gitTreeState is the state of the git tree
+ gitTreeState = ""
+)
+
+const (
+ kubeClientGoVersionTesting = "v1.20"
+)
+
+// BuildInfo describes the compile time information.
+type BuildInfo struct {
+ // Version is the current semver.
+ Version string `json:"version,omitempty"`
+ // GitCommit is the git sha1.
+ GitCommit string `json:"git_commit,omitempty"`
+ // GitTreeState is the state of the git tree.
+ GitTreeState string `json:"git_tree_state,omitempty"`
+ // GoVersion is the version of the Go compiler used.
+ GoVersion string `json:"go_version,omitempty"`
+ // KubeClientVersion is the version of client-go Helm was build with
+ KubeClientVersion string `json:"kube_client_version"`
+}
+
+// GetVersion returns the semver string of the version
+func GetVersion() string {
+ if metadata == "" {
+ return version
+ }
+ return version + "+" + metadata
+}
+
+// GetUserAgent returns a user agent for user with an HTTP client
+func GetUserAgent() string {
+ return "Helm/" + strings.TrimPrefix(GetVersion(), "v")
+}
+
+// Get returns build info
+func Get() BuildInfo {
+
+ makeKubeClientVersionString := func() string {
+ // Test builds don't include debug info / module info
+ // (And even if they did, we probably want a stable version during tests anyway)
+ // Return a default value for test builds
+ if testing.Testing() {
+ return kubeClientGoVersionTesting
+ }
+
+ vstr, err := K8sIOClientGoModVersion()
+ if err != nil {
+ slog.Error("failed to retrieve k8s.io/client-go version", slog.Any("error", err))
+ return ""
+ }
+
+ v, err := semver.NewVersion(vstr)
+ if err != nil {
+ slog.Error("unable to parse k8s.io/client-go version", slog.String("version", vstr), slog.Any("error", err))
+ return ""
+ }
+
+ kubeClientVersionMajor := v.Major() + 1
+ kubeClientVersionMinor := v.Minor()
+
+ return fmt.Sprintf("v%d.%d", kubeClientVersionMajor, kubeClientVersionMinor)
+ }
+
+ v := BuildInfo{
+ Version: GetVersion(),
+ GitCommit: gitCommit,
+ GitTreeState: gitTreeState,
+ GoVersion: runtime.Version(),
+ KubeClientVersion: makeKubeClientVersionString(),
+ }
+
+ // HACK(bacongobbler): strip out GoVersion during a test run for consistent test output
+ if flag.Lookup("test.v") != nil {
+ v.GoVersion = ""
+ }
+ return v
+}
diff --git a/helm/pkg/action/action.go b/helm/pkg/action/action.go
new file mode 100644
index 000000000..c2a27940f
--- /dev/null
+++ b/helm/pkg/action/action.go
@@ -0,0 +1,592 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "maps"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/kustomize/kyaml/kio"
+ kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/engine"
+ "helm.sh/helm/v4/pkg/kube"
+ "helm.sh/helm/v4/pkg/postrenderer"
+ "helm.sh/helm/v4/pkg/registry"
+ ri "helm.sh/helm/v4/pkg/release"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+// Timestamper is a function capable of producing a timestamp.Timestamper.
+//
+// By default, this is a time.Time function from the Helm time package. This can
+// be overridden for testing though, so that timestamps are predictable.
+var Timestamper = time.Now
+
+var (
+ // errMissingChart indicates that a chart was not provided.
+ errMissingChart = errors.New("no chart provided")
+ // errMissingRelease indicates that a release (name) was not provided.
+ errMissingRelease = errors.New("no release provided")
+ // errInvalidRevision indicates that an invalid release revision number was provided.
+ errInvalidRevision = errors.New("invalid release revision")
+ // errPending indicates that another instance of Helm is already applying an operation on a release.
+ errPending = errors.New("another operation (install/upgrade/rollback) is in progress")
+)
+
+type DryRunStrategy string
+
+const (
+ // DryRunNone indicates the client will make all mutating calls
+ DryRunNone DryRunStrategy = "none"
+
+ // DryRunClient, or client-side dry-run, indicates the client will avoid
+ // making calls to the server
+ DryRunClient DryRunStrategy = "client"
+
+ // DryRunServer, or server-side dry-run, indicates the client will send
+ // calls to the APIServer with the dry-run parameter to prevent persisting changes
+ DryRunServer DryRunStrategy = "server"
+)
+
+// Configuration injects the dependencies that all actions share.
+type Configuration struct {
+ // RESTClientGetter is an interface that loads Kubernetes clients.
+ RESTClientGetter RESTClientGetter
+
+ // Releases stores records of releases.
+ Releases *storage.Storage
+
+ // KubeClient is a Kubernetes API client.
+ KubeClient kube.Interface
+
+ // RegistryClient is a client for working with registries
+ RegistryClient *registry.Client
+
+ // Capabilities describes the capabilities of the Kubernetes cluster.
+ Capabilities *common.Capabilities
+
+ // CustomTemplateFuncs is defined by users to provide custom template funcs
+ CustomTemplateFuncs template.FuncMap
+
+ // HookOutputFunc called with container name and returns and expects writer that will receive the log output.
+ HookOutputFunc func(namespace, pod, container string) io.Writer
+
+ // Mutex is an exclusive lock for concurrent access to the action
+ mutex sync.Mutex
+
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+type ConfigurationOption func(c *Configuration)
+
+// Override the default logging handler
+// If unspecified, the default logger will be used
+func ConfigurationSetLogger(h slog.Handler) ConfigurationOption {
+ return func(c *Configuration) {
+ c.SetLogger(h)
+ }
+}
+
+func NewConfiguration(options ...ConfigurationOption) *Configuration {
+ c := &Configuration{}
+ c.SetLogger(slog.Default().Handler())
+
+ for _, o := range options {
+ o(c)
+ }
+
+ return c
+}
+
+const (
+ // filenameAnnotation is the annotation key used to store the original filename
+ // information in manifest annotations for post-rendering reconstruction.
+ filenameAnnotation = "postrenderer.helm.sh/postrender-filename"
+)
+
+// annotateAndMerge combines multiple YAML files into a single stream of documents,
+// adding filename annotations to each document for later reconstruction.
+func annotateAndMerge(files map[string]string) (string, error) {
+ var combinedManifests []*kyaml.RNode
+
+ // Get sorted filenames to ensure result is deterministic
+ fnames := slices.Sorted(maps.Keys(files))
+
+ for _, fname := range fnames {
+ content := files[fname]
+ // Skip partials and empty files.
+ if strings.HasPrefix(path.Base(fname), "_") || strings.TrimSpace(content) == "" {
+ continue
+ }
+
+ manifests, err := kio.ParseAll(content)
+ if err != nil {
+ return "", fmt.Errorf("parsing %s: %w", fname, err)
+ }
+ for _, manifest := range manifests {
+ if err := manifest.PipeE(kyaml.SetAnnotation(filenameAnnotation, fname)); err != nil {
+ return "", fmt.Errorf("annotating %s: %w", fname, err)
+ }
+ combinedManifests = append(combinedManifests, manifest)
+ }
+ }
+
+ merged, err := kio.StringAll(combinedManifests)
+ if err != nil {
+ return "", fmt.Errorf("writing merged docs: %w", err)
+ }
+ return merged, nil
+}
+
+// splitAndDeannotate reconstructs individual files from a merged YAML stream,
+// removing filename annotations and grouping documents by their original filenames.
+func splitAndDeannotate(postrendered string) (map[string]string, error) {
+ manifests, err := kio.ParseAll(postrendered)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing YAML: %w", err)
+ }
+
+ manifestsByFilename := make(map[string][]*kyaml.RNode)
+ for i, manifest := range manifests {
+ meta, err := manifest.GetMeta()
+ if err != nil {
+ return nil, fmt.Errorf("getting metadata: %w", err)
+ }
+ fname := meta.Annotations[filenameAnnotation]
+ if fname == "" {
+ fname = fmt.Sprintf("generated-by-postrender-%d.yaml", i)
+ }
+ if err := manifest.PipeE(kyaml.ClearAnnotation(filenameAnnotation)); err != nil {
+ return nil, fmt.Errorf("clearing filename annotation: %w", err)
+ }
+ manifestsByFilename[fname] = append(manifestsByFilename[fname], manifest)
+ }
+
+ reconstructed := make(map[string]string, len(manifestsByFilename))
+ for fname, docs := range manifestsByFilename {
+ fileContents, err := kio.StringAll(docs)
+ if err != nil {
+ return nil, fmt.Errorf("re-writing %s: %w", fname, err)
+ }
+ reconstructed[fname] = fileContents
+ }
+ return reconstructed, nil
+}
+
+// renderResources renders the templates in a chart
+//
+// TODO: This function is badly in need of a refactor.
+// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed
+//
+// This code has to do with writing files to disk.
+func (cfg *Configuration) renderResources(ch *chart.Chart, values common.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrenderer.PostRenderer, interactWithRemote, enableDNS, hideSecret bool) ([]*release.Hook, *bytes.Buffer, string, error) {
+ var hs []*release.Hook
+ b := bytes.NewBuffer(nil)
+
+ caps, err := cfg.getCapabilities()
+ if err != nil {
+ return hs, b, "", err
+ }
+
+ if ch.Metadata.KubeVersion != "" {
+ if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) {
+ return hs, b, "", fmt.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.Version)
+ }
+ }
+
+ var files map[string]string
+ var err2 error
+
+ // A `helm template` should not talk to the remote cluster. However, commands with the flag
+ // `--dry-run` with the value of `false`, `none`, or `server` should try to interact with the cluster.
+ // It may break in interesting and exotic ways because other data (e.g. discovery) is mocked.
+ if interactWithRemote && cfg.RESTClientGetter != nil {
+ restConfig, err := cfg.RESTClientGetter.ToRESTConfig()
+ if err != nil {
+ return hs, b, "", err
+ }
+ e := engine.New(restConfig)
+ e.EnableDNS = enableDNS
+ e.CustomTemplateFuncs = cfg.CustomTemplateFuncs
+
+ files, err2 = e.Render(ch, values)
+ } else {
+ var e engine.Engine
+ e.EnableDNS = enableDNS
+ e.CustomTemplateFuncs = cfg.CustomTemplateFuncs
+
+ files, err2 = e.Render(ch, values)
+ }
+
+ if err2 != nil {
+ return hs, b, "", err2
+ }
+
+ // NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource,
+ // pull it out of here into a separate file so that we can actually use the output of the rendered
+ // text file. We have to spin through this map because the file contains path information, so we
+ // look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip
+ // it in the sortHooks.
+ var notesBuffer bytes.Buffer
+ for k, v := range files {
+ if strings.HasSuffix(k, notesFileSuffix) {
+ if subNotes || (k == path.Join(ch.Name(), "templates", notesFileSuffix)) {
+ // If buffer contains data, add newline before adding more
+ if notesBuffer.Len() > 0 {
+ notesBuffer.WriteString("\n")
+ }
+ notesBuffer.WriteString(v)
+ }
+ delete(files, k)
+ }
+ }
+ notes := notesBuffer.String()
+
+ if pr != nil {
+ // We need to send files to the post-renderer before sorting and splitting
+ // hooks from manifests. The post-renderer interface expects a stream of
+ // manifests (similar to what tools like Kustomize and kubectl expect), whereas
+ // the sorter uses filenames.
+ // Here, we merge the documents into a stream, post-render them, and then split
+ // them back into a map of filename -> content.
+
+ // Merge files as stream of documents for sending to post renderer
+ merged, err := annotateAndMerge(files)
+ if err != nil {
+ return hs, b, notes, fmt.Errorf("error merging manifests: %w", err)
+ }
+
+ // Run the post renderer
+ postRendered, err := pr.Run(bytes.NewBufferString(merged))
+ if err != nil {
+ return hs, b, notes, fmt.Errorf("error while running post render on files: %w", err)
+ }
+
+ // Use the file list and contents received from the post renderer
+ files, err = splitAndDeannotate(postRendered.String())
+ if err != nil {
+ return hs, b, notes, fmt.Errorf("error while parsing post rendered output: %w", err)
+ }
+ }
+
+ // Sort hooks, manifests, and partials. Only hooks and manifests are returned,
+ // as partials are not used after renderer.Render. Empty manifests are also
+ // removed here.
+ hs, manifests, err := releaseutil.SortManifests(files, nil, releaseutil.InstallOrder)
+ if err != nil {
+ // By catching parse errors here, we can prevent bogus releases from going
+ // to Kubernetes.
+ //
+ // We return the files as a big blob of data to help the user debug parser
+ // errors.
+ for name, content := range files {
+ if strings.TrimSpace(content) == "" {
+ continue
+ }
+ fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content)
+ }
+ return hs, b, "", err
+ }
+
+ // Aggregate all valid manifests into one big doc.
+ fileWritten := make(map[string]bool)
+
+ if includeCrds {
+ for _, crd := range ch.CRDObjects() {
+ if outputDir == "" {
+ fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Filename, string(crd.File.Data[:]))
+ } else {
+ err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Filename])
+ if err != nil {
+ return hs, b, "", err
+ }
+ fileWritten[crd.Filename] = true
+ }
+ }
+ }
+
+ for _, m := range manifests {
+ if outputDir == "" {
+ if hideSecret && m.Head.Kind == "Secret" && m.Head.Version == "v1" {
+ fmt.Fprintf(b, "---\n# Source: %s\n# HIDDEN: The Secret output has been suppressed\n", m.Name)
+ } else {
+ fmt.Fprintf(b, "---\n# Source: %s\n%s\n", m.Name, m.Content)
+ }
+ } else {
+ newDir := outputDir
+ if useReleaseName {
+ newDir = filepath.Join(outputDir, releaseName)
+ }
+ // NOTE: We do not have to worry about the post-renderer because
+ // output dir is only used by `helm template`. In the next major
+ // release, we should move this logic to template only as it is not
+ // used by install or upgrade
+ err = writeToFile(newDir, m.Name, m.Content, fileWritten[m.Name])
+ if err != nil {
+ return hs, b, "", err
+ }
+ fileWritten[m.Name] = true
+ }
+ }
+
+ return hs, b, notes, nil
+}
+
+// RESTClientGetter gets the rest client
+type RESTClientGetter interface {
+ ToRESTConfig() (*rest.Config, error)
+ ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
+ ToRESTMapper() (meta.RESTMapper, error)
+}
+
+// capabilities builds a Capabilities from discovery information.
+func (cfg *Configuration) getCapabilities() (*common.Capabilities, error) {
+ if cfg.Capabilities != nil {
+ return cfg.Capabilities, nil
+ }
+ dc, err := cfg.RESTClientGetter.ToDiscoveryClient()
+ if err != nil {
+ return nil, fmt.Errorf("could not get Kubernetes discovery client: %w", err)
+ }
+ // force a discovery cache invalidation to always fetch the latest server version/capabilities.
+ dc.Invalidate()
+ kubeVersion, err := dc.ServerVersion()
+ if err != nil {
+ return nil, fmt.Errorf("could not get server version from Kubernetes: %w", err)
+ }
+ // Issue #6361:
+ // Client-Go emits an error when an API service is registered but unimplemented.
+ // We trap that error here and print a warning. But since the discovery client continues
+ // building the API object, it is correctly populated with all valid APIs.
+ // See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642
+ apiVersions, err := GetVersionSet(dc)
+ if err != nil {
+ if discovery.IsGroupDiscoveryFailedError(err) {
+ cfg.Logger().Warn("the kubernetes server has an orphaned API service", slog.Any("error", err))
+ cfg.Logger().Warn("to fix this, kubectl delete apiservice ")
+ } else {
+ return nil, fmt.Errorf("could not get apiVersions from Kubernetes: %w", err)
+ }
+ }
+
+ cfg.Capabilities = &common.Capabilities{
+ APIVersions: apiVersions,
+ KubeVersion: common.KubeVersion{
+ Version: kubeVersion.GitVersion,
+ Major: kubeVersion.Major,
+ Minor: kubeVersion.Minor,
+ },
+ HelmVersion: common.DefaultCapabilities.HelmVersion,
+ }
+ return cfg.Capabilities, nil
+}
+
+// KubernetesClientSet creates a new kubernetes ClientSet based on the configuration
+func (cfg *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
+ conf, err := cfg.RESTClientGetter.ToRESTConfig()
+ if err != nil {
+ return nil, fmt.Errorf("unable to generate config for kubernetes client: %w", err)
+ }
+
+ return kubernetes.NewForConfig(conf)
+}
+
+// Now generates a timestamp
+//
+// If the configuration has a Timestamper on it, that will be used.
+// Otherwise, this will use time.Now().
+func (cfg *Configuration) Now() time.Time {
+ return Timestamper()
+}
+
+func (cfg *Configuration) releaseContent(name string, version int) (ri.Releaser, error) {
+ if err := chartutil.ValidateReleaseName(name); err != nil {
+ return nil, fmt.Errorf("releaseContent: Release name is invalid: %s", name)
+ }
+
+ if version <= 0 {
+ return cfg.Releases.Last(name)
+ }
+
+ return cfg.Releases.Get(name, version)
+}
+
+// GetVersionSet retrieves a set of available k8s API versions
+func GetVersionSet(client discovery.ServerResourcesInterface) (common.VersionSet, error) {
+ groups, resources, err := client.ServerGroupsAndResources()
+ if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
+ return common.DefaultVersionSet, fmt.Errorf("could not get apiVersions from Kubernetes: %w", err)
+ }
+
+ // FIXME: The Kubernetes test fixture for cli appears to always return nil
+ // for calls to Discovery().ServerGroupsAndResources(). So in this case, we
+ // return the default API list. This is also a safe value to return in any
+ // other odd-ball case.
+ if len(groups) == 0 && len(resources) == 0 {
+ return common.DefaultVersionSet, nil
+ }
+
+ versionMap := make(map[string]interface{})
+ var versions []string
+
+ // Extract the groups
+ for _, g := range groups {
+ for _, gv := range g.Versions {
+ versionMap[gv.GroupVersion] = struct{}{}
+ }
+ }
+
+ // Extract the resources
+ var id string
+ var ok bool
+ for _, r := range resources {
+ for _, rl := range r.APIResources {
+
+ // A Kind at a GroupVersion can show up more than once. We only want
+ // it displayed once in the final output.
+ id = path.Join(r.GroupVersion, rl.Kind)
+ if _, ok = versionMap[id]; !ok {
+ versionMap[id] = struct{}{}
+ }
+ }
+ }
+
+ // Convert to a form that NewVersionSet can use
+ for k := range versionMap {
+ versions = append(versions, k)
+ }
+
+ return common.VersionSet(versions), nil
+}
+
+// recordRelease with an update operation in case reuse has been set.
+func (cfg *Configuration) recordRelease(r *release.Release) {
+ if err := cfg.Releases.Update(r); err != nil {
+ cfg.Logger().Warn(
+ "failed to update release",
+ slog.String("name", r.Name),
+ slog.Int("revision", r.Version),
+ slog.Any("error", err),
+ )
+ }
+}
+
+// Init initializes the action configuration
+func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string) error {
+ kc := kube.New(getter)
+ kc.SetLogger(cfg.Logger().Handler())
+
+ lazyClient := &lazyClient{
+ namespace: namespace,
+ clientFn: kc.Factory.KubernetesClientSet,
+ }
+
+ var store *storage.Storage
+ switch helmDriver {
+ case "secret", "secrets", "":
+ d := driver.NewSecrets(newSecretClient(lazyClient))
+ d.SetLogger(cfg.Logger().Handler())
+ store = storage.Init(d)
+ case "configmap", "configmaps":
+ d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
+ d.SetLogger(cfg.Logger().Handler())
+ store = storage.Init(d)
+ case "memory":
+ var d *driver.Memory
+ if cfg.Releases != nil {
+ if mem, ok := cfg.Releases.Driver.(*driver.Memory); ok {
+ // This function can be called more than once (e.g., helm list --all-namespaces).
+ // If a memory driver was already initialized, reuse it but set the possibly new namespace.
+ // We reuse it in case some releases where already created in the existing memory driver.
+ d = mem
+ }
+ }
+ if d == nil {
+ d = driver.NewMemory()
+ }
+ d.SetLogger(cfg.Logger().Handler())
+ d.SetNamespace(namespace)
+ store = storage.Init(d)
+ case "sql":
+ d, err := driver.NewSQL(
+ os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
+ namespace,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to instantiate SQL driver: %w", err)
+ }
+ d.SetLogger(cfg.Logger().Handler())
+ store = storage.Init(d)
+ default:
+ return fmt.Errorf("unknown driver %q", helmDriver)
+ }
+
+ cfg.RESTClientGetter = getter
+ cfg.KubeClient = kc
+ cfg.Releases = store
+ cfg.HookOutputFunc = func(_, _, _ string) io.Writer { return io.Discard }
+
+ return nil
+}
+
+// SetHookOutputFunc sets the HookOutputFunc on the Configuration.
+func (cfg *Configuration) SetHookOutputFunc(hookOutputFunc func(_, _, _ string) io.Writer) {
+ cfg.HookOutputFunc = hookOutputFunc
+}
+
+func determineReleaseSSApplyMethod(serverSideApply bool) release.ApplyMethod {
+ if serverSideApply {
+ return release.ApplyMethodServerSideApply
+ }
+ return release.ApplyMethodClientSideApply
+}
+
+// isDryRun returns true if the strategy is set to run as a DryRun
+func isDryRun(strategy DryRunStrategy) bool {
+ return strategy == DryRunClient || strategy == DryRunServer
+}
+
+// interactWithServer determine whether or not to interact with a remote Kubernetes server
+func interactWithServer(strategy DryRunStrategy) bool {
+ return strategy == DryRunNone || strategy == DryRunServer
+}
diff --git a/helm/pkg/action/action_test.go b/helm/pkg/action/action_test.go
new file mode 100644
index 000000000..85ee42d64
--- /dev/null
+++ b/helm/pkg/action/action_test.go
@@ -0,0 +1,976 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package action
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log/slog"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ fakeclientset "k8s.io/client-go/kubernetes/fake"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/registry"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+var verbose = flag.Bool("test.log", false, "enable test logging (debug by default)")
+
+func actionConfigFixture(t *testing.T) *Configuration {
+ t.Helper()
+ return actionConfigFixtureWithDummyResources(t, nil)
+}
+
+func actionConfigFixtureWithDummyResources(t *testing.T, dummyResources kube.ResourceList) *Configuration {
+ t.Helper()
+
+ logger := logging.NewLogger(func() bool {
+ return *verbose
+ })
+ slog.SetDefault(logger)
+
+ registryClient, err := registry.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return &Configuration{
+ Releases: storage.Init(driver.NewMemory()),
+ KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: dummyResources},
+ Capabilities: common.DefaultCapabilities,
+ RegistryClient: registryClient,
+ }
+}
+
+var manifestWithHook = `kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ "helm.sh/hook": post-install,pre-delete,post-upgrade
+data:
+ name: value`
+
+var manifestWithTestHook = `kind: Pod
+ metadata:
+ name: finding-nemo,
+ annotations:
+ "helm.sh/hook": test
+ spec:
+ containers:
+ - name: nemo-test
+ image: fake-image
+ cmd: fake-command
+ `
+
+var rbacManifests = `apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: schedule-agents
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "pods/log"]
+ verbs: ["*"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: schedule-agents
+ namespace: {{ default .Release.Namespace}}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: schedule-agents
+subjects:
+- kind: ServiceAccount
+ name: schedule-agents
+ namespace: {{ .Release.Namespace }}
+`
+
+type chartOptions struct {
+ *chart.Chart
+}
+
+type chartOption func(*chartOptions)
+
+func buildChart(opts ...chartOption) *chart.Chart {
+ modTime := time.Now()
+ defaultTemplates := []*common.File{
+ {Name: "templates/hello", ModTime: modTime, Data: []byte("hello: world")},
+ {Name: "templates/hooks", ModTime: modTime, Data: []byte(manifestWithHook)},
+ }
+ return buildChartWithTemplates(defaultTemplates, opts...)
+}
+
+func buildChartWithTemplates(templates []*common.File, opts ...chartOption) *chart.Chart {
+ c := &chartOptions{
+ Chart: &chart.Chart{
+ // TODO: This should be more complete.
+ Metadata: &chart.Metadata{
+ APIVersion: "v1",
+ Name: "hello",
+ Version: "0.1.0",
+ },
+ Templates: templates,
+ },
+ }
+
+ for _, opt := range opts {
+ opt(c)
+ }
+ return c.Chart
+}
+
+func withName(name string) chartOption {
+ return func(opts *chartOptions) {
+ opts.Metadata.Name = name
+ }
+}
+
+func withSampleValues() chartOption {
+ values := map[string]interface{}{
+ "someKey": "someValue",
+ "nestedKey": map[string]interface{}{
+ "simpleKey": "simpleValue",
+ "anotherNestedKey": map[string]interface{}{
+ "yetAnotherNestedKey": map[string]interface{}{
+ "youReadyForAnotherNestedKey": "No",
+ },
+ },
+ },
+ }
+ return func(opts *chartOptions) {
+ opts.Values = values
+ }
+}
+
+func withValues(values map[string]interface{}) chartOption {
+ return func(opts *chartOptions) {
+ opts.Values = values
+ }
+}
+
+func withNotes(notes string) chartOption {
+ return func(opts *chartOptions) {
+ opts.Templates = append(opts.Templates, &common.File{
+ Name: "templates/NOTES.txt",
+ ModTime: time.Now(),
+ Data: []byte(notes),
+ })
+ }
+}
+
+func withDependency(dependencyOpts ...chartOption) chartOption {
+ return func(opts *chartOptions) {
+ opts.AddDependency(buildChart(dependencyOpts...))
+ }
+}
+
+func withMetadataDependency(dependency chart.Dependency) chartOption {
+ return func(opts *chartOptions) {
+ opts.Metadata.Dependencies = append(opts.Metadata.Dependencies, &dependency)
+ }
+}
+
+func withFile(file common.File) chartOption {
+ return func(opts *chartOptions) {
+ opts.Files = append(opts.Files, &file)
+ }
+}
+
+func withSampleTemplates() chartOption {
+ return func(opts *chartOptions) {
+ modTime := time.Now()
+ sampleTemplates := []*common.File{
+ // This adds basic templates and partials.
+ {Name: "templates/goodbye", ModTime: modTime, Data: []byte("goodbye: world")},
+ {Name: "templates/empty", ModTime: modTime, Data: []byte("")},
+ {Name: "templates/with-partials", ModTime: modTime, Data: []byte(`hello: {{ template "_planet" . }}`)},
+ {Name: "templates/partials/_planet", ModTime: modTime, Data: []byte(`{{define "_planet"}}Earth{{end}}`)},
+ }
+ opts.Templates = append(opts.Templates, sampleTemplates...)
+ }
+}
+
+func withSampleSecret() chartOption {
+ return func(opts *chartOptions) {
+ sampleSecret := &common.File{Name: "templates/secret.yaml", ModTime: time.Now(), Data: []byte("apiVersion: v1\nkind: Secret\n")}
+ opts.Templates = append(opts.Templates, sampleSecret)
+ }
+}
+
+func withSampleIncludingIncorrectTemplates() chartOption {
+ return func(opts *chartOptions) {
+ modTime := time.Now()
+ sampleTemplates := []*common.File{
+ // This adds basic templates and partials.
+ {Name: "templates/goodbye", ModTime: modTime, Data: []byte("goodbye: world")},
+ {Name: "templates/empty", ModTime: modTime, Data: []byte("")},
+ {Name: "templates/incorrect", ModTime: modTime, Data: []byte("{{ .Values.bad.doh }}")},
+ {Name: "templates/with-partials", ModTime: modTime, Data: []byte(`hello: {{ template "_planet" . }}`)},
+ {Name: "templates/partials/_planet", ModTime: modTime, Data: []byte(`{{define "_planet"}}Earth{{end}}`)},
+ }
+ opts.Templates = append(opts.Templates, sampleTemplates...)
+ }
+}
+
+func withMultipleManifestTemplate() chartOption {
+ return func(opts *chartOptions) {
+ sampleTemplates := []*common.File{
+ {Name: "templates/rbac", ModTime: time.Now(), Data: []byte(rbacManifests)},
+ }
+ opts.Templates = append(opts.Templates, sampleTemplates...)
+ }
+}
+
+func withKube(version string) chartOption {
+ return func(opts *chartOptions) {
+ opts.Metadata.KubeVersion = version
+ }
+}
+
+// releaseStub creates a release stub, complete with the chartStub as its chart.
+func releaseStub() *release.Release {
+ return namedReleaseStub("angry-panda", rcommon.StatusDeployed)
+}
+
+func namedReleaseStub(name string, status rcommon.Status) *release.Release {
+ now := time.Now()
+ return &release.Release{
+ Name: name,
+ Info: &release.Info{
+ FirstDeployed: now,
+ LastDeployed: now,
+ Status: status,
+ Description: "Named Release Stub",
+ },
+ Chart: buildChart(withSampleTemplates()),
+ Config: map[string]interface{}{"name": "value"},
+ Version: 1,
+ Hooks: []*release.Hook{
+ {
+ Name: "test-cm",
+ Kind: "ConfigMap",
+ Path: "test-cm",
+ Manifest: manifestWithHook,
+ Events: []release.HookEvent{
+ release.HookPostInstall,
+ release.HookPreDelete,
+ },
+ },
+ {
+ Name: "finding-nemo",
+ Kind: "Pod",
+ Path: "finding-nemo",
+ Manifest: manifestWithTestHook,
+ Events: []release.HookEvent{
+ release.HookTest,
+ },
+ },
+ },
+ }
+}
+
+func TestConfiguration_Init(t *testing.T) {
+ tests := []struct {
+ name string
+ helmDriver string
+ expectedDriverType interface{}
+ expectErr bool
+ errMsg string
+ }{
+ {
+ name: "Test secret driver",
+ helmDriver: "secret",
+ expectedDriverType: &driver.Secrets{},
+ },
+ {
+ name: "Test secrets driver",
+ helmDriver: "secrets",
+ expectedDriverType: &driver.Secrets{},
+ },
+ {
+ name: "Test empty driver",
+ helmDriver: "",
+ expectedDriverType: &driver.Secrets{},
+ },
+ {
+ name: "Test configmap driver",
+ helmDriver: "configmap",
+ expectedDriverType: &driver.ConfigMaps{},
+ },
+ {
+ name: "Test configmaps driver",
+ helmDriver: "configmaps",
+ expectedDriverType: &driver.ConfigMaps{},
+ },
+ {
+ name: "Test memory driver",
+ helmDriver: "memory",
+ expectedDriverType: &driver.Memory{},
+ },
+ {
+ name: "Test sql driver",
+ helmDriver: "sql",
+ expectErr: true,
+ errMsg: "unable to instantiate SQL driver",
+ },
+ {
+ name: "Test unknown driver",
+ helmDriver: "someDriver",
+ expectErr: true,
+ errMsg: fmt.Sprintf("unknown driver %q", "someDriver"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := NewConfiguration()
+
+ actualErr := cfg.Init(nil, "default", tt.helmDriver)
+ if tt.expectErr {
+ assert.Error(t, actualErr)
+ assert.Contains(t, actualErr.Error(), tt.errMsg)
+ } else {
+ assert.NoError(t, actualErr)
+ assert.IsType(t, tt.expectedDriverType, cfg.Releases.Driver)
+ }
+ })
+ }
+}
+
+func TestGetVersionSet(t *testing.T) {
+ client := fakeclientset.NewClientset()
+
+ vs, err := GetVersionSet(client.Discovery())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !vs.Has("v1") {
+ t.Errorf("Expected supported versions to at least include v1.")
+ }
+ if vs.Has("nosuchversion/v1") {
+ t.Error("Non-existent version is reported found.")
+ }
+}
+
+// Mock PostRenderer for testing
+type mockPostRenderer struct {
+ shouldError bool
+ transform func(string) string
+}
+
+func (m *mockPostRenderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+ if m.shouldError {
+ return nil, errors.New("mock post-renderer error")
+ }
+
+ content := renderedManifests.String()
+ if m.transform != nil {
+ content = m.transform(content)
+ }
+
+ return bytes.NewBufferString(content), nil
+}
+
+func TestAnnotateAndMerge(t *testing.T) {
+ tests := []struct {
+ name string
+ files map[string]string
+ expectedError string
+ expected string
+ }{
+ {
+ name: "no files",
+ files: map[string]string{},
+ expected: "",
+ },
+ {
+ name: "single file with single manifest",
+ files: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml'
+data:
+ key: value
+`,
+ },
+ {
+ name: "multiple files with multiple manifests",
+ files: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ "templates/secret.yaml": `apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+data:
+ password: dGVzdA==`,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/configmap.yaml'
+data:
+ key: value
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/secret.yaml'
+data:
+ password: dGVzdA==
+`,
+ },
+ {
+ name: "file with multiple manifests",
+ files: map[string]string{
+ "templates/multi.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+data:
+ key: value2`,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml'
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/multi.yaml'
+data:
+ key: value2
+`,
+ },
+ {
+ name: "partials and empty files are removed",
+ files: map[string]string{
+ "templates/cm.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+`,
+ "templates/_partial.tpl": `
+{{-define name}}
+ {{- "abracadabra"}}
+{{- end -}}`,
+ "templates/empty.yaml": ``,
+ },
+ expected: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+ annotations:
+ postrenderer.helm.sh/postrender-filename: 'templates/cm.yaml'
+`,
+ },
+ {
+ name: "empty file",
+ files: map[string]string{
+ "templates/empty.yaml": "",
+ },
+ expected: ``,
+ },
+ {
+ name: "invalid yaml",
+ files: map[string]string{
+ "templates/invalid.yaml": `invalid: yaml: content:
+ - malformed`,
+ },
+ expectedError: "parsing templates/invalid.yaml",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ merged, err := annotateAndMerge(tt.files)
+
+ if tt.expectedError != "" {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.expectedError)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, merged)
+ assert.Equal(t, tt.expected, merged)
+ }
+ })
+ }
+}
+
+func TestSplitAndDeannotate(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expectedFiles map[string]string
+ expectedError string
+ }{
+ {
+ name: "single annotated manifest",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/configmap.yaml
+data:
+ key: value`,
+ expectedFiles: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value
+`,
+ },
+ },
+ {
+ name: "multiple manifests with different filenames",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/configmap.yaml
+data:
+ key: value
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/secret.yaml
+data:
+ password: dGVzdA==`,
+ expectedFiles: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value
+`,
+ "templates/secret.yaml": `apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+data:
+ password: dGVzdA==
+`,
+ },
+ },
+ {
+ name: "multiple manifests with same filename",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/multi.yaml
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/multi.yaml
+data:
+ key: value2`,
+ expectedFiles: map[string]string{
+ "templates/multi.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+data:
+ key: value2
+`,
+ },
+ },
+ {
+ name: "manifest with other annotations",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ postrenderer.helm.sh/postrender-filename: templates/configmap.yaml
+ other-annotation: should-remain
+data:
+ key: value`,
+ expectedFiles: map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+ annotations:
+ other-annotation: should-remain
+data:
+ key: value
+`,
+ },
+ },
+ {
+ name: "invalid yaml input",
+ input: "invalid: yaml: content:",
+ expectedError: "error parsing YAML: MalformedYAMLError",
+ },
+ {
+ name: "manifest without filename annotation",
+ input: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ expectedFiles: map[string]string{
+ "generated-by-postrender-0.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value
+`,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ files, err := splitAndDeannotate(tt.input)
+
+ if tt.expectedError != "" {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.expectedError)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, len(tt.expectedFiles), len(files))
+
+ for expectedFile, expectedContent := range tt.expectedFiles {
+ actualContent, exists := files[expectedFile]
+ assert.True(t, exists, "Expected file %s not found", expectedFile)
+ assert.Equal(t, expectedContent, actualContent)
+ }
+ }
+ })
+ }
+}
+
+func TestAnnotateAndMerge_SplitAndDeannotate_Roundtrip(t *testing.T) {
+ // Test that merge/split operations are symmetric
+ originalFiles := map[string]string{
+ "templates/configmap.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ key: value`,
+ "templates/secret.yaml": `apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+data:
+ password: dGVzdA==`,
+ "templates/multi.yaml": `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm1
+data:
+ key: value1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm2
+data:
+ key: value2`,
+ }
+
+ // Merge and annotate
+ merged, err := annotateAndMerge(originalFiles)
+ require.NoError(t, err)
+
+ // Split and deannotate
+ reconstructed, err := splitAndDeannotate(merged)
+ require.NoError(t, err)
+
+ // Compare the results
+ assert.Equal(t, len(originalFiles), len(reconstructed))
+ for filename, originalContent := range originalFiles {
+ reconstructedContent, exists := reconstructed[filename]
+ assert.True(t, exists, "File %s should exist in reconstructed files", filename)
+
+ // Normalize whitespace for comparison since YAML processing might affect formatting
+ normalizeContent := func(content string) string {
+ return strings.TrimSpace(strings.ReplaceAll(content, "\r\n", "\n"))
+ }
+
+ assert.Equal(t, normalizeContent(originalContent), normalizeContent(reconstructedContent))
+ }
+}
+
+func TestRenderResources_PostRenderer_Success(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a simple mock post-renderer
+ mockPR := &mockPostRenderer{
+ transform: func(content string) string {
+ content = strings.ReplaceAll(content, "hello", "yellow")
+ content = strings.ReplaceAll(content, "goodbye", "foodpie")
+ return strings.ReplaceAll(content, "test-cm", "test-cm-postrendered")
+ },
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ hooks, buf, notes, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, hooks)
+ assert.NotNil(t, buf)
+ assert.Equal(t, "", notes)
+ expectedBuf := `---
+# Source: yellow/templates/foodpie
+foodpie: world
+---
+# Source: yellow/templates/with-partials
+yellow: Earth
+---
+# Source: yellow/templates/yellow
+yellow: world
+`
+ expectedHook := `kind: ConfigMap
+metadata:
+ name: test-cm-postrendered
+ annotations:
+ "helm.sh/hook": post-install,pre-delete,post-upgrade
+data:
+ name: value`
+
+ assert.Equal(t, expectedBuf, buf.String())
+ assert.Len(t, hooks, 1)
+ assert.Equal(t, expectedHook, hooks[0].Manifest)
+}
+
+func TestRenderResources_PostRenderer_Error(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a post-renderer that returns an error
+ mockPR := &mockPostRenderer{
+ shouldError: true,
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ _, _, _, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "error while running post render on files")
+}
+
+func TestRenderResources_PostRenderer_MergeError(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a mock post-renderer
+ mockPR := &mockPostRenderer{}
+
+ // Create a chart with invalid YAML that would cause AnnotateAndMerge to fail
+ ch := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v1",
+ Name: "test-chart",
+ Version: "0.1.0",
+ },
+ Templates: []*common.File{
+ {Name: "templates/invalid", ModTime: time.Now(), Data: []byte("invalid: yaml: content:")},
+ },
+ }
+ values := map[string]interface{}{}
+
+ _, _, _, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "error merging manifests")
+}
+
+func TestRenderResources_PostRenderer_SplitError(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ // Create a post-renderer that returns invalid YAML
+ mockPR := &mockPostRenderer{
+ transform: func(_ string) string {
+ return "invalid: yaml: content:"
+ },
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ _, _, _, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "error while parsing post rendered output: error parsing YAML: MalformedYAMLError:")
+}
+
+func TestRenderResources_PostRenderer_Integration(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ mockPR := &mockPostRenderer{
+ transform: func(content string) string {
+ return strings.ReplaceAll(content, "metadata:", "color: blue\nmetadata:")
+ },
+ }
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ hooks, buf, notes, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ mockPR, false, false, false,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, hooks)
+ assert.NotNil(t, buf)
+ assert.Equal(t, "", notes) // Notes should be empty for this test
+
+ // Verify that the post-renderer modifications are present in the output
+ output := buf.String()
+ expected := `---
+# Source: hello/templates/goodbye
+goodbye: world
+color: blue
+---
+# Source: hello/templates/hello
+hello: world
+color: blue
+---
+# Source: hello/templates/with-partials
+hello: Earth
+color: blue
+`
+ assert.Contains(t, output, "color: blue")
+ assert.Equal(t, 3, strings.Count(output, "color: blue"))
+ assert.Equal(t, expected, output)
+}
+
+func TestRenderResources_NoPostRenderer(t *testing.T) {
+ cfg := actionConfigFixture(t)
+
+ ch := buildChart(withSampleTemplates())
+ values := map[string]interface{}{}
+
+ hooks, buf, notes, err := cfg.renderResources(
+ ch, values, "test-release", "", false, false, false,
+ nil, false, false, false,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, hooks)
+ assert.NotNil(t, buf)
+ assert.Equal(t, "", notes)
+}
+
+func TestDetermineReleaseSSAApplyMethod(t *testing.T) {
+ assert.Equal(t, release.ApplyMethodClientSideApply, determineReleaseSSApplyMethod(false))
+ assert.Equal(t, release.ApplyMethodServerSideApply, determineReleaseSSApplyMethod(true))
+}
+
+func TestIsDryRun(t *testing.T) {
+ assert.False(t, isDryRun(DryRunNone))
+ assert.True(t, isDryRun(DryRunClient))
+ assert.True(t, isDryRun(DryRunServer))
+}
+
+func TestInteractWithServer(t *testing.T) {
+ assert.True(t, interactWithServer(DryRunNone))
+ assert.False(t, interactWithServer(DryRunClient))
+ assert.True(t, interactWithServer(DryRunServer))
+}
diff --git a/helm/pkg/action/dependency.go b/helm/pkg/action/dependency.go
new file mode 100644
index 000000000..b12887bde
--- /dev/null
+++ b/helm/pkg/action/dependency.go
@@ -0,0 +1,237 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/gosuri/uitable"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+// Dependency is the action for building a given chart's dependency tree.
+//
+// It provides the implementation of 'helm dependency' and its respective subcommands.
+type Dependency struct {
+ Verify bool
+ Keyring string
+ SkipRefresh bool
+ ColumnWidth uint
+ Username string
+ Password string
+ CertFile string
+ KeyFile string
+ CaFile string
+ InsecureSkipTLSVerify bool
+ PlainHTTP bool
+}
+
+// NewDependency creates a new Dependency object with the given configuration.
+func NewDependency() *Dependency {
+ return &Dependency{
+ ColumnWidth: 80,
+ }
+}
+
+// List executes 'helm dependency list'.
+func (d *Dependency) List(chartpath string, out io.Writer) error {
+ c, err := loader.Load(chartpath)
+ if err != nil {
+ return err
+ }
+
+ if c.Metadata.Dependencies == nil {
+ fmt.Fprintf(out, "WARNING: no dependencies at %s\n", filepath.Join(chartpath, "charts"))
+ return nil
+ }
+
+ d.printDependencies(chartpath, out, c)
+ fmt.Fprintln(out)
+ d.printMissing(chartpath, out, c.Metadata.Dependencies)
+ return nil
+}
+
+// dependencyStatus returns a string describing the status of a dependency viz a viz the parent chart.
+func (d *Dependency) dependencyStatus(chartpath string, dep *chart.Dependency, parent *chart.Chart) string {
+ filename := fmt.Sprintf("%s-%s.tgz", dep.Name, "*")
+
+ // If a chart is unpacked, this will check the unpacked chart's `charts/` directory for tarballs.
+ // Technically, this is COMPLETELY unnecessary, and should be removed in Helm 4. It is here
+ // to preserved backward compatibility. In Helm 2/3, there is a "difference" between
+ // the tgz version (which outputs "ok" if it unpacks) and the loaded version (which outputs
+ // "unpacked"). Early in Helm 2's history, this would have made a difference. But it no
+ // longer does. However, since this code shipped with Helm 3, the output must remain stable
+ // until Helm 4.
+ switch archives, err := filepath.Glob(filepath.Join(chartpath, "charts", filename)); {
+ case err != nil:
+ return "bad pattern"
+ case len(archives) > 1:
+ // See if the second part is a SemVer
+ found := []string{}
+ for _, arc := range archives {
+ // we need to trip the prefix dirs and the extension off.
+ filename = strings.TrimSuffix(filepath.Base(arc), ".tgz")
+ maybeVersion := strings.TrimPrefix(filename, fmt.Sprintf("%s-", dep.Name))
+
+ if _, err := semver.StrictNewVersion(maybeVersion); err == nil {
+ // If the version parsed without an error, it is possibly a valid
+ // version.
+ found = append(found, arc)
+ }
+ }
+
+ if l := len(found); l == 1 {
+ // If we get here, we do the same thing as in len(archives) == 1.
+ if r := statArchiveForStatus(found[0], dep); r != "" {
+ return r
+ }
+
+ // Fall through and look for directories
+ } else if l > 1 {
+ return "too many matches"
+ }
+
+ // The sanest thing to do here is to fall through and see if we have any directory
+ // matches.
+
+ case len(archives) == 1:
+ archive := archives[0]
+ if r := statArchiveForStatus(archive, dep); r != "" {
+ return r
+ }
+
+ }
+ // End unnecessary code.
+
+ var depChart *chart.Chart
+ for _, item := range parent.Dependencies() {
+ if item.Name() == dep.Name {
+ depChart = item
+ }
+ }
+
+ if depChart == nil {
+ return "missing"
+ }
+
+ if depChart.Metadata.Version != dep.Version {
+ constraint, err := semver.NewConstraint(dep.Version)
+ if err != nil {
+ return "invalid version"
+ }
+
+ v, err := semver.NewVersion(depChart.Metadata.Version)
+ if err != nil {
+ return "invalid version"
+ }
+
+ if !constraint.Check(v) {
+ return "wrong version"
+ }
+ }
+
+ return "unpacked"
+}
+
+// stat an archive and return a message if the stat is successful
+//
+// This is a refactor of the code originally in dependencyStatus. It is here to
+// support legacy behavior, and should be removed in Helm 4.
+func statArchiveForStatus(archive string, dep *chart.Dependency) string {
+ if _, err := os.Stat(archive); err == nil {
+ c, err := loader.Load(archive)
+ if err != nil {
+ return "corrupt"
+ }
+ if c.Name() != dep.Name {
+ return "misnamed"
+ }
+
+ if c.Metadata.Version != dep.Version {
+ constraint, err := semver.NewConstraint(dep.Version)
+ if err != nil {
+ return "invalid version"
+ }
+
+ v, err := semver.NewVersion(c.Metadata.Version)
+ if err != nil {
+ return "invalid version"
+ }
+
+ if !constraint.Check(v) {
+ return "wrong version"
+ }
+ }
+ return "ok"
+ }
+ return ""
+}
+
+// printDependencies prints all of the dependencies in the yaml file.
+func (d *Dependency) printDependencies(chartpath string, out io.Writer, c *chart.Chart) {
+ table := uitable.New()
+ table.MaxColWidth = d.ColumnWidth
+ table.AddRow("NAME", "VERSION", "REPOSITORY", "STATUS")
+ for _, row := range c.Metadata.Dependencies {
+ table.AddRow(row.Name, row.Version, row.Repository, d.dependencyStatus(chartpath, row, c))
+ }
+ fmt.Fprintln(out, table)
+}
+
+// printMissing prints warnings about charts that are present on disk, but are
+// not in Chart.yaml.
+func (d *Dependency) printMissing(chartpath string, out io.Writer, reqs []*chart.Dependency) {
+ folder := filepath.Join(chartpath, "charts/*")
+ files, err := filepath.Glob(folder)
+ if err != nil {
+ fmt.Fprintln(out, err)
+ return
+ }
+
+ for _, f := range files {
+ fi, err := os.Stat(f)
+ if err != nil {
+ fmt.Fprintf(out, "Warning: %s\n", err)
+ }
+ // Skip anything that is not a directory and not a tgz file.
+ if !fi.IsDir() && filepath.Ext(f) != ".tgz" {
+ continue
+ }
+ c, err := loader.Load(f)
+ if err != nil {
+ fmt.Fprintf(out, "WARNING: %q is not a chart.\n", f)
+ continue
+ }
+ found := false
+ for _, d := range reqs {
+ if d.Name == c.Name() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ fmt.Fprintf(out, "WARNING: %q is not in Chart.yaml.\n", f)
+ }
+ }
+}
diff --git a/helm/pkg/action/dependency_test.go b/helm/pkg/action/dependency_test.go
new file mode 100644
index 000000000..5be7bf5a9
--- /dev/null
+++ b/helm/pkg/action/dependency_test.go
@@ -0,0 +1,152 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/test"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+func TestList(t *testing.T) {
+ for _, tcase := range []struct {
+ chart string
+ golden string
+ }{
+ {
+ chart: "testdata/charts/chart-with-compressed-dependencies",
+ golden: "output/list-compressed-deps.txt",
+ },
+ {
+ chart: "testdata/charts/chart-with-compressed-dependencies-2.1.8.tgz",
+ golden: "output/list-compressed-deps-tgz.txt",
+ },
+ {
+ chart: "testdata/charts/chart-with-uncompressed-dependencies",
+ golden: "output/list-uncompressed-deps.txt",
+ },
+ {
+ chart: "testdata/charts/chart-with-uncompressed-dependencies-2.1.8.tgz",
+ golden: "output/list-uncompressed-deps-tgz.txt",
+ },
+ {
+ chart: "testdata/charts/chart-missing-deps",
+ golden: "output/list-missing-deps.txt",
+ },
+ } {
+ buf := bytes.Buffer{}
+ if err := NewDependency().List(tcase.chart, &buf); err != nil {
+ t.Fatal(err)
+ }
+ test.AssertGoldenString(t, buf.String(), tcase.golden)
+ }
+}
+
+// TestDependencyStatus_Dashes is a regression test to make sure that dashes in
+// chart names do not cause resolution problems.
+func TestDependencyStatus_Dashes(t *testing.T) {
+ // Make a temp dir
+ dir := t.TempDir()
+
+ chartpath := filepath.Join(dir, "charts")
+ if err := os.MkdirAll(chartpath, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add some fake charts
+ first := buildChart(withName("first-chart"))
+ _, err := chartutil.Save(first, chartpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ second := buildChart(withName("first-chart-second-chart"))
+ _, err = chartutil.Save(second, chartpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dep := &chart.Dependency{
+ Name: "first-chart",
+ Version: "0.1.0",
+ }
+
+ // Now try to get the deps
+ stat := NewDependency().dependencyStatus(dir, dep, first)
+ if stat != "ok" {
+ t.Errorf("Unexpected status: %q", stat)
+ }
+}
+
+func TestStatArchiveForStatus(t *testing.T) {
+ // Make a temp dir
+ dir := t.TempDir()
+
+ chartpath := filepath.Join(dir, "charts")
+ if err := os.MkdirAll(chartpath, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ // unsaved chart
+ lilith := buildChart(withName("lilith"))
+
+ // dep referring to chart
+ dep := &chart.Dependency{
+ Name: "lilith",
+ Version: "1.2.3",
+ }
+
+ is := assert.New(t)
+
+ lilithpath := filepath.Join(chartpath, "lilith-1.2.3.tgz")
+ is.Empty(statArchiveForStatus(lilithpath, dep))
+
+ // save the chart (version 0.1.0, because that is the default)
+ where, err := chartutil.Save(lilith, chartpath)
+ is.NoError(err)
+
+ // Should get "wrong version" because we asked for 1.2.3 and got 0.1.0
+ is.Equal("wrong version", statArchiveForStatus(where, dep))
+
+ // Break version on dep
+ dep = &chart.Dependency{
+ Name: "lilith",
+ Version: "1.2.3.4.5",
+ }
+ is.Equal("invalid version", statArchiveForStatus(where, dep))
+
+ // Break the name
+ dep = &chart.Dependency{
+ Name: "lilith2",
+ Version: "1.2.3",
+ }
+ is.Equal("misnamed", statArchiveForStatus(where, dep))
+
+ // Now create the right version
+ dep = &chart.Dependency{
+ Name: "lilith",
+ Version: "0.1.0",
+ }
+ is.Equal("ok", statArchiveForStatus(where, dep))
+}
diff --git a/helm/pkg/action/doc.go b/helm/pkg/action/doc.go
new file mode 100644
index 000000000..3c91bd618
--- /dev/null
+++ b/helm/pkg/action/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package action contains the logic for each action that Helm can perform.
+//
+// This is a library for calling top-level Helm actions like 'install',
+// 'upgrade', or 'list'. Actions approximately match the command line
+// invocations that the Helm client uses.
+package action
diff --git a/helm/pkg/action/get.go b/helm/pkg/action/get.go
new file mode 100644
index 000000000..b5e7c194b
--- /dev/null
+++ b/helm/pkg/action/get.go
@@ -0,0 +1,47 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ release "helm.sh/helm/v4/pkg/release"
+)
+
+// Get is the action for checking a given release's information.
+//
+// It provides the implementation of 'helm get' and its respective subcommands (except `helm get values`).
+type Get struct {
+ cfg *Configuration
+
+ // Initializing Version to 0 will get the latest revision of the release.
+ Version int
+}
+
+// NewGet creates a new Get object with the given configuration.
+func NewGet(cfg *Configuration) *Get {
+ return &Get{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm get' against the given release.
+func (g *Get) Run(name string) (release.Releaser, error) {
+ if err := g.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ return g.cfg.releaseContent(name, g.Version)
+}
diff --git a/helm/pkg/action/get_metadata.go b/helm/pkg/action/get_metadata.go
new file mode 100644
index 000000000..5312dac7f
--- /dev/null
+++ b/helm/pkg/action/get_metadata.go
@@ -0,0 +1,127 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "log/slog"
+ "sort"
+ "strings"
+ "time"
+
+ ci "helm.sh/helm/v4/pkg/chart"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release"
+)
+
+// GetMetadata is the action for checking a given release's metadata.
+//
+// It provides the implementation of 'helm get metadata'.
+type GetMetadata struct {
+ cfg *Configuration
+
+ Version int
+}
+
+type Metadata struct {
+ Name string `json:"name" yaml:"name"`
+ Chart string `json:"chart" yaml:"chart"`
+ Version string `json:"version" yaml:"version"`
+ AppVersion string `json:"appVersion" yaml:"appVersion"`
+ // Annotations are fetched from the Chart.yaml file
+ Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
+ // Labels of the release which are stored in driver metadata fields storage
+ Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+ Dependencies []ci.Dependency `json:"dependencies,omitempty" yaml:"dependencies,omitempty"`
+ Namespace string `json:"namespace" yaml:"namespace"`
+ Revision int `json:"revision" yaml:"revision"`
+ Status string `json:"status" yaml:"status"`
+ DeployedAt string `json:"deployedAt" yaml:"deployedAt"`
+ ApplyMethod string `json:"applyMethod,omitempty" yaml:"applyMethod,omitempty"`
+}
+
+// NewGetMetadata creates a new GetMetadata object with the given configuration.
+func NewGetMetadata(cfg *Configuration) *GetMetadata {
+ return &GetMetadata{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm get metadata' against the given release.
+func (g *GetMetadata) Run(name string) (*Metadata, error) {
+ if err := g.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ rel, err := g.cfg.releaseContent(name, g.Version)
+ if err != nil {
+ return nil, err
+ }
+
+ rac, err := release.NewAccessor(rel)
+ if err != nil {
+ return nil, err
+ }
+ ac, err := ci.NewAccessor(rac.Chart())
+ if err != nil {
+ return nil, err
+ }
+
+ charti := rac.Chart()
+
+ var chrt *chart.Chart
+ switch c := charti.(type) {
+ case *chart.Chart:
+ chrt = c
+ case chart.Chart:
+ chrt = &c
+ default:
+ return nil, errors.New("invalid chart apiVersion")
+ }
+
+ return &Metadata{
+ Name: rac.Name(),
+ Chart: chrt.Metadata.Name,
+ Version: chrt.Metadata.Version,
+ AppVersion: chrt.Metadata.AppVersion,
+ Dependencies: ac.MetaDependencies(),
+ Annotations: chrt.Metadata.Annotations,
+ Labels: rac.Labels(),
+ Namespace: rac.Namespace(),
+ Revision: rac.Version(),
+ Status: rac.Status(),
+ DeployedAt: rac.DeployedAt().Format(time.RFC3339),
+ ApplyMethod: rac.ApplyMethod(),
+ }, nil
+}
+
+// FormattedDepNames formats metadata.dependencies names into a comma-separated list.
+func (m *Metadata) FormattedDepNames() string {
+ depsNames := make([]string, 0, len(m.Dependencies))
+ for _, dep := range m.Dependencies {
+ ac, err := ci.NewDependencyAccessor(dep)
+ if err != nil {
+ slog.Error("unable to access dependency metadata", "error", err)
+ continue
+ }
+ depsNames = append(depsNames, ac.Name())
+
+ }
+ sort.StringSlice(depsNames).Sort()
+
+ return strings.Join(depsNames, ",")
+}
diff --git a/helm/pkg/action/get_metadata_test.go b/helm/pkg/action/get_metadata_test.go
new file mode 100644
index 000000000..4caa966ab
--- /dev/null
+++ b/helm/pkg/action/get_metadata_test.go
@@ -0,0 +1,658 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ ci "helm.sh/helm/v4/pkg/chart"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestNewGetMetadata(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, cfg, client.cfg)
+ assert.Equal(t, 0, client.Version)
+}
+
+func TestGetMetadata_Run_BasicMetadata(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ err := cfg.Releases.Create(rel)
+ require.NoError(t, err)
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, "v1.2.3", result.AppVersion)
+ assert.Equal(t, "default", result.Namespace)
+ assert.Equal(t, 1, result.Revision)
+ assert.Equal(t, "deployed", result.Status)
+ assert.Equal(t, deployedTime.Format(time.RFC3339), result.DeployedAt)
+ assert.Empty(t, result.Dependencies)
+ assert.Empty(t, result.Annotations)
+}
+
+func TestGetMetadata_Run_WithDependencies(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ dependencies := []*chart.Dependency{
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ {
+ Name: "redis",
+ Version: "6.2.4",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Dependencies: dependencies,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ dep0, err := ci.NewDependencyAccessor(result.Dependencies[0])
+ require.NoError(t, err)
+ dep1, err := ci.NewDependencyAccessor(result.Dependencies[1])
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, convertDeps(dependencies), result.Dependencies)
+ assert.Len(t, result.Dependencies, 2)
+ assert.Equal(t, "mysql", dep0.Name())
+ assert.Equal(t, "redis", dep1.Name())
+}
+
+func TestGetMetadata_Run_WithDependenciesAliases(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ dependencies := []*chart.Dependency{
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "database",
+ },
+ {
+ Name: "redis",
+ Version: "6.2.4",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "cache",
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Dependencies: dependencies,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ dep0, err := ci.NewDependencyAccessor(result.Dependencies[0])
+ require.NoError(t, err)
+ dep1, err := ci.NewDependencyAccessor(result.Dependencies[1])
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, convertDeps(dependencies), result.Dependencies)
+ assert.Len(t, result.Dependencies, 2)
+ assert.Equal(t, "mysql", dep0.Name())
+ assert.Equal(t, "database", dep0.Alias())
+ assert.Equal(t, "redis", dep1.Name())
+ assert.Equal(t, "cache", dep1.Alias())
+}
+
+func TestGetMetadata_Run_WithMixedDependencies(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ dependencies := []*chart.Dependency{
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "database",
+ },
+ {
+ Name: "nginx",
+ Version: "1.20.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ {
+ Name: "redis",
+ Version: "6.2.4",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Alias: "cache",
+ },
+ {
+ Name: "postgresql",
+ Version: "11.0.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Dependencies: dependencies,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ dep0, err := ci.NewDependencyAccessor(result.Dependencies[0])
+ require.NoError(t, err)
+ dep1, err := ci.NewDependencyAccessor(result.Dependencies[1])
+ require.NoError(t, err)
+ dep2, err := ci.NewDependencyAccessor(result.Dependencies[2])
+ require.NoError(t, err)
+ dep3, err := ci.NewDependencyAccessor(result.Dependencies[3])
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.0.0", result.Version)
+ assert.Equal(t, convertDeps(dependencies), result.Dependencies)
+ assert.Len(t, result.Dependencies, 4)
+
+ // Verify dependencies with aliases
+ assert.Equal(t, "mysql", dep0.Name())
+ assert.Equal(t, "database", dep0.Alias())
+ assert.Equal(t, "redis", dep2.Name())
+ assert.Equal(t, "cache", dep2.Alias())
+
+ // Verify dependencies without aliases
+ assert.Equal(t, "nginx", dep1.Name())
+ assert.Equal(t, "", dep1.Alias())
+ assert.Equal(t, "postgresql", dep3.Name())
+ assert.Equal(t, "", dep3.Alias())
+}
+
+func TestGetMetadata_Run_WithAnnotations(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ annotations := map[string]string{
+ "helm.sh/hook": "pre-install",
+ "helm.sh/hook-weight": "5",
+ "custom.annotation": "test-value",
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.2.3",
+ Annotations: annotations,
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, annotations, result.Annotations)
+ assert.Equal(t, "pre-install", result.Annotations["helm.sh/hook"])
+ assert.Equal(t, "5", result.Annotations["helm.sh/hook-weight"])
+ assert.Equal(t, "test-value", result.Annotations["custom.annotation"])
+}
+
+func TestGetMetadata_Run_SpecificVersion(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+ client.Version = 2
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ rel1 := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusSuperseded,
+ LastDeployed: deployedTime.Add(-time.Hour),
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.0.0",
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ rel2 := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.1.0",
+ AppVersion: "v1.1.0",
+ },
+ },
+ Version: 2,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel1))
+ require.NoError(t, cfg.Releases.Create(rel2))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, "test-chart", result.Chart)
+ assert.Equal(t, "1.1.0", result.Version)
+ assert.Equal(t, "v1.1.0", result.AppVersion)
+ assert.Equal(t, 2, result.Revision)
+ assert.Equal(t, "deployed", result.Status)
+}
+
+func TestGetMetadata_Run_DifferentStatuses(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ testCases := []struct {
+ name string
+ status common.Status
+ expected string
+ }{
+ {"deployed", common.StatusDeployed, "deployed"},
+ {"failed", common.StatusFailed, "failed"},
+ {"uninstalled", common.StatusUninstalled, "uninstalled"},
+ {"pending-install", common.StatusPendingInstall, "pending-install"},
+ {"pending-upgrade", common.StatusPendingUpgrade, "pending-upgrade"},
+ {"pending-rollback", common.StatusPendingRollback, "pending-rollback"},
+ {"superseded", common.StatusSuperseded, "superseded"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ releaseName := "test-release-" + tc.name
+ deployedTime := time.Now()
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: tc.status,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "v1.0.0",
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, tc.expected, result.Status)
+ })
+ }
+}
+
+func TestGetMetadata_Run_UnreachableKubeClient(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ cfg.KubeClient = &failingKubeClient
+
+ client := NewGetMetadata(cfg)
+
+ _, err := client.Run("test-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "connection refused")
+}
+
+func TestGetMetadata_Run_ReleaseNotFound(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ _, err := client.Run("non-existent-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "not found")
+}
+
+func TestGetMetadata_Run_EmptyAppVersion(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetMetadata(cfg)
+
+ releaseName := "test-release"
+ deployedTime := time.Now()
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ LastDeployed: deployedTime,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "", // Empty app version
+ },
+ },
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, "", result.AppVersion)
+}
+
+func TestMetadata_FormattedDepNames(t *testing.T) {
+ testCases := []struct {
+ name string
+ dependencies []*chart.Dependency
+ expected string
+ }{
+ {
+ name: "no dependencies",
+ dependencies: []*chart.Dependency{},
+ expected: "",
+ },
+ {
+ name: "single dependency",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql"},
+ },
+ expected: "mysql",
+ },
+ {
+ name: "multiple dependencies sorted",
+ dependencies: []*chart.Dependency{
+ {Name: "redis"},
+ {Name: "mysql"},
+ {Name: "nginx"},
+ },
+ expected: "mysql,nginx,redis",
+ },
+ {
+ name: "already sorted dependencies",
+ dependencies: []*chart.Dependency{
+ {Name: "apache"},
+ {Name: "mysql"},
+ {Name: "zookeeper"},
+ },
+ expected: "apache,mysql,zookeeper",
+ },
+ {
+ name: "duplicate names",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql"},
+ {Name: "redis"},
+ {Name: "mysql"},
+ },
+ expected: "mysql,mysql,redis",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ deps := convertDeps(tc.dependencies)
+ metadata := &Metadata{
+ Dependencies: deps,
+ }
+
+ result := metadata.FormattedDepNames()
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+func convertDeps(deps []*chart.Dependency) []ci.Dependency {
+ var newDeps = make([]ci.Dependency, len(deps))
+ for i, c := range deps {
+ newDeps[i] = c
+ }
+ return newDeps
+}
+
+func TestMetadata_FormattedDepNames_WithComplexDependencies(t *testing.T) {
+ dependencies := []*chart.Dependency{
+ {
+ Name: "zookeeper",
+ Version: "10.0.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Condition: "zookeeper.enabled",
+ },
+ {
+ Name: "apache",
+ Version: "9.0.0",
+ Repository: "https://charts.bitnami.com/bitnami",
+ },
+ {
+ Name: "mysql",
+ Version: "8.0.25",
+ Repository: "https://charts.bitnami.com/bitnami",
+ Condition: "mysql.enabled",
+ },
+ }
+
+ deps := convertDeps(dependencies)
+ metadata := &Metadata{
+ Dependencies: deps,
+ }
+
+ result := metadata.FormattedDepNames()
+ assert.Equal(t, "apache,mysql,zookeeper", result)
+}
+
+func TestMetadata_FormattedDepNames_WithAliases(t *testing.T) {
+ testCases := []struct {
+ name string
+ dependencies []*chart.Dependency
+ expected string
+ }{
+ {
+ name: "dependencies with aliases",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql", Alias: "database"},
+ {Name: "redis", Alias: "cache"},
+ },
+ expected: "mysql,redis",
+ },
+ {
+ name: "mixed dependencies with and without aliases",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql", Alias: "database"},
+ {Name: "nginx"},
+ {Name: "redis", Alias: "cache"},
+ },
+ expected: "mysql,nginx,redis",
+ },
+ {
+ name: "empty alias should use name",
+ dependencies: []*chart.Dependency{
+ {Name: "mysql", Alias: ""},
+ {Name: "redis", Alias: "cache"},
+ },
+ expected: "mysql,redis",
+ },
+ {
+ name: "sorted by name not alias",
+ dependencies: []*chart.Dependency{
+ {Name: "zookeeper", Alias: "a-service"},
+ {Name: "apache", Alias: "z-service"},
+ },
+ expected: "apache,zookeeper",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ deps := convertDeps(tc.dependencies)
+ metadata := &Metadata{
+ Dependencies: deps,
+ }
+
+ result := metadata.FormattedDepNames()
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
+
+func TestGetMetadata_Labels(t *testing.T) {
+ rel := releaseStub()
+ rel.Info.Status = common.StatusDeployed
+ customLabels := map[string]string{"key1": "value1", "key2": "value2"}
+ rel.Labels = customLabels
+
+ metaGetter := NewGetMetadata(actionConfigFixture(t))
+ err := metaGetter.cfg.Releases.Create(rel)
+ assert.NoError(t, err)
+
+ metadata, err := metaGetter.Run(rel.Name)
+ assert.NoError(t, err)
+
+ assert.Equal(t, metadata.Name, rel.Name)
+ assert.Equal(t, metadata.Labels, customLabels)
+}
diff --git a/helm/pkg/action/get_test.go b/helm/pkg/action/get_test.go
new file mode 100644
index 000000000..876819ee4
--- /dev/null
+++ b/helm/pkg/action/get_test.go
@@ -0,0 +1,69 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+func TestNewGet(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewGet(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+ assert.Equal(t, 0, client.Version)
+}
+
+func TestGetRun(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewGet(config)
+ simpleRelease := namedReleaseStub("test-release", common.StatusPendingUpgrade)
+ require.NoError(t, config.Releases.Create(simpleRelease))
+
+ releaser, err := client.Run(simpleRelease.Name)
+ require.NoError(t, err)
+
+ result, err := releaserToV1Release(releaser)
+ require.NoError(t, err)
+ assert.Equal(t, simpleRelease.Name, result.Name)
+ assert.Equal(t, simpleRelease.Version, result.Version)
+}
+
+func TestGetRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewGet(config)
+ simpleRelease := namedReleaseStub("test-release", common.StatusPendingUpgrade)
+ require.NoError(t, config.Releases.Create(simpleRelease))
+
+ result, err := client.Run(simpleRelease.Name)
+ assert.Nil(t, result)
+ assert.Error(t, err)
+}
diff --git a/helm/pkg/action/get_values.go b/helm/pkg/action/get_values.go
new file mode 100644
index 000000000..6475a140b
--- /dev/null
+++ b/helm/pkg/action/get_values.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ release "helm.sh/helm/v4/pkg/release"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// GetValues is the action for checking a given release's values.
+//
+// It provides the implementation of 'helm get values'.
+type GetValues struct {
+ cfg *Configuration
+
+ Version int
+ AllValues bool
+}
+
+// NewGetValues creates a new GetValues object with the given configuration.
+func NewGetValues(cfg *Configuration) *GetValues {
+ return &GetValues{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm get values' against the given release.
+func (g *GetValues) Run(name string) (map[string]interface{}, error) {
+ if err := g.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ reli, err := g.cfg.releaseContent(name, g.Version)
+ if err != nil {
+ return nil, err
+ }
+
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the user wants all values, compute the values and return.
+ if g.AllValues {
+ cfg, err := util.CoalesceValues(rel.Chart, rel.Config)
+ if err != nil {
+ return nil, err
+ }
+ return cfg, nil
+ }
+ return rel.Config, nil
+}
+
+// releaserToV1Release is a helper function to convert a v1 release passed by interface
+// into the type object.
+func releaserToV1Release(rel release.Releaser) (*rspb.Release, error) {
+ switch r := rel.(type) {
+ case rspb.Release:
+ return &r, nil
+ case *rspb.Release:
+ return r, nil
+ case nil:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unsupported release type: %T", rel)
+ }
+}
diff --git a/helm/pkg/action/get_values_test.go b/helm/pkg/action/get_values_test.go
new file mode 100644
index 000000000..69a95a2e4
--- /dev/null
+++ b/helm/pkg/action/get_values_test.go
@@ -0,0 +1,220 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestNewGetValues(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, cfg, client.cfg)
+ assert.Equal(t, 0, client.Version)
+ assert.Equal(t, false, client.AllValues)
+}
+
+func TestGetValues_Run_UserConfigOnly(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ releaseName := "test-release"
+ userConfig := map[string]interface{}{
+ "database": map[string]interface{}{
+ "host": "localhost",
+ "port": 5432,
+ },
+ "app": map[string]interface{}{
+ "name": "my-app",
+ "replicas": 3,
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ Values: map[string]interface{}{
+ "defaultKey": "defaultValue",
+ "app": map[string]interface{}{
+ "name": "default-app",
+ "timeout": 30,
+ },
+ },
+ },
+ Config: userConfig,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Equal(t, userConfig, result)
+}
+
+func TestGetValues_Run_AllValues(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+ client.AllValues = true
+
+ releaseName := "test-release"
+ userConfig := map[string]interface{}{
+ "database": map[string]interface{}{
+ "host": "localhost",
+ "port": 5432,
+ },
+ "app": map[string]interface{}{
+ "name": "my-app",
+ },
+ }
+
+ chartDefaultValues := map[string]interface{}{
+ "defaultKey": "defaultValue",
+ "app": map[string]interface{}{
+ "name": "default-app",
+ "timeout": 30,
+ },
+ }
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ Values: chartDefaultValues,
+ },
+ Config: userConfig,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ assert.Equal(t, "my-app", result["app"].(map[string]interface{})["name"])
+ assert.Equal(t, 30, result["app"].(map[string]interface{})["timeout"])
+ assert.Equal(t, "defaultValue", result["defaultKey"])
+ assert.Equal(t, "localhost", result["database"].(map[string]interface{})["host"])
+ assert.Equal(t, 5432, result["database"].(map[string]interface{})["port"])
+}
+
+func TestGetValues_Run_EmptyValues(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ releaseName := "test-release"
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ },
+ Config: map[string]interface{}{},
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Equal(t, map[string]interface{}{}, result)
+}
+
+func TestGetValues_Run_UnreachableKubeClient(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ cfg.KubeClient = &failingKubeClient
+
+ client := NewGetValues(cfg)
+
+ _, err := client.Run("test-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "connection refused")
+}
+
+func TestGetValues_Run_ReleaseNotFound(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ _, err := client.Run("non-existent-release")
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "not found")
+}
+
+func TestGetValues_Run_NilConfig(t *testing.T) {
+ cfg := actionConfigFixture(t)
+ client := NewGetValues(cfg)
+
+ releaseName := "test-release"
+
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ },
+ },
+ Config: nil,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ require.NoError(t, cfg.Releases.Create(rel))
+
+ result, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Nil(t, result)
+}
diff --git a/helm/pkg/action/history.go b/helm/pkg/action/history.go
new file mode 100644
index 000000000..3d561b3ad
--- /dev/null
+++ b/helm/pkg/action/history.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/release"
+)
+
+// History is the action for checking the release's ledger.
+//
+// It provides the implementation of 'helm history'.
+// It returns all the revisions for a specific release.
+// To list up to one revision of every release in one specific, or in all,
+// namespaces, see the List action.
+type History struct {
+ cfg *Configuration
+
+ Max int
+ Version int
+}
+
+// NewHistory creates a new History object with the given configuration.
+func NewHistory(cfg *Configuration) *History {
+ return &History{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm history' against the given release.
+func (h *History) Run(name string) ([]release.Releaser, error) {
+ if err := h.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ if err := chartutil.ValidateReleaseName(name); err != nil {
+ return nil, fmt.Errorf("release name is invalid: %s", name)
+ }
+
+ h.cfg.Logger().Debug("getting history for release", "release", name)
+ return h.cfg.Releases.History(name)
+}
diff --git a/helm/pkg/action/history_test.go b/helm/pkg/action/history_test.go
new file mode 100644
index 000000000..31fdd4a96
--- /dev/null
+++ b/helm/pkg/action/history_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ release "helm.sh/helm/v4/pkg/release/v1"
+
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+func TestNewHistory(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewHistory(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
+
+func TestHistoryRun(t *testing.T) {
+ releaseName := "test-release"
+ simpleRelease := namedReleaseStub(releaseName, common.StatusPendingUpgrade)
+ updatedRelease := namedReleaseStub(releaseName, common.StatusDeployed)
+ updatedRelease.Chart.Metadata.Version = "0.1.1"
+ updatedRelease.Version = 2
+
+ config := actionConfigFixture(t)
+ client := NewHistory(config)
+ client.Max = 3
+ client.cfg.Releases.MaxHistory = 3
+ for _, rel := range []*release.Release{simpleRelease, updatedRelease} {
+ if err := client.cfg.Releases.Create(rel); err != nil {
+ t.Fatal(err, "Could not add releases to Config")
+ }
+ }
+
+ releases, err := config.Releases.ListReleases()
+ require.NoError(t, err)
+ assert.Len(t, releases, 2, "expected 2 Releases in Config")
+
+ releasers, err := client.Run(releaseName)
+ require.NoError(t, err)
+ assert.Len(t, releasers, 2, "expected 2 Releases in History result")
+
+ release1, err := releaserToV1Release(releasers[0])
+ require.NoError(t, err)
+ assert.Equal(t, simpleRelease.Name, release1.Name)
+ assert.Equal(t, simpleRelease.Version, release1.Version)
+
+ release2, err := releaserToV1Release(releasers[1])
+ require.NoError(t, err)
+ assert.Equal(t, updatedRelease.Name, release2.Name)
+ assert.Equal(t, updatedRelease.Version, release2.Version)
+}
+
+func TestHistoryRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewHistory(config)
+ result, err := client.Run("release-name")
+ assert.Nil(t, result)
+ assert.Error(t, err)
+}
+
+func TestHistoryRun_InvalidReleaseNames(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewHistory(config)
+ invalidReleaseNames := []string{
+ "",
+ "too-long-release-name-max-53-characters-abcdefghijklmnopqrstuvwxyz",
+ "MyRelease",
+ "release_name",
+ "release@123",
+ "-badstart",
+ "badend-",
+ ".dotstart",
+ }
+
+ for _, name := range invalidReleaseNames {
+ result, err := client.Run(name)
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "release name is invalid")
+ }
+}
diff --git a/helm/pkg/action/hooks.go b/helm/pkg/action/hooks.go
new file mode 100644
index 000000000..28033395b
--- /dev/null
+++ b/helm/pkg/action/hooks.go
@@ -0,0 +1,297 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "slices"
+ "sort"
+ "time"
+
+ "helm.sh/helm/v4/pkg/kube"
+
+ "go.yaml.in/yaml/v3"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// execHook executes all of the hooks for the given hook event.
+func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
+ waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption,
+ timeout time.Duration, serverSideApply bool) error {
+
+ shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, waitOptions, timeout, serverSideApply)
+ if shutdown == nil {
+ return err
+ }
+ if err != nil {
+ if err := shutdown(); err != nil {
+ return err
+ }
+ return err
+ }
+ return shutdown()
+}
+
+type ExecuteShutdownFunc = func() error
+
+func shutdownNoOp() error {
+ return nil
+}
+
+// execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs.
+func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent,
+ waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration,
+ serverSideApply bool) (ExecuteShutdownFunc, error) {
+
+ executingHooks := []*release.Hook{}
+
+ for _, h := range rl.Hooks {
+ for _, e := range h.Events {
+ if e == hook {
+ executingHooks = append(executingHooks, h)
+ }
+ }
+ }
+
+ // hooke are pre-ordered by kind, so keep order stable
+ sort.Stable(hookByWeight(executingHooks))
+
+ for i, h := range executingHooks {
+ // Set default delete policy to before-hook-creation
+ cfg.hookSetDeletePolicy(h)
+
+ if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, waitOptions, timeout); err != nil {
+ return shutdownNoOp, err
+ }
+
+ resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
+ if err != nil {
+ return shutdownNoOp, fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err)
+ }
+
+ // Record the time at which the hook was applied to the cluster
+ h.LastRun = release.HookExecution{
+ StartedAt: time.Now(),
+ Phase: release.HookPhaseRunning,
+ }
+ cfg.recordRelease(rl)
+
+ // As long as the implementation of WatchUntilReady does not panic, HookPhaseFailed or HookPhaseSucceeded
+ // should always be set by this function. If we fail to do that for any reason, then HookPhaseUnknown is
+ // the most appropriate value to surface.
+ h.LastRun.Phase = release.HookPhaseUnknown
+
+ // Create hook resources
+ if _, err := cfg.KubeClient.Create(
+ resources,
+ kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil {
+ h.LastRun.CompletedAt = time.Now()
+ h.LastRun.Phase = release.HookPhaseFailed
+ return shutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err)
+ }
+
+ var waiter kube.Waiter
+ if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...)
+ } else {
+ waiter, err = cfg.KubeClient.GetWaiter(waitStrategy)
+ }
+ if err != nil {
+ return shutdownNoOp, fmt.Errorf("unable to get waiter: %w", err)
+ }
+ // Watch hook resources until they have completed
+ err = waiter.WatchUntilReady(resources, timeout)
+ // Note the time of success/failure
+ h.LastRun.CompletedAt = time.Now()
+ // Mark hook as succeeded or failed
+ if err != nil {
+ h.LastRun.Phase = release.HookPhaseFailed
+ // If a hook is failed, check the annotation of the hook to determine if we should copy the logs client side
+ if errOutputting := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnFailed); errOutputting != nil {
+ // We log the error here as we want to propagate the hook failure upwards to the release object.
+ log.Printf("error outputting logs for hook failure: %v", errOutputting)
+ }
+ // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
+ // under failed condition. If so, then clear the corresponding resource object in the hook
+ return func() error {
+ if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, waitOptions, timeout); errDeleting != nil {
+ // We log the error here as we want to propagate the hook failure upwards to the release object.
+ log.Printf("error deleting the hook resource on hook failure: %v", errDeleting)
+ }
+
+ // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks
+ // should be deleted under succeeded condition.
+ if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil {
+ return err
+ }
+ return err
+ }, err
+ }
+ h.LastRun.Phase = release.HookPhaseSucceeded
+ }
+
+ return func() error {
+ // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted
+ // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook
+ for i := len(executingHooks) - 1; i >= 0; i-- {
+ h := executingHooks[i]
+ if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil {
+ // We log here as we still want to attempt hook resource deletion even if output logging fails.
+ log.Printf("error outputting logs for hook failure: %v", err)
+ }
+ if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil {
+ return err
+ }
+ }
+ return nil
+ }, nil
+}
+
+// hookByWeight is a sorter for hooks
+type hookByWeight []*release.Hook
+
+func (x hookByWeight) Len() int { return len(x) }
+func (x hookByWeight) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x hookByWeight) Less(i, j int) bool {
+ if x[i].Weight == x[j].Weight {
+ return x[i].Name < x[j].Name
+ }
+ return x[i].Weight < x[j].Weight
+}
+
+// deleteHookByPolicy deletes a hook if the hook policy instructs it to
+func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy,
+ waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error {
+
+ // Never delete CustomResourceDefinitions; this could cause lots of
+ // cascading garbage collection.
+ if h.Kind == "CustomResourceDefinition" {
+ return nil
+ }
+ if cfg.hookHasDeletePolicy(h, policy) {
+ resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), false)
+ if err != nil {
+ return fmt.Errorf("unable to build kubernetes object for deleting hook %s: %w", h.Path, err)
+ }
+ _, errs := cfg.KubeClient.Delete(resources, metav1.DeletePropagationBackground)
+ if len(errs) > 0 {
+ return joinErrors(errs, "; ")
+ }
+
+ var waiter kube.Waiter
+ if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...)
+ } else {
+ waiter, err = cfg.KubeClient.GetWaiter(waitStrategy)
+ }
+ if err != nil {
+ return err
+ }
+ if err := waiter.WaitForDelete(resources, timeout); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to
+func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy,
+ waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error {
+
+ for _, h := range hooks {
+ if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, waitOptions, timeout); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion polices
+// supported by helm. If so, mark the hook as one should be deleted.
+func (cfg *Configuration) hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool {
+ cfg.mutex.Lock()
+ defer cfg.mutex.Unlock()
+ return slices.Contains(h.DeletePolicies, policy)
+}
+
+// hookSetDeletePolicy determines whether the defined hook deletion policy matches the hook deletion polices
+// supported by helm. If so, mark the hook as one should be deleted.
+func (cfg *Configuration) hookSetDeletePolicy(h *release.Hook) {
+ cfg.mutex.Lock()
+ defer cfg.mutex.Unlock()
+ if len(h.DeletePolicies) == 0 {
+ // TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
+ // resources. For all other resource types update in place if a
+ // resource with the same name already exists and is owned by the
+ // current release.
+ h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
+ }
+}
+
+// outputLogsByPolicy outputs a pods logs if the hook policy instructs it to
+func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace string, policy release.HookOutputLogPolicy) error {
+ if !hookHasOutputLogPolicy(h, policy) {
+ return nil
+ }
+ namespace, err := cfg.deriveNamespace(h, releaseNamespace)
+ if err != nil {
+ return err
+ }
+ switch h.Kind {
+ case "Job":
+ return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)})
+ case "Pod":
+ return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)})
+ default:
+ return nil
+ }
+}
+
+func (cfg *Configuration) outputContainerLogsForListOptions(namespace string, listOptions metav1.ListOptions) error {
+ podList, err := cfg.KubeClient.GetPodList(namespace, listOptions)
+ if err != nil {
+ return err
+ }
+
+ return cfg.KubeClient.OutputContainerLogsForPodList(podList, namespace, cfg.HookOutputFunc)
+}
+
+func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (string, error) {
+ tmp := struct {
+ Metadata struct {
+ Namespace string
+ }
+ }{}
+ err := yaml.Unmarshal([]byte(h.Manifest), &tmp)
+ if err != nil {
+ return "", fmt.Errorf("unable to parse metadata.namespace from kubernetes manifest for output logs hook %s: %w", h.Path, err)
+ }
+ if tmp.Metadata.Namespace == "" {
+ return namespace, nil
+ }
+ return tmp.Metadata.Namespace, nil
+}
+
+// hookHasOutputLogPolicy determines whether the defined hook output log policy matches the hook output log policies
+// supported by helm.
+func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool {
+ return slices.Contains(h.OutputLogPolicies, policy)
+}
diff --git a/helm/pkg/action/hooks_test.go b/helm/pkg/action/hooks_test.go
new file mode 100644
index 000000000..0270a0630
--- /dev/null
+++ b/helm/pkg/action/hooks_test.go
@@ -0,0 +1,493 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+func podManifestWithOutputLogs(hookDefinitions []release.HookOutputLogPolicy) string {
+ hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+ return fmt.Sprintf(`kind: Pod
+metadata:
+ name: finding-sharky,
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-output-log-policy": %s
+spec:
+ containers:
+ - name: sharky-test
+ image: fake-image
+ cmd: fake-command`, hookDefinitionString)
+}
+
+func podManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string {
+ hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+ return fmt.Sprintf(`kind: Pod
+metadata:
+ name: finding-george
+ namespace: sneaky-namespace
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-output-log-policy": %s
+spec:
+ containers:
+ - name: george-test
+ image: fake-image
+ cmd: fake-command`, hookDefinitionString)
+}
+
+func jobManifestWithOutputLog(hookDefinitions []release.HookOutputLogPolicy) string {
+ hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+ return fmt.Sprintf(`kind: Job
+apiVersion: batch/v1
+metadata:
+ name: losing-religion
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-output-log-policy": %s
+spec:
+ completions: 1
+ parallelism: 1
+ activeDeadlineSeconds: 30
+ template:
+ spec:
+ containers:
+ - name: religion-container
+ image: religion-image
+ cmd: religion-command`, hookDefinitionString)
+}
+
+func jobManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string {
+ hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+ return fmt.Sprintf(`kind: Job
+apiVersion: batch/v1
+metadata:
+ name: losing-religion
+ namespace: rem-namespace
+ annotations:
+ "helm.sh/hook": pre-install
+ "helm.sh/hook-output-log-policy": %s
+spec:
+ completions: 1
+ parallelism: 1
+ activeDeadlineSeconds: 30
+ template:
+ spec:
+ containers:
+ - name: religion-container
+ image: religion-image
+ cmd: religion-command`, hookDefinitionString)
+}
+
+func convertHooksToCommaSeparated(hookDefinitions []release.HookOutputLogPolicy) string {
+ var commaSeparated strings.Builder
+ for i, policy := range hookDefinitions {
+ if i+1 == len(hookDefinitions) {
+ commaSeparated.WriteString(policy.String())
+ } else {
+ commaSeparated.WriteString(policy.String() + ",")
+ }
+ }
+ return commaSeparated.String()
+}
+
+func TestInstallRelease_HookOutputLogsOnFailure(t *testing.T) {
+ // Should output on failure with expected namespace if hook-failed is set
+ runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true)
+ runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "sneaky-namespace", true)
+ runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true)
+ runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "rem-namespace", true)
+
+ // Should not output on failure with expected namespace if hook-succeed is set
+ runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+ runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+ runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+ runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+}
+
+func TestInstallRelease_HookOutputLogsOnSuccess(t *testing.T) {
+ // Should output on success with expected namespace if hook-succeeded is set
+ runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true)
+ runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "sneaky-namespace", true)
+ runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true)
+ runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "rem-namespace", true)
+
+ // Should not output on success if hook-failed is set
+ runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+ runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+ runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+ runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+}
+
+func TestInstallRelease_HooksOutputLogsOnSuccessAndFailure(t *testing.T) {
+ // Should output on success with expected namespace if hook-succeeded and hook-failed is set
+ runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+ runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true)
+ runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+ runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true)
+
+ // Should output on failure if hook-succeeded and hook-failed is set
+ runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+ runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true)
+ runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+ runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true)
+}
+
+func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) {
+ t.Helper()
+ var expectedOutput string
+ if shouldOutput {
+ expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace)
+ }
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "failed-hooks"
+ outBuffer := &bytes.Buffer{}
+ instAction.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
+
+ modTime := time.Now()
+ templates := []*common.File{
+ {Name: "templates/hello", ModTime: modTime, Data: []byte("hello: world")},
+ {Name: "templates/hooks", ModTime: modTime, Data: []byte(manifest)},
+ }
+ vals := map[string]interface{}{}
+
+ resi, err := instAction.Run(buildChartWithTemplates(templates), vals)
+ is.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Equal(expectedOutput, outBuffer.String())
+ is.Equal(rcommon.StatusDeployed, res.Info.Status)
+}
+
+func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) {
+ t.Helper()
+ var expectedOutput string
+ if shouldOutput {
+ expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace)
+ }
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "failed-hooks"
+ failingClient := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failingClient.WatchUntilReadyError = fmt.Errorf("failed watch")
+ instAction.cfg.KubeClient = failingClient
+ outBuffer := &bytes.Buffer{}
+ failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
+
+ modTime := time.Now()
+ templates := []*common.File{
+ {Name: "templates/hello", ModTime: modTime, Data: []byte("hello: world")},
+ {Name: "templates/hooks", ModTime: modTime, Data: []byte(manifest)},
+ }
+ vals := map[string]interface{}{}
+
+ resi, err := instAction.Run(buildChartWithTemplates(templates), vals)
+ is.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "failed pre-install")
+ is.Equal(expectedOutput, outBuffer.String())
+ is.Equal(rcommon.StatusFailed, res.Info.Status)
+}
+
+type HookFailedError struct{}
+
+func (e *HookFailedError) Error() string {
+ return "Hook failed!"
+}
+
+type HookFailingKubeClient struct {
+ kubefake.PrintingKubeClient
+ failOn resource.Info
+ deleteRecord []resource.Info
+}
+
+type HookFailingKubeWaiter struct {
+ *kubefake.PrintingKubeWaiter
+ failOn resource.Info
+}
+
+func (*HookFailingKubeClient) Build(reader io.Reader, _ bool) (kube.ResourceList, error) {
+ configMap := &v1.ConfigMap{}
+
+ err := yaml.NewYAMLOrJSONDecoder(reader, 1000).Decode(configMap)
+
+ if err != nil {
+ return kube.ResourceList{}, err
+ }
+
+ return kube.ResourceList{{
+ Name: configMap.Name,
+ Namespace: configMap.Namespace,
+ }}, nil
+}
+
+func (h *HookFailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
+ for _, res := range resources {
+ if res.Name == h.failOn.Name && res.Namespace == h.failOn.Namespace {
+ return &HookFailedError{}
+ }
+ }
+ return nil
+}
+
+func (h *HookFailingKubeClient) Delete(resources kube.ResourceList, deletionPropagation metav1.DeletionPropagation) (*kube.Result, []error) {
+ for _, res := range resources {
+ h.deleteRecord = append(h.deleteRecord, resource.Info{
+ Name: res.Name,
+ Namespace: res.Namespace,
+ })
+ }
+
+ return h.PrintingKubeClient.Delete(resources, deletionPropagation)
+}
+
+func (h *HookFailingKubeClient) GetWaiterWithOptions(strategy kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) {
+ waiter, _ := h.PrintingKubeClient.GetWaiterWithOptions(strategy, opts...)
+ return &HookFailingKubeWaiter{
+ PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter),
+ failOn: h.failOn,
+ }, nil
+}
+
+func TestHooksCleanUp(t *testing.T) {
+ hookEvent := release.HookPreInstall
+
+ testCases := []struct {
+ name string
+ inputRelease release.Release
+ failOn resource.Info
+ expectedDeleteRecord []resource.Info
+ expectError bool
+ }{
+ {
+ "Deletion hook runs for previously successful hook on failure of a heavier weight hook",
+ release.Release{
+ Name: "test-release",
+ Namespace: "test",
+ Hooks: []*release.Hook{
+ {
+ Name: "hook-1",
+ Kind: "ConfigMap",
+ Path: "templates/service_account.yaml",
+ Manifest: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: build-config-1
+ namespace: test
+data:
+ foo: bar
+`,
+ Weight: -5,
+ Events: []release.HookEvent{
+ hookEvent,
+ },
+ DeletePolicies: []release.HookDeletePolicy{
+ release.HookBeforeHookCreation,
+ release.HookSucceeded,
+ release.HookFailed,
+ },
+ LastRun: release.HookExecution{
+ Phase: release.HookPhaseSucceeded,
+ },
+ },
+ {
+ Name: "hook-2",
+ Kind: "ConfigMap",
+ Path: "templates/job.yaml",
+ Manifest: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: build-config-2
+ namespace: test
+data:
+ foo: bar
+`,
+ Weight: 0,
+ Events: []release.HookEvent{
+ hookEvent,
+ },
+ DeletePolicies: []release.HookDeletePolicy{
+ release.HookBeforeHookCreation,
+ release.HookSucceeded,
+ release.HookFailed,
+ },
+ LastRun: release.HookExecution{
+ Phase: release.HookPhaseFailed,
+ },
+ },
+ },
+ }, resource.Info{
+ Name: "build-config-2",
+ Namespace: "test",
+ }, []resource.Info{
+ {
+ // This should be in the record for `before-hook-creation`
+ Name: "build-config-1",
+ Namespace: "test",
+ },
+ {
+ // This should be in the record for `before-hook-creation`
+ Name: "build-config-2",
+ Namespace: "test",
+ },
+ {
+ // This should be in the record for cleaning up (the failure first)
+ Name: "build-config-2",
+ Namespace: "test",
+ },
+ {
+ // This should be in the record for cleaning up (then the previously successful)
+ Name: "build-config-1",
+ Namespace: "test",
+ },
+ }, true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ kubeClient := &HookFailingKubeClient{
+ kubefake.PrintingKubeClient{Out: io.Discard}, tc.failOn, []resource.Info{},
+ }
+
+ configuration := &Configuration{
+ Releases: storage.Init(driver.NewMemory()),
+ KubeClient: kubeClient,
+ Capabilities: common.DefaultCapabilities,
+ }
+
+ serverSideApply := true
+ err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, nil, 600, serverSideApply)
+
+ if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) {
+ t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord)
+ }
+
+ if err != nil && !tc.expectError {
+ t.Fatalf("Got an unexpected error.")
+ }
+
+ if err == nil && tc.expectError {
+ t.Fatalf("Expected and error but did not get it.")
+ }
+ })
+ }
+}
+
+func TestConfiguration_hookSetDeletePolicy(t *testing.T) {
+ tests := map[string]struct {
+ policies []release.HookDeletePolicy
+ expected []release.HookDeletePolicy
+ }{
+ "no polices specified result in the default policy": {
+ policies: nil,
+ expected: []release.HookDeletePolicy{
+ release.HookBeforeHookCreation,
+ },
+ },
+ "unknown policy is untouched": {
+ policies: []release.HookDeletePolicy{
+ release.HookDeletePolicy("never"),
+ },
+ expected: []release.HookDeletePolicy{
+ release.HookDeletePolicy("never"),
+ },
+ },
+ }
+ for name, tt := range tests {
+ t.Run(name, func(t *testing.T) {
+ cfg := &Configuration{}
+ h := &release.Hook{
+ DeletePolicies: tt.policies,
+ }
+ cfg.hookSetDeletePolicy(h)
+ assert.Equal(t, tt.expected, h.DeletePolicies)
+ })
+ }
+}
+
+func TestExecHook_WaitOptionsPassedDownstream(t *testing.T) {
+ is := assert.New(t)
+
+ failer := &kubefake.FailingKubeClient{
+ PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard},
+ }
+
+ configuration := &Configuration{
+ Releases: storage.Init(driver.NewMemory()),
+ KubeClient: failer,
+ Capabilities: common.DefaultCapabilities,
+ }
+
+ rel := &release.Release{
+ Name: "test-release",
+ Namespace: "test",
+ Hooks: []*release.Hook{
+ {
+ Name: "test-hook",
+ Kind: "ConfigMap",
+ Path: "templates/hook.yaml",
+ Manifest: `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-hook
+ namespace: test
+data:
+ foo: bar
+`,
+ Weight: 0,
+ Events: []release.HookEvent{
+ release.HookPreInstall,
+ },
+ },
+ },
+ }
+
+ // Use WithWaitContext as a marker WaitOption that we can track
+ ctx := context.Background()
+ waitOptions := []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+ err := configuration.execHook(rel, release.HookPreInstall, kube.StatusWatcherStrategy, waitOptions, 600, false)
+ is.NoError(err)
+
+ // Verify that WaitOptions were passed to GetWaiter
+ is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/helm/pkg/action/install.go b/helm/pkg/action/install.go
new file mode 100644
index 000000000..38355491a
--- /dev/null
+++ b/helm/pkg/action/install.go
@@ -0,0 +1,973 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "log/slog"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/template"
+ "time"
+
+ "github.com/Masterminds/sprig/v3"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/cli-runtime/pkg/resource"
+ "sigs.k8s.io/yaml"
+
+ ci "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/postrenderer"
+ "helm.sh/helm/v4/pkg/registry"
+ ri "helm.sh/helm/v4/pkg/release"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+// notesFileSuffix that we want to treat specially. It goes through the templating engine
+// but it's not a YAML file (resource) hence can't have hooks, etc. And the user actually
+// wants to see this file after rendering in the status command. However, it must be a suffix
+// since there can be filepath in front of it.
+const notesFileSuffix = "NOTES.txt"
+
+const defaultDirectoryPermission = 0755
+
+// Install performs an installation operation.
+type Install struct {
+ cfg *Configuration
+
+ ChartPathOptions
+
+ // ForceReplace will, if set to `true`, ignore certain warnings and perform the install anyway.
+ //
+ // This should be used with caution.
+ ForceReplace bool
+ // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager")
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+ ForceConflicts bool
+ // ServerSideApply when true (default) will enable changes to be applied via Kubernetes server-side apply
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+ ServerSideApply bool
+ CreateNamespace bool
+ // DryRunStrategy can be set to prepare, but not execute the operation and whether or not to interact with the remote cluster
+ DryRunStrategy DryRunStrategy
+ // HideSecret can be set to true when DryRun is enabled in order to hide
+ // Kubernetes Secrets in the output. It cannot be used outside of DryRun.
+ HideSecret bool
+ DisableHooks bool
+ Replace bool
+ WaitStrategy kube.WaitStrategy
+ WaitOptions []kube.WaitOption
+ WaitForJobs bool
+ Devel bool
+ DependencyUpdate bool
+ Timeout time.Duration
+ Namespace string
+ ReleaseName string
+ GenerateName bool
+ NameTemplate string
+ Description string
+ OutputDir string
+ // RollbackOnFailure enables rolling back (uninstalling) the release on failure if set
+ RollbackOnFailure bool
+ SkipCRDs bool
+ SubNotes bool
+ HideNotes bool
+ SkipSchemaValidation bool
+ DisableOpenAPIValidation bool
+ IncludeCRDs bool
+ Labels map[string]string
+ // KubeVersion allows specifying a custom kubernetes version to use and
+ // APIVersions allows a manual set of supported API Versions to be passed
+ // (for things like templating).
+ KubeVersion *common.KubeVersion
+ APIVersions common.VersionSet
+ // Used by helm template to render charts with .Release.IsUpgrade. Ignored if Dry-Run is false
+ IsUpgrade bool
+ // Enable DNS lookups when rendering templates
+ EnableDNS bool
+ // Used by helm template to add the release as part of OutputDir path
+ // OutputDir/
+ UseReleaseName bool
+ // TakeOwnership will ignore the check for helm annotations and take ownership of the resources.
+ TakeOwnership bool
+ PostRenderer postrenderer.PostRenderer
+ // Lock to control raceconditions when the process receives a SIGTERM
+ Lock sync.Mutex
+ goroutineCount atomic.Int32
+}
+
+// ChartPathOptions captures common options used for controlling chart paths
+type ChartPathOptions struct {
+ CaFile string // --ca-file
+ CertFile string // --cert-file
+ KeyFile string // --key-file
+ InsecureSkipTLSVerify bool // --insecure-skip-verify
+ PlainHTTP bool // --plain-http
+ Keyring string // --keyring
+ Password string // --password
+ PassCredentialsAll bool // --pass-credentials
+ RepoURL string // --repo
+ Username string // --username
+ Verify bool // --verify
+ Version string // --version
+
+ // registryClient provides a registry client but is not added with
+ // options from a flag
+ registryClient *registry.Client
+}
+
+// NewInstall creates a new Install object with the given configuration.
+func NewInstall(cfg *Configuration) *Install {
+ in := &Install{
+ cfg: cfg,
+ ServerSideApply: true,
+ DryRunStrategy: DryRunNone,
+ }
+ in.registryClient = cfg.RegistryClient
+
+ return in
+}
+
+// SetRegistryClient sets the registry client for the install action
+func (i *Install) SetRegistryClient(registryClient *registry.Client) {
+ i.registryClient = registryClient
+}
+
+// GetRegistryClient get the registry client.
+func (i *Install) GetRegistryClient() *registry.Client {
+ return i.registryClient
+}
+
+func (i *Install) installCRDs(crds []chart.CRD) error {
+ // We do these one file at a time in the order they were read.
+ totalItems := []*resource.Info{}
+ for _, obj := range crds {
+ // Read in the resources
+ res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false)
+ if err != nil {
+ return fmt.Errorf("failed to install CRD %s: %w", obj.Name, err)
+ }
+
+ // Send them to Kube
+ if _, err := i.cfg.KubeClient.Create(
+ res,
+ kube.ClientCreateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts)); err != nil {
+ // If the error is CRD already exists, continue.
+ if apierrors.IsAlreadyExists(err) {
+ crdName := obj.Name
+ i.cfg.Logger().Debug("CRD is already present. Skipping", "crd", crdName)
+ continue
+ }
+ return fmt.Errorf("failed to install CRD %s: %w", obj.Name, err)
+ }
+ totalItems = append(totalItems, res...)
+ }
+ if len(totalItems) > 0 {
+ var waiter kube.Waiter
+ var err error
+ if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...)
+ } else {
+ waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
+ }
+ if err != nil {
+ return fmt.Errorf("unable to get waiter: %w", err)
+ }
+ // Give time for the CRD to be recognized.
+ if err := waiter.Wait(totalItems, 60*time.Second); err != nil {
+ return err
+ }
+
+ // If we have already gathered the capabilities, we need to invalidate
+ // the cache so that the new CRDs are recognized. This should only be
+ // the case when an action configuration is reused for multiple actions,
+ // as otherwise it is later loaded by ourselves when getCapabilities
+ // is called later on in the installation process.
+ if i.cfg.Capabilities != nil {
+ discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient()
+ if err != nil {
+ return err
+ }
+
+ i.cfg.Logger().Debug("clearing discovery cache")
+ discoveryClient.Invalidate()
+
+ _, _ = discoveryClient.ServerGroups()
+ }
+
+ // Invalidate the REST mapper, since it will not have the new CRDs
+ // present.
+ restMapper, err := i.cfg.RESTClientGetter.ToRESTMapper()
+ if err != nil {
+ return err
+ }
+ if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
+ i.cfg.Logger().Debug("clearing REST mapper cache")
+ resettable.Reset()
+ }
+ }
+ return nil
+}
+
+// Run executes the installation
+//
+// If DryRun is set to true, this will prepare the release, but not install it
+
+func (i *Install) Run(chrt ci.Charter, vals map[string]interface{}) (ri.Releaser, error) {
+ ctx := context.Background()
+ return i.RunWithContext(ctx, chrt, vals)
+}
+
+// RunWithContext executes the installation with Context
+//
+// When the task is cancelled through ctx, the function returns and the install
+// proceeds in the background.
+func (i *Install) RunWithContext(ctx context.Context, ch ci.Charter, vals map[string]interface{}) (ri.Releaser, error) {
+ var chrt *chart.Chart
+ switch c := ch.(type) {
+ case *chart.Chart:
+ chrt = c
+ case chart.Chart:
+ chrt = &c
+ default:
+ return nil, errors.New("invalid chart apiVersion")
+ }
+
+ if interactWithServer(i.DryRunStrategy) {
+ if err := i.cfg.KubeClient.IsReachable(); err != nil {
+ i.cfg.Logger().Error(fmt.Sprintf("cluster reachability check failed: %v", err))
+ return nil, fmt.Errorf("cluster reachability check failed: %w", err)
+ }
+ }
+
+ // HideSecret must be used with dry run. Otherwise, return an error.
+ if !isDryRun(i.DryRunStrategy) && i.HideSecret {
+ i.cfg.Logger().Error("hiding Kubernetes secrets requires a dry-run mode")
+ return nil, errors.New("hiding Kubernetes secrets requires a dry-run mode")
+ }
+
+ if err := i.availableName(); err != nil {
+ i.cfg.Logger().Error("release name check failed", slog.Any("error", err))
+ return nil, fmt.Errorf("release name check failed: %w", err)
+ }
+
+ if err := chartutil.ProcessDependencies(chrt, vals); err != nil {
+ i.cfg.Logger().Error("chart dependencies processing failed", slog.Any("error", err))
+ return nil, fmt.Errorf("chart dependencies processing failed: %w", err)
+ }
+
+ // Pre-install anything in the crd/ directory. We do this before Helm
+ // contacts the upstream server and builds the capabilities object.
+ if crds := chrt.CRDObjects(); interactWithServer(i.DryRunStrategy) && !i.SkipCRDs && len(crds) > 0 {
+ // On dry run, bail here
+ if isDryRun(i.DryRunStrategy) {
+ i.cfg.Logger().Warn("This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
+ } else if err := i.installCRDs(crds); err != nil {
+ return nil, err
+ }
+ }
+
+ if !interactWithServer(i.DryRunStrategy) {
+ // Add mock objects in here so it doesn't use Kube API server
+ // NOTE(bacongobbler): used for `helm template`
+ i.cfg.Capabilities = common.DefaultCapabilities.Copy()
+ if i.KubeVersion != nil {
+ i.cfg.Capabilities.KubeVersion = *i.KubeVersion
+ }
+ i.cfg.Capabilities.APIVersions = append(i.cfg.Capabilities.APIVersions, i.APIVersions...)
+ i.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard}
+
+ mem := driver.NewMemory()
+ mem.SetNamespace(i.Namespace)
+ i.cfg.Releases = storage.Init(mem)
+ } else if interactWithServer(i.DryRunStrategy) && len(i.APIVersions) > 0 {
+ i.cfg.Logger().Debug("API Version list given outside of client only mode, this list will be ignored")
+ }
+
+ // Make sure if RollbackOnFailure is set, that wait is set as well. This makes it so
+ // the user doesn't have to specify both
+ if i.WaitStrategy == kube.HookOnlyStrategy && i.RollbackOnFailure {
+ i.WaitStrategy = kube.StatusWatcherStrategy
+ }
+
+ caps, err := i.cfg.getCapabilities()
+ if err != nil {
+ return nil, err
+ }
+
+ // special case for helm template --is-upgrade
+ isUpgrade := i.IsUpgrade && isDryRun(i.DryRunStrategy)
+ options := common.ReleaseOptions{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Revision: 1,
+ IsInstall: !isUpgrade,
+ IsUpgrade: isUpgrade,
+ }
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chrt, vals, options, caps, i.SkipSchemaValidation)
+ if err != nil {
+ return nil, err
+ }
+
+ if driver.ContainsSystemLabels(i.Labels) {
+ return nil, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ }
+
+ rel := i.createRelease(chrt, vals, i.Labels)
+
+ var manifestDoc *bytes.Buffer
+ rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, interactWithServer(i.DryRunStrategy), i.EnableDNS, i.HideSecret)
+ // Even for errors, attach this if available
+ if manifestDoc != nil {
+ rel.Manifest = manifestDoc.String()
+ }
+ // Check error from render
+ if err != nil {
+ rel.SetStatus(rcommon.StatusFailed, fmt.Sprintf("failed to render resource: %s", err.Error()))
+ // Return a release with partial data so that the client can show debugging information.
+ return rel, err
+ }
+
+ // Mark this release as in-progress
+ rel.SetStatus(rcommon.StatusPendingInstall, "Initial install underway")
+
+ var toBeAdopted kube.ResourceList
+ resources, err := i.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), !i.DisableOpenAPIValidation)
+ if err != nil {
+ return nil, fmt.Errorf("unable to build kubernetes objects from release manifest: %w", err)
+ }
+
+ // It is safe to use "forceOwnership" here because these are resources currently rendered by the chart.
+ err = resources.Visit(setMetadataVisitor(rel.Name, rel.Namespace, true))
+ if err != nil {
+ return nil, err
+ }
+
+ // Install requires an extra validation step of checking that resources
+ // don't already exist before we actually create resources. If we continue
+ // forward and create the release object with resources that already exist,
+ // we'll end up in a state where we will delete those resources upon
+ // deleting the release because the manifest will be pointing at that
+ // resource
+ if interactWithServer(i.DryRunStrategy) && !isUpgrade && len(resources) > 0 {
+ if i.TakeOwnership {
+ toBeAdopted, err = requireAdoption(resources)
+ } else {
+ toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("unable to continue with install: %w", err)
+ }
+ }
+
+ // Bail out here if it is a dry run
+ if isDryRun(i.DryRunStrategy) {
+ rel.Info.Description = "Dry run complete"
+ return rel, nil
+ }
+
+ if i.CreateNamespace {
+ ns := &v1.Namespace{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Namespace",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: i.Namespace,
+ Labels: map[string]string{
+ "name": i.Namespace,
+ },
+ },
+ }
+ buf, err := yaml.Marshal(ns)
+ if err != nil {
+ return nil, err
+ }
+ resourceList, err := i.cfg.KubeClient.Build(bytes.NewBuffer(buf), true)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := i.cfg.KubeClient.Create(
+ resourceList,
+ kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false)); err != nil && !apierrors.IsAlreadyExists(err) {
+ return nil, err
+ }
+ }
+
+ // If Replace is true, we need to supersede the last release.
+ if i.Replace {
+ if err := i.replaceRelease(rel); err != nil {
+ return nil, err
+ }
+ }
+
+ // Store the release in history before continuing. We always know that this is a create operation
+ if err := i.cfg.Releases.Create(rel); err != nil {
+ // We could try to recover gracefully here, but since nothing has been installed
+ // yet, this is probably safer than trying to continue when we know storage is
+ // not working.
+ return rel, err
+ }
+
+ rel, err = i.performInstallCtx(ctx, rel, toBeAdopted, resources)
+ if err != nil {
+ rel, err = i.failRelease(rel, err)
+ }
+ return rel, err
+}
+
+func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+ type Msg struct {
+ r *release.Release
+ e error
+ }
+ resultChan := make(chan Msg, 1)
+
+ go func() {
+ i.goroutineCount.Add(1)
+ rel, err := i.performInstall(rel, toBeAdopted, resources)
+ resultChan <- Msg{rel, err}
+ i.goroutineCount.Add(-1)
+ }()
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ return rel, err
+ case msg := <-resultChan:
+ return msg.r, msg.e
+ }
+}
+
+// getGoroutineCount return the number of running routines
+func (i *Install) getGoroutineCount() int32 {
+ return i.goroutineCount.Load()
+}
+
+func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+ var err error
+ // pre-install hooks
+ if !i.DisableHooks {
+ if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil {
+ return rel, fmt.Errorf("failed pre-install: %s", err)
+ }
+ }
+
+ // At this point, we can do the install. Note that before we were detecting whether to
+ // do an update, but it's not clear whether we WANT to do an update if the reuse is set
+ // to true, since that is basically an upgrade operation.
+ if len(toBeAdopted) == 0 && len(resources) > 0 {
+ _, err = i.cfg.KubeClient.Create(
+ resources,
+ kube.ClientCreateOptionServerSideApply(i.ServerSideApply, false))
+ } else if len(resources) > 0 {
+ updateThreeWayMergeForUnstructured := i.TakeOwnership && !i.ServerSideApply // Use three-way merge when taking ownership (and not using server-side apply)
+ _, err = i.cfg.KubeClient.Update(
+ toBeAdopted,
+ resources,
+ kube.ClientUpdateOptionForceReplace(i.ForceReplace),
+ kube.ClientUpdateOptionServerSideApply(i.ServerSideApply, i.ForceConflicts),
+ kube.ClientUpdateOptionThreeWayMergeForUnstructured(updateThreeWayMergeForUnstructured),
+ kube.ClientUpdateOptionUpgradeClientSideFieldManager(true))
+ }
+ if err != nil {
+ return rel, err
+ }
+
+ var waiter kube.Waiter
+ if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...)
+ } else {
+ waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
+ }
+ if err != nil {
+ return rel, fmt.Errorf("failed to get waiter: %w", err)
+ }
+
+ if i.WaitForJobs {
+ err = waiter.WaitWithJobs(resources, i.Timeout)
+ } else {
+ err = waiter.Wait(resources, i.Timeout)
+ }
+ if err != nil {
+ return rel, err
+ }
+
+ if !i.DisableHooks {
+ if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil {
+ return rel, fmt.Errorf("failed post-install: %s", err)
+ }
+ }
+
+ if len(i.Description) > 0 {
+ rel.SetStatus(rcommon.StatusDeployed, i.Description)
+ } else {
+ rel.SetStatus(rcommon.StatusDeployed, "Install complete")
+ }
+
+ // This is a tricky case. The release has been created, but the result
+ // cannot be recorded. The truest thing to tell the user is that the
+ // release was created. However, the user will not be able to do anything
+ // further with this release.
+ //
+ // One possible strategy would be to do a timed retry to see if we can get
+ // this stored in the future.
+ if err := i.recordRelease(rel); err != nil {
+ i.cfg.Logger().Error("failed to record the release", slog.Any("error", err))
+ }
+
+ return rel, nil
+}
+
+func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
+ rel.SetStatus(rcommon.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
+ if i.RollbackOnFailure {
+ i.cfg.Logger().Debug("install failed and rollback-on-failure is set, uninstalling release", "release", i.ReleaseName)
+ uninstall := NewUninstall(i.cfg)
+ uninstall.DisableHooks = i.DisableHooks
+ uninstall.KeepHistory = false
+ uninstall.Timeout = i.Timeout
+ uninstall.WaitStrategy = i.WaitStrategy
+ uninstall.WaitOptions = i.WaitOptions
+ if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil {
+ return rel, fmt.Errorf("an error occurred while uninstalling the release. original install error: %w: %w", err, uninstallErr)
+ }
+ return rel, fmt.Errorf("release %s failed, and has been uninstalled due to rollback-on-failure being set: %w", i.ReleaseName, err)
+ }
+ i.recordRelease(rel) // Ignore the error, since we have another error to deal with.
+ return rel, err
+}
+
+// availableName tests whether a name is available
+//
+// Roughly, this will return an error if name is
+//
+// - empty
+// - too long
+// - already in use, and not deleted
+// - used by a deleted release, and i.Replace is false
+func (i *Install) availableName() error {
+ start := i.ReleaseName
+
+ if err := chartutil.ValidateReleaseName(start); err != nil {
+ return fmt.Errorf("release name %q: %w", start, err)
+ }
+ // On dry run, bail here
+ if isDryRun(i.DryRunStrategy) {
+ return nil
+ }
+
+ h, err := i.cfg.Releases.History(start)
+ if err != nil || len(h) < 1 {
+ return nil
+ }
+
+ hl, err := releaseListToV1List(h)
+ if err != nil {
+ return err
+ }
+
+ releaseutil.Reverse(hl, releaseutil.SortByRevision)
+ rel := hl[0]
+
+ if st := rel.Info.Status; i.Replace && (st == rcommon.StatusUninstalled || st == rcommon.StatusFailed) {
+ return nil
+ }
+ return errors.New("cannot reuse a name that is still in use")
+}
+
+func releaseListToV1List(ls []ri.Releaser) ([]*release.Release, error) {
+ rls := make([]*release.Release, 0, len(ls))
+ for _, val := range ls {
+ rel, err := releaserToV1Release(val)
+ if err != nil {
+ return nil, err
+ }
+ rls = append(rls, rel)
+ }
+
+ return rls, nil
+}
+
+func releaseV1ListToReleaserList(ls []*release.Release) ([]ri.Releaser, error) {
+ rls := make([]ri.Releaser, 0, len(ls))
+ for _, val := range ls {
+ rls = append(rls, val)
+ }
+
+ return rls, nil
+}
+
+// createRelease creates a new release object
+func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}, labels map[string]string) *release.Release {
+ ts := i.cfg.Now()
+
+ r := &release.Release{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Chart: chrt,
+ Config: rawVals,
+ Info: &release.Info{
+ FirstDeployed: ts,
+ LastDeployed: ts,
+ Status: rcommon.StatusUnknown,
+ },
+ Version: 1,
+ Labels: labels,
+ ApplyMethod: string(determineReleaseSSApplyMethod(i.ServerSideApply)),
+ }
+
+ return r
+}
+
+// recordRelease with an update operation in case reuse has been set.
+func (i *Install) recordRelease(r *release.Release) error {
+ // This is a legacy function which has been reduced to a oneliner. Could probably
+ // refactor it out.
+ return i.cfg.Releases.Update(r)
+}
+
+// replaceRelease replaces an older release with this one
+//
+// This allows us to reuse names by superseding an existing release with a new one
+func (i *Install) replaceRelease(rel *release.Release) error {
+ hist, err := i.cfg.Releases.History(rel.Name)
+ if err != nil || len(hist) == 0 {
+ // No releases exist for this name, so we can return early
+ return nil
+ }
+ hl, err := releaseListToV1List(hist)
+ if err != nil {
+ return err
+ }
+
+ releaseutil.Reverse(hl, releaseutil.SortByRevision)
+ last := hl[0]
+
+ // Update version to the next available
+ rel.Version = last.Version + 1
+
+ // Do not change the status of a failed release.
+ if last.Info.Status == rcommon.StatusFailed {
+ return nil
+ }
+
+ // For any other status, mark it as superseded and store the old record
+ last.SetStatus(rcommon.StatusSuperseded, "superseded by new release")
+ return i.recordRelease(last)
+}
+
+// write the to /. controls if the file is created or content will be appended
+func writeToFile(outputDir string, name string, data string, appendData bool) error {
+ outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+
+ err := ensureDirectoryForFile(outfileName)
+ if err != nil {
+ return err
+ }
+
+ f, err := createOrOpenFile(outfileName, appendData)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data)
+
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("wrote %s\n", outfileName)
+ return nil
+}
+
+func createOrOpenFile(filename string, appendData bool) (*os.File, error) {
+ if appendData {
+ return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+ }
+ return os.Create(filename)
+}
+
+// check if the directory exists to create file. creates if doesn't exist
+func ensureDirectoryForFile(file string) error {
+ baseDir := filepath.Dir(file)
+ _, err := os.Stat(baseDir)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return err
+ }
+
+ return os.MkdirAll(baseDir, defaultDirectoryPermission)
+}
+
+// NameAndChart returns the name and chart that should be used.
+//
+// This will read the flags and handle name generation if necessary.
+func (i *Install) NameAndChart(args []string) (string, string, error) {
+ flagsNotSet := func() error {
+ if i.GenerateName {
+ return errors.New("cannot set --generate-name and also specify a name")
+ }
+ if i.NameTemplate != "" {
+ return errors.New("cannot set --name-template and also specify a name")
+ }
+ return nil
+ }
+
+ if len(args) > 2 {
+ return args[0], args[1], fmt.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
+ }
+
+ if len(args) == 2 {
+ return args[0], args[1], flagsNotSet()
+ }
+
+ if i.NameTemplate != "" {
+ name, err := TemplateName(i.NameTemplate)
+ return name, args[0], err
+ }
+
+ if i.ReleaseName != "" {
+ return i.ReleaseName, args[0], nil
+ }
+
+ if !i.GenerateName {
+ return "", args[0], errors.New("must either provide a name or specify --generate-name")
+ }
+
+ base := filepath.Base(args[0])
+ if base == "." || base == "" {
+ base = "chart"
+ }
+ // if present, strip out the file extension from the name
+ if idx := strings.Index(base, "."); idx != -1 {
+ base = base[0:idx]
+ }
+
+ return fmt.Sprintf("%s-%d", base, time.Now().Unix()), args[0], nil
+}
+
+// TemplateName renders a name template, returning the name or an error.
+func TemplateName(nameTemplate string) (string, error) {
+ if nameTemplate == "" {
+ return "", nil
+ }
+
+ t, err := template.New("name-template").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
+ if err != nil {
+ return "", err
+ }
+ var b bytes.Buffer
+ if err := t.Execute(&b, nil); err != nil {
+ return "", err
+ }
+
+ return b.String(), nil
+}
+
+// CheckDependencies checks the dependencies for a chart.
+func CheckDependencies(ch ci.Charter, reqs []ci.Dependency) error {
+ ac, err := ci.NewAccessor(ch)
+ if err != nil {
+ return err
+ }
+
+ var missing []string
+
+OUTER:
+ for _, r := range reqs {
+ rac, err := ci.NewDependencyAccessor(r)
+ if err != nil {
+ return err
+ }
+ for _, d := range ac.Dependencies() {
+ dac, err := ci.NewAccessor(d)
+ if err != nil {
+ return err
+ }
+ if dac.Name() == rac.Name() {
+ continue OUTER
+ }
+ }
+ missing = append(missing, rac.Name())
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
+ }
+ return nil
+}
+
+func portOrDefault(u *url.URL) string {
+ if p := u.Port(); p != "" {
+ return p
+ }
+
+ switch u.Scheme {
+ case "http":
+ return "80"
+ case "https":
+ return "443"
+ default:
+ return ""
+ }
+}
+
+func urlEqual(u1, u2 *url.URL) bool {
+ return u1.Scheme == u2.Scheme && u1.Hostname() == u2.Hostname() && portOrDefault(u1) == portOrDefault(u2)
+}
+
+// LocateChart looks for a chart directory in known places, and returns either the full path or an error.
+//
+// This does not ensure that the chart is well-formed; only that the requested filename exists.
+//
+// Order of resolution:
+// - relative to current working directory when --repo flag is not presented
+// - if path is absolute or begins with '.', error out here
+// - URL
+//
+// If 'verify' was set on ChartPathOptions, this will attempt to also verify the chart.
+func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (string, error) {
+ if registry.IsOCI(name) && c.registryClient == nil {
+ return "", fmt.Errorf("unable to lookup chart %q, missing registry client", name)
+ }
+
+ name = strings.TrimSpace(name)
+ version := strings.TrimSpace(c.Version)
+
+ if c.RepoURL == "" {
+ if _, err := os.Stat(name); err == nil {
+ abs, err := filepath.Abs(name)
+ if err != nil {
+ return abs, err
+ }
+ if c.Verify {
+ if _, err := downloader.VerifyChart(abs, abs+".prov", c.Keyring); err != nil {
+ return "", err
+ }
+ }
+ return abs, nil
+ }
+ if filepath.IsAbs(name) || strings.HasPrefix(name, ".") {
+ return name, fmt.Errorf("path %q not found", name)
+ }
+ }
+
+ dl := downloader.ChartDownloader{
+ Out: os.Stdout,
+ Keyring: c.Keyring,
+ Getters: getter.All(settings),
+ Options: []getter.Option{
+ getter.WithPassCredentialsAll(c.PassCredentialsAll),
+ getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile),
+ getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSVerify),
+ getter.WithPlainHTTP(c.PlainHTTP),
+ getter.WithBasicAuth(c.Username, c.Password),
+ },
+ RepositoryConfig: settings.RepositoryConfig,
+ RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
+ RegistryClient: c.registryClient,
+ }
+
+ if registry.IsOCI(name) {
+ dl.Options = append(dl.Options, getter.WithRegistryClient(c.registryClient))
+ }
+
+ if c.Verify {
+ dl.Verify = downloader.VerifyAlways
+ }
+ if c.RepoURL != "" {
+ chartURL, err := repo.FindChartInRepoURL(
+ c.RepoURL,
+ name,
+ getter.All(settings),
+ repo.WithChartVersion(version),
+ repo.WithClientTLS(c.CertFile, c.KeyFile, c.CaFile),
+ repo.WithUsernamePassword(c.Username, c.Password),
+ repo.WithInsecureSkipTLSVerify(c.InsecureSkipTLSVerify),
+ repo.WithPassCredentialsAll(c.PassCredentialsAll),
+ )
+ if err != nil {
+ return "", err
+ }
+ name = chartURL
+
+ // Only pass the user/pass on when the user has said to or when the
+ // location of the chart repo and the chart are the same domain.
+ u1, err := url.Parse(c.RepoURL)
+ if err != nil {
+ return "", err
+ }
+ u2, err := url.Parse(chartURL)
+ if err != nil {
+ return "", err
+ }
+
+ // Host on URL (returned from url.Parse) contains the port if present.
+ // This check ensures credentials are not passed between different
+ // services on different ports.
+ if c.PassCredentialsAll || urlEqual(u1, u2) {
+ dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password))
+ } else {
+ dl.Options = append(dl.Options, getter.WithBasicAuth("", ""))
+ }
+ } else {
+ dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password))
+ }
+
+ if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil {
+ return "", err
+ }
+
+ filename, _, err := dl.DownloadToCache(name, version)
+ if err != nil {
+ return "", err
+ }
+
+ lname, err := filepath.Abs(filename)
+ if err != nil {
+ return filename, err
+ }
+ return lname, nil
+}
diff --git a/helm/pkg/action/install_test.go b/helm/pkg/action/install_test.go
new file mode 100644
index 000000000..47080aef8
--- /dev/null
+++ b/helm/pkg/action/install_test.go
@@ -0,0 +1,1210 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kuberuntime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest/fake"
+
+ ci "helm.sh/helm/v4/pkg/chart"
+
+ "helm.sh/helm/v4/internal/test"
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/registry"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+type nameTemplateTestCase struct {
+ tpl string
+ expected string
+ expectedErrorStr string
+}
+
+func createDummyResourceList(owned bool) kube.ResourceList {
+ obj := &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "dummyName",
+ Namespace: "spaced",
+ },
+ }
+
+ if owned {
+ obj.Labels = map[string]string{
+ "app.kubernetes.io/managed-by": "Helm",
+ }
+ obj.Annotations = map[string]string{
+ "meta.helm.sh/release-name": "test-install-release",
+ "meta.helm.sh/release-namespace": "spaced",
+ }
+ }
+
+ resInfo := resource.Info{
+ Name: "dummyName",
+ Namespace: "spaced",
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
+ GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+ Scope: meta.RESTScopeNamespace,
+ },
+ Object: obj,
+ }
+ body := io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(appsv1Codec, obj))))
+
+ resInfo.Client = &fake.RESTClient{
+ GroupVersion: schema.GroupVersion{Group: "apps", Version: "v1"},
+ NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
+ Client: fake.CreateHTTPClient(func(_ *http.Request) (*http.Response, error) {
+ header := http.Header{}
+ header.Set("Content-Type", kuberuntime.ContentTypeJSON)
+ return &http.Response{
+ StatusCode: http.StatusOK,
+ Header: header,
+ Body: body,
+ }, nil
+ }),
+ }
+ var resourceList kube.ResourceList
+ resourceList.Append(&resInfo)
+ return resourceList
+}
+
+func installActionWithConfig(config *Configuration) *Install {
+ instAction := NewInstall(config)
+ instAction.Namespace = "spaced"
+ instAction.ReleaseName = "test-install-release"
+
+ return instAction
+}
+
+func installAction(t *testing.T) *Install {
+ t.Helper()
+ config := actionConfigFixture(t)
+ instAction := NewInstall(config)
+ instAction.Namespace = "spaced"
+ instAction.ReleaseName = "test-install-release"
+
+ return instAction
+}
+
+func TestInstallRelease(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ instAction := installAction(t)
+ vals := map[string]interface{}{}
+ ctx, done := context.WithCancel(t.Context())
+ resi, err := instAction.RunWithContext(ctx, buildChart(), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Equal(res.Name, "test-install-release", "Expected release name.")
+ is.Equal(res.Namespace, "spaced")
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+
+ is.Len(rel.Hooks, 1)
+ is.Equal(rel.Hooks[0].Manifest, manifestWithHook)
+ is.Equal(rel.Hooks[0].Events[0], release.HookPostInstall)
+ is.Equal(rel.Hooks[0].Events[1], release.HookPreDelete, "Expected event 0 is pre-delete")
+
+ is.NotEqual(len(res.Manifest), 0)
+ is.NotEqual(len(rel.Manifest), 0)
+ is.Contains(rel.Manifest, "---\n# Source: hello/templates/hello\nhello: world")
+ is.Equal(rel.Info.Description, "Install complete")
+
+ // Detecting previous bug where context termination after successful release
+ // caused release to fail.
+ done()
+ time.Sleep(time.Millisecond * 100)
+ lastRelease, err := instAction.cfg.Releases.Last(rel.Name)
+ req.NoError(err)
+ lrel, err := releaserToV1Release(lastRelease)
+ is.NoError(err)
+ is.Equal(lrel.Info.Status, rcommon.StatusDeployed)
+}
+
+func TestInstallReleaseWithTakeOwnership_ResourceNotOwned(t *testing.T) {
+ // This test will test checking ownership of a resource
+ // returned by the fake client. If the resource is not
+ // owned by the chart, ownership is taken.
+ // To verify ownership has been taken, the fake client
+ // needs to store state which is a bigger rewrite.
+ // TODO: Ensure fake kube client stores state. Maybe using
+ // "k8s.io/client-go/kubernetes/fake" could be sufficient? i.e
+ // "Client{Namespace: namespace, kubeClient: k8sfake.NewClientset()}"
+
+ is := assert.New(t)
+
+ // Resource list from cluster is NOT owned by helm chart
+ config := actionConfigFixtureWithDummyResources(t, createDummyResourceList(false))
+ instAction := installActionWithConfig(config)
+ instAction.TakeOwnership = true
+ resi, err := instAction.Run(buildChart(), nil)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallReleaseWithTakeOwnership_ResourceOwned(t *testing.T) {
+ is := assert.New(t)
+
+ // Resource list from cluster is owned by helm chart
+ config := actionConfigFixtureWithDummyResources(t, createDummyResourceList(true))
+ instAction := installActionWithConfig(config)
+ instAction.TakeOwnership = false
+ resi, err := instAction.Run(buildChart(), nil)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallReleaseWithTakeOwnership_ResourceOwnedNoFlag(t *testing.T) {
+ is := assert.New(t)
+
+ // Resource list from cluster is NOT owned by helm chart
+ config := actionConfigFixtureWithDummyResources(t, createDummyResourceList(false))
+ instAction := installActionWithConfig(config)
+ _, err := instAction.Run(buildChart(), nil)
+ is.Error(err)
+ is.Contains(err.Error(), "unable to continue with install")
+}
+
+func TestInstallReleaseWithValues(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ userVals := map[string]interface{}{
+ "nestedKey": map[string]interface{}{
+ "simpleKey": "simpleValue",
+ },
+ }
+ expectedUserValues := map[string]interface{}{
+ "nestedKey": map[string]interface{}{
+ "simpleKey": "simpleValue",
+ },
+ }
+ resi, err := instAction.Run(buildChart(withSampleValues()), userVals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Equal(res.Name, "test-install-release", "Expected release name.")
+ is.Equal(res.Namespace, "spaced")
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+
+ is.Len(rel.Hooks, 1)
+ is.Equal(rel.Hooks[0].Manifest, manifestWithHook)
+ is.Equal(rel.Hooks[0].Events[0], release.HookPostInstall)
+ is.Equal(rel.Hooks[0].Events[1], release.HookPreDelete, "Expected event 0 is pre-delete")
+
+ is.NotEqual(len(res.Manifest), 0)
+ is.NotEqual(len(rel.Manifest), 0)
+ is.Contains(rel.Manifest, "---\n# Source: hello/templates/hello\nhello: world")
+ is.Equal("Install complete", rel.Info.Description)
+ is.Equal(expectedUserValues, rel.Config)
+}
+
+func TestInstallRelease_NoName(t *testing.T) {
+ instAction := installAction(t)
+ instAction.ReleaseName = ""
+ vals := map[string]interface{}{}
+ _, err := instAction.Run(buildChart(), vals)
+ if err == nil {
+ t.Fatal("expected failure when no name is specified")
+ }
+ assert.Contains(t, err.Error(), "no name provided")
+}
+
+func TestInstallRelease_WithNotes(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "with-notes"
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(withNotes("note here")), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ is.Equal(res.Name, "with-notes")
+ is.Equal(res.Namespace, "spaced")
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+ is.Len(rel.Hooks, 1)
+ is.Equal(rel.Hooks[0].Manifest, manifestWithHook)
+ is.Equal(rel.Hooks[0].Events[0], release.HookPostInstall)
+ is.Equal(rel.Hooks[0].Events[1], release.HookPreDelete, "Expected event 0 is pre-delete")
+ is.NotEqual(len(res.Manifest), 0)
+ is.NotEqual(len(rel.Manifest), 0)
+ is.Contains(rel.Manifest, "---\n# Source: hello/templates/hello\nhello: world")
+ is.Equal(rel.Info.Description, "Install complete")
+
+ is.Equal(rel.Info.Notes, "note here")
+}
+
+func TestInstallRelease_WithNotesRendered(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "with-notes"
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(withNotes("got-{{.Release.Name}}")), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+
+ expectedNotes := fmt.Sprintf("got-%s", res.Name)
+ is.Equal(expectedNotes, rel.Info.Notes)
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallRelease_WithChartAndDependencyParentNotes(t *testing.T) {
+ // Regression: Make sure that the child's notes don't override the parent's
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "with-notes"
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(withNotes("parent"), withDependency(withNotes("child"))), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+ is.Equal("with-notes", rel.Name)
+ is.Equal("parent", rel.Info.Notes)
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallRelease_WithChartAndDependencyAllNotes(t *testing.T) {
+ // Regression: Make sure that the child's notes don't override the parent's
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "with-notes"
+ instAction.SubNotes = true
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(withNotes("parent"), withDependency(withNotes("child"))), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ r, err := instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.NoError(err)
+ rel, err := releaserToV1Release(r)
+ is.NoError(err)
+ is.Equal("with-notes", rel.Name)
+ // test run can return as either 'parent\nchild' or 'child\nparent'
+ if !strings.Contains(rel.Info.Notes, "parent") && !strings.Contains(rel.Info.Notes, "child") {
+ t.Fatalf("Expected 'parent\nchild' or 'child\nparent', got '%s'", rel.Info.Notes)
+ }
+ is.Equal(rel.Info.Description, "Install complete")
+}
+
+func TestInstallRelease_DryRunClient(t *testing.T) {
+ for _, dryRunStrategy := range []DryRunStrategy{DryRunClient, DryRunServer} {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.DryRunStrategy = dryRunStrategy
+
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(withSampleTemplates()), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ is.Contains(res.Manifest, "---\n# Source: hello/templates/hello\nhello: world")
+ is.Contains(res.Manifest, "---\n# Source: hello/templates/goodbye\ngoodbye: world")
+ is.Contains(res.Manifest, "hello: Earth")
+ is.NotContains(res.Manifest, "hello: {{ template \"_planet\" . }}")
+ is.NotContains(res.Manifest, "empty")
+
+ _, err = instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.Error(err)
+ is.Len(res.Hooks, 1)
+ is.True(res.Hooks[0].LastRun.CompletedAt.IsZero(), "expect hook to not be marked as run")
+ is.Equal(res.Info.Description, "Dry run complete")
+ }
+}
+
+func TestInstallRelease_DryRunHiddenSecret(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+
+ // First perform a normal dry-run with the secret and confirm its presence.
+ instAction.DryRunStrategy = DryRunClient
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(withSampleSecret(), withSampleTemplates()), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Manifest, "---\n# Source: hello/templates/secret.yaml\napiVersion: v1\nkind: Secret")
+
+ _, err = instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.Error(err)
+ is.Equal(res.Info.Description, "Dry run complete")
+
+ // Perform a dry-run where the secret should not be present
+ instAction.HideSecret = true
+ vals = map[string]interface{}{}
+ res2i, err := instAction.Run(buildChart(withSampleSecret(), withSampleTemplates()), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res2, err := releaserToV1Release(res2i)
+ is.NoError(err)
+
+ is.NotContains(res2.Manifest, "---\n# Source: hello/templates/secret.yaml\napiVersion: v1\nkind: Secret")
+
+ _, err = instAction.cfg.Releases.Get(res2.Name, res2.Version)
+ is.Error(err)
+ is.Equal(res2.Info.Description, "Dry run complete")
+
+ // Ensure there is an error when HideSecret True but not in a dry-run mode
+ instAction.DryRunStrategy = DryRunNone
+ vals = map[string]interface{}{}
+ _, err = instAction.Run(buildChart(withSampleSecret(), withSampleTemplates()), vals)
+ if err == nil {
+ t.Fatalf("Did not get expected an error when dry-run false and hide secret is true")
+ }
+}
+
+// Regression test for #7955
+func TestInstallRelease_DryRun_Lookup(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.DryRunStrategy = DryRunNone
+ vals := map[string]interface{}{}
+
+ mockChart := buildChart(withSampleTemplates())
+ mockChart.Templates = append(mockChart.Templates, &common.File{
+ Name: "templates/lookup",
+ ModTime: time.Now(),
+ Data: []byte(`goodbye: {{ lookup "v1" "Namespace" "" "___" }}`),
+ })
+
+ resi, err := instAction.Run(mockChart, vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ is.Contains(res.Manifest, "goodbye: map[]")
+}
+
+func TestInstallReleaseIncorrectTemplate_DryRun(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.DryRunStrategy = DryRunNone
+ vals := map[string]interface{}{}
+ _, err := instAction.Run(buildChart(withSampleIncludingIncorrectTemplates()), vals)
+ expectedErr := `hello/templates/incorrect:1:10
+ executing "hello/templates/incorrect" at <.Values.bad.doh>:
+ nil pointer evaluating interface {}.doh`
+ if err == nil {
+ t.Fatalf("Install should fail containing error: %s", expectedErr)
+ }
+ is.Contains(err.Error(), expectedErr)
+}
+
+func TestInstallRelease_NoHooks(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.DisableHooks = true
+ instAction.ReleaseName = "no-hooks"
+ require.NoError(t, instAction.cfg.Releases.Create(releaseStub()))
+
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ is.True(res.Hooks[0].LastRun.CompletedAt.IsZero(), "hooks should not run with no-hooks")
+}
+
+func TestInstallRelease_FailedHooks(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "failed-hooks"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WatchUntilReadyError = fmt.Errorf("Failed watch")
+ instAction.cfg.KubeClient = failer
+ outBuffer := &bytes.Buffer{}
+ failer.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
+
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(), vals)
+ is.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "failed post-install")
+ is.Equal("", outBuffer.String())
+ is.Equal(rcommon.StatusFailed, res.Info.Status)
+}
+
+func TestInstallRelease_ReplaceRelease(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.Replace = true
+
+ rel := releaseStub()
+ rel.Info.Status = rcommon.StatusUninstalled
+ require.NoError(t, instAction.cfg.Releases.Create(rel))
+ instAction.ReleaseName = rel.Name
+
+ vals := map[string]interface{}{}
+ resi, err := instAction.Run(buildChart(), vals)
+ is.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ // This should have been auto-incremented
+ is.Equal(2, res.Version)
+ is.Equal(res.Name, rel.Name)
+
+ r, err := instAction.cfg.Releases.Get(rel.Name, res.Version)
+ is.NoError(err)
+ getres, err := releaserToV1Release(r)
+ is.NoError(err)
+ is.Equal(getres.Info.Status, rcommon.StatusDeployed)
+}
+
+func TestInstallRelease_KubeVersion(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ vals := map[string]interface{}{}
+ _, err := instAction.Run(buildChart(withKube(">=0.0.0")), vals)
+ is.NoError(err)
+
+ // This should fail for a few hundred years
+ instAction.ReleaseName = "should-fail"
+ vals = map[string]interface{}{}
+ _, err = instAction.Run(buildChart(withKube(">=99.0.0")), vals)
+ is.Error(err)
+ is.Contains(err.Error(), "chart requires kubeVersion: >=99.0.0 which is incompatible with Kubernetes v1.20.")
+}
+
+func TestInstallRelease_Wait(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "come-fail-away"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ instAction.cfg.KubeClient = failer
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
+ vals := map[string]interface{}{}
+
+ goroutines := instAction.getGoroutineCount()
+
+ resi, err := instAction.Run(buildChart(), vals)
+ is.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "I timed out")
+ is.Equal(res.Info.Status, rcommon.StatusFailed)
+
+ is.Equal(goroutines, instAction.getGoroutineCount())
+}
+func TestInstallRelease_Wait_Interrupted(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "interrupted-release"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitDuration = 10 * time.Second
+ instAction.cfg.KubeClient = failer
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
+ vals := map[string]interface{}{}
+
+ ctx, cancel := context.WithCancel(t.Context())
+ time.AfterFunc(time.Second, cancel)
+
+ goroutines := instAction.getGoroutineCount()
+
+ _, err := instAction.RunWithContext(ctx, buildChart(), vals)
+ is.Error(err)
+ is.Contains(err.Error(), "context canceled")
+
+ is.Equal(goroutines+1, instAction.getGoroutineCount()) // installation goroutine still is in background
+ time.Sleep(10 * time.Second) // wait for goroutine to finish
+ is.Equal(goroutines, instAction.getGoroutineCount())
+}
+func TestInstallRelease_WaitForJobs(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "come-fail-away"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ instAction.cfg.KubeClient = failer
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
+ instAction.WaitForJobs = true
+ vals := map[string]interface{}{}
+
+ resi, err := instAction.Run(buildChart(), vals)
+ is.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "I timed out")
+ is.Equal(res.Info.Status, rcommon.StatusFailed)
+}
+
+func TestInstallRelease_RollbackOnFailure(t *testing.T) {
+ is := assert.New(t)
+
+ t.Run("rollback-on-failure uninstall succeeds", func(t *testing.T) {
+ instAction := installAction(t)
+ instAction.ReleaseName = "come-fail-away"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ instAction.cfg.KubeClient = failer
+ instAction.RollbackOnFailure = true
+ // disabling hooks to avoid an early fail when
+ // WaitForDelete is called on the pre-delete hook execution
+ instAction.DisableHooks = true
+ vals := map[string]interface{}{}
+
+ resi, err := instAction.Run(buildChart(), vals)
+ is.Error(err)
+ is.Contains(err.Error(), "I timed out")
+ is.Contains(err.Error(), "rollback-on-failure")
+
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ // Now make sure it isn't in storage anymore
+ _, err = instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.Error(err)
+ is.Equal(err, driver.ErrReleaseNotFound)
+ })
+
+ t.Run("rollback-on-failure uninstall fails", func(t *testing.T) {
+ instAction := installAction(t)
+ instAction.ReleaseName = "come-fail-away-with-me"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ failer.DeleteError = fmt.Errorf("uninstall fail")
+ instAction.cfg.KubeClient = failer
+ instAction.RollbackOnFailure = true
+ vals := map[string]interface{}{}
+
+ _, err := instAction.Run(buildChart(), vals)
+ is.Error(err)
+ is.Contains(err.Error(), "I timed out")
+ is.Contains(err.Error(), "uninstall fail")
+ is.Contains(err.Error(), "an error occurred while uninstalling the release")
+ })
+}
+func TestInstallRelease_RollbackOnFailure_Interrupted(t *testing.T) {
+
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.ReleaseName = "interrupted-release"
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitDuration = 10 * time.Second
+ instAction.cfg.KubeClient = failer
+ instAction.RollbackOnFailure = true
+ vals := map[string]interface{}{}
+
+ ctx, cancel := context.WithCancel(t.Context())
+ time.AfterFunc(time.Second, cancel)
+
+ goroutines := instAction.getGoroutineCount()
+
+ resi, err := instAction.RunWithContext(ctx, buildChart(), vals)
+ is.Error(err)
+ is.Contains(err.Error(), "context canceled")
+ is.Contains(err.Error(), "rollback-on-failure")
+ is.Contains(err.Error(), "uninstalled")
+
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ // Now make sure it isn't in storage anymore
+ _, err = instAction.cfg.Releases.Get(res.Name, res.Version)
+ is.Error(err)
+ is.Equal(err, driver.ErrReleaseNotFound)
+ is.Equal(goroutines+1, instAction.getGoroutineCount()) // installation goroutine still is in background
+ time.Sleep(10 * time.Second) // wait for goroutine to finish
+ is.Equal(goroutines, instAction.getGoroutineCount())
+
+}
+func TestNameTemplate(t *testing.T) {
+ testCases := []nameTemplateTestCase{
+ // Just a straight up nop please
+ {
+ tpl: "foobar",
+ expected: "foobar",
+ expectedErrorStr: "",
+ },
+ // Random numbers at the end for fun & profit
+ {
+ tpl: "foobar-{{randNumeric 6}}",
+ expected: "foobar-[0-9]{6}$",
+ expectedErrorStr: "",
+ },
+ // Random numbers in the middle for fun & profit
+ {
+ tpl: "foobar-{{randNumeric 4}}-baz",
+ expected: "foobar-[0-9]{4}-baz$",
+ expectedErrorStr: "",
+ },
+ // No such function
+ {
+ tpl: "foobar-{{randInteger}}",
+ expected: "",
+ expectedErrorStr: "function \"randInteger\" not defined",
+ },
+ // Invalid template
+ {
+ tpl: "foobar-{{",
+ expected: "",
+ expectedErrorStr: "template: name-template:1: unclosed action",
+ },
+ }
+
+ for _, tc := range testCases {
+
+ n, err := TemplateName(tc.tpl)
+ if err != nil {
+ if tc.expectedErrorStr == "" {
+ t.Errorf("Was not expecting error, but got: %v", err)
+ continue
+ }
+ re, compErr := regexp.Compile(tc.expectedErrorStr)
+ if compErr != nil {
+ t.Errorf("Expected error string failed to compile: %v", compErr)
+ continue
+ }
+ if !re.MatchString(err.Error()) {
+ t.Errorf("Error didn't match for %s expected %s but got %v", tc.tpl, tc.expectedErrorStr, err)
+ continue
+ }
+ }
+ if err == nil && tc.expectedErrorStr != "" {
+ t.Errorf("Was expecting error %s but didn't get an error back", tc.expectedErrorStr)
+ }
+
+ if tc.expected != "" {
+ re, err := regexp.Compile(tc.expected)
+ if err != nil {
+ t.Errorf("Expected string failed to compile: %v", err)
+ continue
+ }
+ if !re.MatchString(n) {
+ t.Errorf("Returned name didn't match for %s expected %s but got %s", tc.tpl, tc.expected, n)
+ }
+ }
+ }
+}
+
+func TestInstallReleaseOutputDir(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ vals := map[string]interface{}{}
+
+ dir := t.TempDir()
+
+ instAction.OutputDir = dir
+
+ _, err := instAction.Run(buildChart(withSampleTemplates(), withMultipleManifestTemplate()), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+
+ _, err = os.Stat(filepath.Join(dir, "hello/templates/goodbye"))
+ is.NoError(err)
+
+ _, err = os.Stat(filepath.Join(dir, "hello/templates/hello"))
+ is.NoError(err)
+
+ _, err = os.Stat(filepath.Join(dir, "hello/templates/with-partials"))
+ is.NoError(err)
+
+ _, err = os.Stat(filepath.Join(dir, "hello/templates/rbac"))
+ is.NoError(err)
+
+ test.AssertGoldenFile(t, filepath.Join(dir, "hello/templates/rbac"), "rbac.txt")
+
+ _, err = os.Stat(filepath.Join(dir, "hello/templates/empty"))
+ is.True(errors.Is(err, fs.ErrNotExist))
+}
+
+func TestInstallOutputDirWithReleaseName(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ vals := map[string]interface{}{}
+
+ dir := t.TempDir()
+
+ instAction.OutputDir = dir
+ instAction.UseReleaseName = true
+ instAction.ReleaseName = "madra"
+
+ newDir := filepath.Join(dir, instAction.ReleaseName)
+
+ _, err := instAction.Run(buildChart(withSampleTemplates(), withMultipleManifestTemplate()), vals)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+
+ _, err = os.Stat(filepath.Join(newDir, "hello/templates/goodbye"))
+ is.NoError(err)
+
+ _, err = os.Stat(filepath.Join(newDir, "hello/templates/hello"))
+ is.NoError(err)
+
+ _, err = os.Stat(filepath.Join(newDir, "hello/templates/with-partials"))
+ is.NoError(err)
+
+ _, err = os.Stat(filepath.Join(newDir, "hello/templates/rbac"))
+ is.NoError(err)
+
+ test.AssertGoldenFile(t, filepath.Join(newDir, "hello/templates/rbac"), "rbac.txt")
+
+ _, err = os.Stat(filepath.Join(newDir, "hello/templates/empty"))
+ is.True(errors.Is(err, fs.ErrNotExist))
+}
+
+func TestNameAndChart(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ chartName := "./foo"
+
+ name, chrt, err := instAction.NameAndChart([]string{chartName})
+ if err != nil {
+ t.Fatal(err)
+ }
+ is.Equal(instAction.ReleaseName, name)
+ is.Equal(chartName, chrt)
+
+ instAction.GenerateName = true
+ _, _, err = instAction.NameAndChart([]string{"foo", chartName})
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ is.Equal("cannot set --generate-name and also specify a name", err.Error())
+
+ instAction.GenerateName = false
+ instAction.NameTemplate = "{{ . }}"
+ _, _, err = instAction.NameAndChart([]string{"foo", chartName})
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ is.Equal("cannot set --name-template and also specify a name", err.Error())
+
+ instAction.NameTemplate = ""
+ instAction.ReleaseName = ""
+ _, _, err = instAction.NameAndChart([]string{chartName})
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ is.Equal("must either provide a name or specify --generate-name", err.Error())
+
+ instAction.NameTemplate = ""
+ instAction.ReleaseName = ""
+ _, _, err = instAction.NameAndChart([]string{"foo", chartName, "bar"})
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ is.Equal("expected at most two arguments, unexpected arguments: bar", err.Error())
+}
+
+func TestNameAndChartGenerateName(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+
+ instAction.ReleaseName = ""
+ instAction.GenerateName = true
+
+ tests := []struct {
+ Name string
+ Chart string
+ ExpectedName string
+ }{
+ {
+ "local filepath",
+ "./chart",
+ fmt.Sprintf("chart-%d", time.Now().Unix()),
+ },
+ {
+ "dot filepath",
+ ".",
+ fmt.Sprintf("chart-%d", time.Now().Unix()),
+ },
+ {
+ "empty filepath",
+ "",
+ fmt.Sprintf("chart-%d", time.Now().Unix()),
+ },
+ {
+ "packaged chart",
+ "chart.tgz",
+ fmt.Sprintf("chart-%d", time.Now().Unix()),
+ },
+ {
+ "packaged chart with .tar.gz extension",
+ "chart.tar.gz",
+ fmt.Sprintf("chart-%d", time.Now().Unix()),
+ },
+ {
+ "packaged chart with local extension",
+ "./chart.tgz",
+ fmt.Sprintf("chart-%d", time.Now().Unix()),
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.Name, func(t *testing.T) {
+ t.Parallel()
+
+ name, chrt, err := instAction.NameAndChart([]string{tc.Chart})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ is.Equal(tc.ExpectedName, name)
+ is.Equal(tc.Chart, chrt)
+ })
+ }
+}
+
+func TestInstallWithLabels(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.Labels = map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ }
+ resi, err := instAction.Run(buildChart(), nil)
+ if err != nil {
+ t.Fatalf("Failed install: %s", err)
+ }
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ is.Equal(instAction.Labels, res.Labels)
+}
+
+func TestInstallWithSystemLabels(t *testing.T) {
+ is := assert.New(t)
+ instAction := installAction(t)
+ instAction.Labels = map[string]string{
+ "owner": "val1",
+ "key2": "val2",
+ }
+ _, err := instAction.Run(buildChart(), nil)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+
+ is.Equal(fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()), err)
+}
+
+func TestUrlEqual(t *testing.T) {
+ is := assert.New(t)
+
+ tests := []struct {
+ name string
+ url1 string
+ url2 string
+ expected bool
+ }{
+ {
+ name: "identical URLs",
+ url1: "https://example.com:443",
+ url2: "https://example.com:443",
+ expected: true,
+ },
+ {
+ name: "same host, scheme, default HTTPS port vs explicit",
+ url1: "https://example.com",
+ url2: "https://example.com:443",
+ expected: true,
+ },
+ {
+ name: "same host, scheme, default HTTP port vs explicit",
+ url1: "http://example.com",
+ url2: "http://example.com:80",
+ expected: true,
+ },
+ {
+ name: "different schemes",
+ url1: "http://example.com",
+ url2: "https://example.com",
+ expected: false,
+ },
+ {
+ name: "different hosts",
+ url1: "https://example.com",
+ url2: "https://www.example.com",
+ expected: false,
+ },
+ {
+ name: "different ports",
+ url1: "https://example.com:8080",
+ url2: "https://example.com:9090",
+ expected: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ u1, err := url.Parse(tc.url1)
+ if err != nil {
+ t.Fatalf("Failed to parse URL1 %s: %v", tc.url1, err)
+ }
+ u2, err := url.Parse(tc.url2)
+ if err != nil {
+ t.Fatalf("Failed to parse URL2 %s: %v", tc.url2, err)
+ }
+
+ is.Equal(tc.expected, urlEqual(u1, u2))
+ })
+ }
+}
+
+func TestInstallRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ instAction := NewInstall(config)
+ ctx, done := context.WithCancel(t.Context())
+ chrt := buildChart()
+ res, err := instAction.RunWithContext(ctx, chrt, nil)
+
+ done()
+ assert.Nil(t, res)
+ assert.ErrorContains(t, err, "connection refused")
+}
+
+func TestInstallSetRegistryClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ instAction := NewInstall(config)
+
+ registryClient := ®istry.Client{}
+ instAction.SetRegistryClient(registryClient)
+
+ assert.Equal(t, registryClient, instAction.GetRegistryClient())
+}
+
+func TestInstalLCRDs(t *testing.T) {
+ config := actionConfigFixture(t)
+ instAction := NewInstall(config)
+
+ mockFile := common.File{
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ }
+ mockChart := buildChart(withFile(mockFile))
+ crdsToInstall := mockChart.CRDObjects()
+ assert.Len(t, crdsToInstall, 1)
+ assert.Equal(t, crdsToInstall[0].File.Data, mockFile.Data)
+
+ require.NoError(t, instAction.installCRDs(crdsToInstall))
+}
+
+func TestInstalLCRDs_KubeClient_BuildError(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.BuildError = errors.New("build error")
+ config.KubeClient = &failingKubeClient
+ instAction := NewInstall(config)
+
+ mockFile := common.File{
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ }
+ mockChart := buildChart(withFile(mockFile))
+ crdsToInstall := mockChart.CRDObjects()
+
+ require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD")
+}
+
+func TestInstalLCRDs_KubeClient_CreateError(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.CreateError = errors.New("create error")
+ config.KubeClient = &failingKubeClient
+ instAction := NewInstall(config)
+
+ mockFile := common.File{
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ }
+ mockChart := buildChart(withFile(mockFile))
+ crdsToInstall := mockChart.CRDObjects()
+
+ require.Error(t, instAction.installCRDs(crdsToInstall), "failed to install CRD")
+}
+
+func TestInstalLCRDs_AlreadyExist(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ mockError := &apierrors.StatusError{ErrStatus: metav1.Status{
+ Status: metav1.StatusFailure,
+ Reason: metav1.StatusReasonAlreadyExists,
+ }}
+ failingKubeClient.CreateError = mockError
+ config.KubeClient = &failingKubeClient
+ instAction := NewInstall(config)
+
+ mockFile := common.File{
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ }
+ mockChart := buildChart(withFile(mockFile))
+ crdsToInstall := mockChart.CRDObjects()
+
+ assert.Nil(t, instAction.installCRDs(crdsToInstall))
+}
+
+func TestInstalLCRDs_WaiterError(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.WaitError = errors.New("wait error")
+ failingKubeClient.BuildDummy = true
+ config.KubeClient = &failingKubeClient
+ instAction := NewInstall(config)
+
+ mockFile := common.File{
+ Name: "crds/foo.yaml",
+ Data: []byte("hello"),
+ }
+ mockChart := buildChart(withFile(mockFile))
+ crdsToInstall := mockChart.CRDObjects()
+
+ require.Error(t, instAction.installCRDs(crdsToInstall), "wait error")
+}
+
+func TestCheckDependencies(t *testing.T) {
+ dependency := chart.Dependency{Name: "hello"}
+ mockChart := buildChart(withDependency())
+
+ assert.Nil(t, CheckDependencies(mockChart, []ci.Dependency{&dependency}))
+}
+
+func TestCheckDependencies_MissingDependency(t *testing.T) {
+ dependency := chart.Dependency{Name: "missing"}
+ mockChart := buildChart(withDependency())
+
+ assert.ErrorContains(t, CheckDependencies(mockChart, []ci.Dependency{&dependency}), "missing in charts")
+}
+
+func TestInstallRelease_WaitOptionsPassedDownstream(t *testing.T) {
+ is := assert.New(t)
+
+ instAction := installAction(t)
+ instAction.ReleaseName = "wait-options-test"
+ instAction.WaitStrategy = kube.StatusWatcherStrategy
+
+ // Use WithWaitContext as a marker WaitOption that we can track
+ ctx := context.Background()
+ instAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+ // Access the underlying FailingKubeClient to check recorded options
+ failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+
+ vals := map[string]interface{}{}
+ _, err := instAction.Run(buildChart(), vals)
+ is.NoError(err)
+
+ // Verify that WaitOptions were passed to GetWaiter
+ is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/helm/pkg/action/lazyclient.go b/helm/pkg/action/lazyclient.go
new file mode 100644
index 000000000..9037782bb
--- /dev/null
+++ b/helm/pkg/action/lazyclient.go
@@ -0,0 +1,197 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "sync"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ "k8s.io/client-go/kubernetes"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// lazyClient is a workaround to deal with Kubernetes having an unstable client API.
+// In Kubernetes v1.18 the defaults where removed which broke creating a
+// client without an explicit configuration. ಠ_ಠ
+type lazyClient struct {
+ // client caches an initialized kubernetes client
+ initClient sync.Once
+ client kubernetes.Interface
+ clientErr error
+
+ // clientFn loads a kubernetes client
+ clientFn func() (*kubernetes.Clientset, error)
+
+ // namespace passed to each client request
+ namespace string
+}
+
+func (s *lazyClient) init() error {
+ s.initClient.Do(func() {
+ s.client, s.clientErr = s.clientFn()
+ })
+ return s.clientErr
+}
+
+// secretClient implements a corev1.SecretsInterface
+type secretClient struct{ *lazyClient }
+
+var _ corev1.SecretInterface = (*secretClient)(nil)
+
+func newSecretClient(lc *lazyClient) *secretClient {
+ return &secretClient{lazyClient: lc}
+}
+
+func (s *secretClient) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Create(ctx, secret, opts)
+}
+
+func (s *secretClient) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Update(ctx, secret, opts)
+}
+
+func (s *secretClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Delete(ctx, name, opts)
+}
+
+func (s *secretClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).DeleteCollection(ctx, opts, listOpts)
+}
+
+func (s *secretClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Get(ctx, name, opts)
+}
+
+func (s *secretClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).List(ctx, opts)
+}
+
+func (s *secretClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Watch(ctx, opts)
+}
+
+func (s *secretClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Patch(ctx, name, pt, data, opts, subresources...)
+}
+
+func (s *secretClient) Apply(ctx context.Context, secretConfiguration *applycorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Apply(ctx, secretConfiguration, opts)
+}
+
+// configMapClient implements a corev1.ConfigMapInterface
+type configMapClient struct{ *lazyClient }
+
+var _ corev1.ConfigMapInterface = (*configMapClient)(nil)
+
+func newConfigMapClient(lc *lazyClient) *configMapClient {
+ return &configMapClient{lazyClient: lc}
+}
+
+func (c *configMapClient) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Create(ctx, configMap, opts)
+}
+
+func (c *configMapClient) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Update(ctx, configMap, opts)
+}
+
+func (c *configMapClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ if err := c.init(); err != nil {
+ return err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Delete(ctx, name, opts)
+}
+
+func (c *configMapClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ if err := c.init(); err != nil {
+ return err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).DeleteCollection(ctx, opts, listOpts)
+}
+
+func (c *configMapClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, opts)
+}
+
+func (c *configMapClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).List(ctx, opts)
+}
+
+func (c *configMapClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Watch(ctx, opts)
+}
+
+func (c *configMapClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.ConfigMap, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Patch(ctx, name, pt, data, opts, subresources...)
+}
+
+func (c *configMapClient) Apply(ctx context.Context, configMap *applycorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (*v1.ConfigMap, error) {
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c.client.CoreV1().ConfigMaps(c.namespace).Apply(ctx, configMap, opts)
+}
diff --git a/helm/pkg/action/lint.go b/helm/pkg/action/lint.go
new file mode 100644
index 000000000..208fd4637
--- /dev/null
+++ b/helm/pkg/action/lint.go
@@ -0,0 +1,136 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/v2/lint"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+// Lint is the action for checking that the semantics of a chart are well-formed.
+//
+// It provides the implementation of 'helm lint'.
+type Lint struct {
+ Strict bool
+ Namespace string
+ WithSubcharts bool
+ Quiet bool
+ SkipSchemaValidation bool
+ KubeVersion *common.KubeVersion
+}
+
+// LintResult is the result of Lint
+type LintResult struct {
+ TotalChartsLinted int
+ Messages []support.Message
+ Errors []error
+}
+
+// NewLint creates a new Lint object with the given configuration.
+func NewLint() *Lint {
+ return &Lint{}
+}
+
+// Run executes 'helm Lint' against the given chart.
+func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult {
+ lowestTolerance := support.ErrorSev
+ if l.Strict {
+ lowestTolerance = support.WarningSev
+ }
+ result := &LintResult{}
+ for _, path := range paths {
+ linter, err := lintChart(path, vals, l.Namespace, l.KubeVersion, l.SkipSchemaValidation)
+ if err != nil {
+ result.Errors = append(result.Errors, err)
+ continue
+ }
+
+ result.Messages = append(result.Messages, linter.Messages...)
+ result.TotalChartsLinted++
+ for _, msg := range linter.Messages {
+ if msg.Severity >= lowestTolerance {
+ result.Errors = append(result.Errors, msg.Err)
+ }
+ }
+ }
+ return result
+}
+
+// HasWarningsOrErrors checks is LintResult has any warnings or errors
+func HasWarningsOrErrors(result *LintResult) bool {
+ for _, msg := range result.Messages {
+ if msg.Severity > support.InfoSev {
+ return true
+ }
+ }
+ return len(result.Errors) > 0
+}
+
+func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *common.KubeVersion, skipSchemaValidation bool) (support.Linter, error) {
+ var chartPath string
+ linter := support.Linter{}
+
+ if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") {
+ tempDir, err := os.MkdirTemp("", "helm-lint")
+ if err != nil {
+ return linter, fmt.Errorf("unable to create temp dir to extract tarball: %w", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ file, err := os.Open(path)
+ if err != nil {
+ return linter, fmt.Errorf("unable to open tarball: %w", err)
+ }
+ defer file.Close()
+
+ if err = chartutil.Expand(tempDir, file); err != nil {
+ return linter, fmt.Errorf("unable to extract tarball: %w", err)
+ }
+
+ files, err := os.ReadDir(tempDir)
+ if err != nil {
+ return linter, fmt.Errorf("unable to read temporary output directory %s: %w", tempDir, err)
+ }
+ if !files[0].IsDir() {
+ return linter, fmt.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir)
+ }
+
+ chartPath = filepath.Join(tempDir, files[0].Name())
+ } else {
+ chartPath = path
+ }
+
+ // Guard: Error out if this is not a chart.
+ if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil {
+ return linter, fmt.Errorf("unable to check Chart.yaml file in chart: %w", err)
+ }
+
+ return lint.RunAll(
+ chartPath,
+ vals,
+ namespace,
+ lint.WithKubeVersion(kubeVersion),
+ lint.WithSkipSchemaValidation(skipSchemaValidation),
+ ), nil
+}
diff --git a/helm/pkg/action/lint_test.go b/helm/pkg/action/lint_test.go
new file mode 100644
index 000000000..4684f91f1
--- /dev/null
+++ b/helm/pkg/action/lint_test.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+)
+
+var (
+ values = make(map[string]interface{})
+ namespace = "testNamespace"
+ chart1MultipleChartLint = "testdata/charts/multiplecharts-lint-chart-1"
+ chart2MultipleChartLint = "testdata/charts/multiplecharts-lint-chart-2"
+ corruptedTgzChart = "testdata/charts/corrupted-compressed-chart.tgz"
+ chartWithNoTemplatesDir = "testdata/charts/chart-with-no-templates-dir"
+)
+
+func TestLintChart(t *testing.T) {
+ tests := []struct {
+ name string
+ chartPath string
+ err bool
+ skipSchemaValidation bool
+ }{
+ {
+ name: "decompressed-chart",
+ chartPath: "testdata/charts/decompressedchart/",
+ },
+ {
+ name: "archived-chart-path",
+ chartPath: "testdata/charts/compressedchart-0.1.0.tgz",
+ },
+ {
+ name: "archived-chart-path-with-hyphens",
+ chartPath: "testdata/charts/compressedchart-with-hyphens-0.1.0.tgz",
+ },
+ {
+ name: "archived-tar-gz-chart-path",
+ chartPath: "testdata/charts/compressedchart-0.1.0.tar.gz",
+ },
+ {
+ name: "invalid-archived-chart-path",
+ chartPath: "testdata/charts/invalidcompressedchart0.1.0.tgz",
+ err: true,
+ },
+ {
+ name: "chart-missing-manifest",
+ chartPath: "testdata/charts/chart-missing-manifest",
+ err: true,
+ },
+ {
+ name: "chart-with-schema",
+ chartPath: "testdata/charts/chart-with-schema",
+ },
+ {
+ name: "chart-with-schema-negative",
+ chartPath: "testdata/charts/chart-with-schema-negative",
+ },
+ {
+ name: "chart-with-schema-negative-skip-validation",
+ chartPath: "testdata/charts/chart-with-schema-negative",
+ skipSchemaValidation: true,
+ },
+ {
+ name: "pre-release-chart",
+ chartPath: "testdata/charts/pre-release-chart-0.1.0-alpha.tgz",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := lintChart(tt.chartPath, map[string]interface{}{}, namespace, nil, tt.skipSchemaValidation)
+ switch {
+ case err != nil && !tt.err:
+ t.Errorf("%s", err)
+ case err == nil && tt.err:
+ t.Errorf("Expected a chart parsing error")
+ }
+ })
+ }
+}
+
+func TestNonExistentChart(t *testing.T) {
+ t.Run("should error out for non existent tgz chart", func(t *testing.T) {
+ testCharts := []string{"non-existent-chart.tgz"}
+ expectedError := "unable to open tarball: open non-existent-chart.tgz: no such file or directory"
+ testLint := NewLint()
+
+ result := testLint.Run(testCharts, values)
+ if len(result.Errors) != 1 {
+ t.Error("expected one error, but got", len(result.Errors))
+ }
+
+ actual := result.Errors[0].Error()
+ if actual != expectedError {
+ t.Errorf("expected '%s', but got '%s'", expectedError, actual)
+ }
+ })
+
+ t.Run("should error out for corrupted tgz chart", func(t *testing.T) {
+ testCharts := []string{corruptedTgzChart}
+ expectedEOFError := "unable to extract tarball: EOF"
+ testLint := NewLint()
+
+ result := testLint.Run(testCharts, values)
+ if len(result.Errors) != 1 {
+ t.Error("expected one error, but got", len(result.Errors))
+ }
+
+ actual := result.Errors[0].Error()
+ if actual != expectedEOFError {
+ t.Errorf("expected '%s', but got '%s'", expectedEOFError, actual)
+ }
+ })
+}
+
+func TestLint_MultipleCharts(t *testing.T) {
+ testCharts := []string{chart2MultipleChartLint, chart1MultipleChartLint}
+ testLint := NewLint()
+ if result := testLint.Run(testCharts, values); len(result.Errors) > 0 {
+ t.Error(result.Errors)
+ }
+}
+
+func TestLint_EmptyResultErrors(t *testing.T) {
+ testCharts := []string{chart2MultipleChartLint}
+ testLint := NewLint()
+ if result := testLint.Run(testCharts, values); len(result.Errors) > 0 {
+ t.Error("Expected no error, got more")
+ }
+}
+
+func TestLint_ChartWithWarnings(t *testing.T) {
+ t.Run("should pass when not strict", func(t *testing.T) {
+ testCharts := []string{chartWithNoTemplatesDir}
+ testLint := NewLint()
+ testLint.Strict = false
+ if result := testLint.Run(testCharts, values); len(result.Errors) > 0 {
+ t.Error("Expected no error, got more")
+ }
+ })
+
+ t.Run("should fail with one error when strict", func(t *testing.T) {
+ testCharts := []string{chartWithNoTemplatesDir}
+ testLint := NewLint()
+ testLint.Strict = true
+ if result := testLint.Run(testCharts, values); len(result.Errors) != 1 {
+ t.Error("expected one error, but got", len(result.Errors))
+ }
+ })
+}
+
+func TestHasWarningsOrErrors(t *testing.T) {
+ testError := errors.New("test-error")
+ cases := []struct {
+ name string
+ data LintResult
+ expected bool
+ }{
+ {
+ name: "has no warning messages and no errors",
+ data: LintResult{TotalChartsLinted: 1, Messages: make([]support.Message, 0), Errors: make([]error, 0)},
+ expected: false,
+ },
+ {
+ name: "has error",
+ data: LintResult{TotalChartsLinted: 1, Messages: make([]support.Message, 0), Errors: []error{testError}},
+ expected: true,
+ },
+ {
+ name: "has info message only",
+ data: LintResult{TotalChartsLinted: 1, Messages: []support.Message{{Severity: support.InfoSev, Path: "", Err: testError}}, Errors: make([]error, 0)},
+ expected: false,
+ },
+ {
+ name: "has warning message",
+ data: LintResult{TotalChartsLinted: 1, Messages: []support.Message{{Severity: support.WarningSev, Path: "", Err: testError}}, Errors: make([]error, 0)},
+ expected: true,
+ },
+ {
+ name: "has error message",
+ data: LintResult{TotalChartsLinted: 1, Messages: []support.Message{{Severity: support.ErrorSev, Path: "", Err: testError}}, Errors: make([]error, 0)},
+ expected: true,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ result := HasWarningsOrErrors(&tc.data)
+ assert.Equal(t, tc.expected, result)
+ })
+ }
+}
diff --git a/helm/pkg/action/list.go b/helm/pkg/action/list.go
new file mode 100644
index 000000000..06727bd9a
--- /dev/null
+++ b/helm/pkg/action/list.go
@@ -0,0 +1,334 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "path"
+ "regexp"
+
+ "k8s.io/apimachinery/pkg/labels"
+
+ ri "helm.sh/helm/v4/pkg/release"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+)
+
+// ListStates represents zero or more status codes that a list item may have set
+//
+// Because this is used as a bitmask filter, more than one bit can be flipped
+// in the ListStates.
+type ListStates uint
+
+const (
+ // ListDeployed filters on status "deployed"
+ ListDeployed ListStates = 1 << iota
+ // ListUninstalled filters on status "uninstalled"
+ ListUninstalled
+ // ListUninstalling filters on status "uninstalling" (uninstall in progress)
+ ListUninstalling
+ // ListPendingInstall filters on status "pending" (deployment in progress)
+ ListPendingInstall
+ // ListPendingUpgrade filters on status "pending_upgrade" (upgrade in progress)
+ ListPendingUpgrade
+ // ListPendingRollback filters on status "pending_rollback" (rollback in progress)
+ ListPendingRollback
+ // ListSuperseded filters on status "superseded" (historical release version that is no longer deployed)
+ ListSuperseded
+ // ListFailed filters on status "failed" (release version not deployed because of error)
+ ListFailed
+ // ListUnknown filters on an unknown status
+ ListUnknown
+)
+
+// FromName takes a state name and returns a ListStates representation.
+//
+// Currently, there are only names for individual flipped bits, so the returned
+// ListStates will only match one of the constants. However, it is possible that
+// this behavior could change in the future.
+func (s ListStates) FromName(str string) ListStates {
+ switch str {
+ case "deployed":
+ return ListDeployed
+ case "uninstalled":
+ return ListUninstalled
+ case "superseded":
+ return ListSuperseded
+ case "failed":
+ return ListFailed
+ case "uninstalling":
+ return ListUninstalling
+ case "pending-install":
+ return ListPendingInstall
+ case "pending-upgrade":
+ return ListPendingUpgrade
+ case "pending-rollback":
+ return ListPendingRollback
+ }
+ return ListUnknown
+}
+
+// ListAll is a convenience for enabling all list filters
+const ListAll = ListDeployed | ListUninstalled | ListUninstalling | ListPendingInstall | ListPendingRollback | ListPendingUpgrade | ListSuperseded | ListFailed
+
+// Sorter is a top-level sort
+type Sorter uint
+
+const (
+ // ByNameDesc sorts by descending lexicographic order
+ ByNameDesc Sorter = iota + 1
+ // ByDateAsc sorts by ascending dates (oldest updated release first)
+ ByDateAsc
+ // ByDateDesc sorts by descending dates (latest updated release first)
+ ByDateDesc
+)
+
+// List is the action for listing releases.
+//
+// It provides, for example, the implementation of 'helm list'.
+// It returns no more than one revision of every release in one specific, or in
+// all, namespaces.
+// To list all the revisions of a specific release, see the History action.
+type List struct {
+ cfg *Configuration
+
+ // All ignores the limit/offset
+ All bool
+ // AllNamespaces searches across namespaces
+ AllNamespaces bool
+ // Sort indicates the sort to use
+ //
+ // see pkg/releaseutil for several useful sorters
+ Sort Sorter
+ // Overrides the default lexicographic sorting
+ ByDate bool
+ SortReverse bool
+ // StateMask accepts a bitmask of states for items to show.
+ // The default is ListDeployed
+ StateMask ListStates
+ // Limit is the number of items to return per Run()
+ Limit int
+ // Offset is the starting index for the Run() call
+ Offset int
+ // Filter is a filter that is applied to the results
+ Filter string
+ Short bool
+ NoHeaders bool
+ TimeFormat string
+ Uninstalled bool
+ Superseded bool
+ Uninstalling bool
+ Deployed bool
+ Failed bool
+ Pending bool
+ Selector string
+}
+
+// NewList constructs a new *List
+func NewList(cfg *Configuration) *List {
+ return &List{
+ StateMask: ListAll,
+ cfg: cfg,
+ }
+}
+
+// Run executes the list command, returning a set of matches.
+func (l *List) Run() ([]ri.Releaser, error) {
+ if err := l.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ var filter *regexp.Regexp
+ if l.Filter != "" {
+ var err error
+ filter, err = regexp.Compile(l.Filter)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ results, err := l.cfg.Releases.List(func(rel ri.Releaser) bool {
+ r, err := releaserToV1Release(rel)
+ if err != nil {
+ return false
+ }
+ // Skip anything that doesn't match the filter.
+ if filter != nil && !filter.MatchString(r.Name) {
+ return false
+ }
+
+ return true
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if results == nil {
+ return results, nil
+ }
+
+ rresults, err := releaseListToV1List(results)
+ if err != nil {
+ return nil, err
+ }
+
+ // by definition, superseded releases are never shown if
+ // only the latest releases are returned. so if requested statemask
+ // is _only_ ListSuperseded, skip the latest release filter
+ if l.StateMask != ListSuperseded {
+ rresults = filterLatestReleases(rresults)
+ }
+
+ // State mask application must occur after filtering to
+ // latest releases, otherwise outdated entries can be returned
+ rresults = l.filterStateMask(rresults)
+
+ // Skip anything that doesn't match the selector
+ selectorObj, err := labels.Parse(l.Selector)
+ if err != nil {
+ return nil, err
+ }
+ rresults = l.filterSelector(rresults, selectorObj)
+
+ // Unfortunately, we have to sort before truncating, which can incur substantial overhead
+ l.sort(rresults)
+
+ // Guard on offset
+ if l.Offset >= len(rresults) {
+ return releaseV1ListToReleaserList([]*release.Release{})
+ }
+
+ // Calculate the limit and offset, and then truncate results if necessary.
+ limit := len(results)
+ if l.Limit > 0 && l.Limit < limit {
+ limit = l.Limit
+ }
+ last := l.Offset + limit
+ if l := len(rresults); l < last {
+ last = l
+ }
+ rresults = rresults[l.Offset:last]
+
+ return releaseV1ListToReleaserList(rresults)
+}
+
+// sort is an in-place sort where order is based on the value of a.Sort
+func (l *List) sort(rels []*release.Release) {
+ if l.SortReverse {
+ l.Sort = ByNameDesc
+ }
+
+ if l.ByDate {
+ l.Sort = ByDateDesc
+ if l.SortReverse {
+ l.Sort = ByDateAsc
+ }
+ }
+
+ switch l.Sort {
+ case ByDateDesc:
+ releaseutil.SortByDate(rels)
+ case ByDateAsc:
+ releaseutil.Reverse(rels, releaseutil.SortByDate)
+ case ByNameDesc:
+ releaseutil.Reverse(rels, releaseutil.SortByName)
+ default:
+ releaseutil.SortByName(rels)
+ }
+}
+
+// filterLatestReleases returns a list scrubbed of old releases.
+func filterLatestReleases(releases []*release.Release) []*release.Release {
+ latestReleases := make(map[string]*release.Release)
+
+ for _, rls := range releases {
+ name, namespace := rls.Name, rls.Namespace
+ key := path.Join(namespace, name)
+ if latestRelease, exists := latestReleases[key]; exists && latestRelease.Version > rls.Version {
+ continue
+ }
+ latestReleases[key] = rls
+ }
+
+ var list = make([]*release.Release, 0, len(latestReleases))
+ for _, rls := range latestReleases {
+ list = append(list, rls)
+ }
+ return list
+}
+
+func (l *List) filterStateMask(releases []*release.Release) []*release.Release {
+ desiredStateReleases := make([]*release.Release, 0)
+
+ for _, rls := range releases {
+ currentStatus := l.StateMask.FromName(rls.Info.Status.String())
+ mask := l.StateMask & currentStatus
+ if mask == 0 {
+ continue
+ }
+ desiredStateReleases = append(desiredStateReleases, rls)
+ }
+
+ return desiredStateReleases
+}
+
+func (l *List) filterSelector(releases []*release.Release, selector labels.Selector) []*release.Release {
+ desiredStateReleases := make([]*release.Release, 0)
+
+ for _, rls := range releases {
+ if selector.Matches(labels.Set(rls.Labels)) {
+ desiredStateReleases = append(desiredStateReleases, rls)
+ }
+ }
+
+ return desiredStateReleases
+}
+
+// SetStateMask calculates the state mask based on parameters.
+func (l *List) SetStateMask() {
+ if l.All {
+ l.StateMask = ListAll
+ return
+ }
+
+ state := ListStates(0)
+ if l.Deployed {
+ state |= ListDeployed
+ }
+ if l.Uninstalled {
+ state |= ListUninstalled
+ }
+ if l.Uninstalling {
+ state |= ListUninstalling
+ }
+ if l.Pending {
+ state |= ListPendingInstall | ListPendingRollback | ListPendingUpgrade
+ }
+ if l.Failed {
+ state |= ListFailed
+ }
+ if l.Superseded {
+ state |= ListSuperseded
+ }
+
+ // Apply a default
+ if state == 0 {
+ state = ListAll
+ }
+
+ l.StateMask = state
+}
diff --git a/helm/pkg/action/list_test.go b/helm/pkg/action/list_test.go
new file mode 100644
index 000000000..643bcea42
--- /dev/null
+++ b/helm/pkg/action/list_test.go
@@ -0,0 +1,422 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ ri "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage"
+)
+
+func TestListStates(t *testing.T) {
+ for input, expect := range map[string]ListStates{
+ "deployed": ListDeployed,
+ "uninstalled": ListUninstalled,
+ "uninstalling": ListUninstalling,
+ "superseded": ListSuperseded,
+ "failed": ListFailed,
+ "pending-install": ListPendingInstall,
+ "pending-rollback": ListPendingRollback,
+ "pending-upgrade": ListPendingUpgrade,
+ "unknown": ListUnknown,
+ "totally made up key": ListUnknown,
+ } {
+ if expect != expect.FromName(input) {
+ t.Errorf("Expected %d for %s", expect, input)
+ }
+ // This is a cheap way to verify that ListAll actually allows everything but Unknown
+ if got := expect.FromName(input); got != ListUnknown && got&ListAll == 0 {
+ t.Errorf("Expected %s to match the ListAll filter", input)
+ }
+ }
+
+ filter := ListDeployed | ListPendingRollback
+ if status := filter.FromName("deployed"); filter&status == 0 {
+ t.Errorf("Expected %d to match mask %d", status, filter)
+ }
+ if status := filter.FromName("failed"); filter&status != 0 {
+ t.Errorf("Expected %d to fail to match mask %d", status, filter)
+ }
+}
+
+func TestList_Empty(t *testing.T) {
+ lister := NewList(actionConfigFixture(t))
+ list, err := lister.Run()
+ assert.NoError(t, err)
+ assert.Len(t, list, 0)
+}
+
+func newListFixture(t *testing.T) *List {
+ t.Helper()
+ return NewList(actionConfigFixture(t))
+}
+
+func TestList_OneNamespace(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ list, err := lister.Run()
+ is.NoError(err)
+ is.Len(list, 3)
+}
+
+func TestList_AllNamespaces(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ lister.AllNamespaces = true
+ lister.SetStateMask()
+ list, err := lister.Run()
+ is.NoError(err)
+ is.Len(list, 3)
+}
+
+func TestList_Sort(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Sort = ByNameDesc // Other sorts are tested elsewhere
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ l, err := lister.Run()
+ is.NoError(err)
+ list, err := releaseListToV1List(l)
+ is.NoError(err)
+
+ is.Len(list, 3)
+ is.Equal("two", list[0].Name)
+ is.Equal("three", list[1].Name)
+ is.Equal("one", list[2].Name)
+}
+
+func TestList_Limit(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Limit = 2
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ l, err := lister.Run()
+ is.NoError(err)
+ list, err := releaseListToV1List(l)
+ is.NoError(err)
+ is.Len(list, 2)
+ // Lex order means one, three, two
+ is.Equal("one", list[0].Name)
+ is.Equal("three", list[1].Name)
+}
+
+func TestList_BigLimit(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Limit = 20
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ l, err := lister.Run()
+ is.NoError(err)
+ list, err := releaseListToV1List(l)
+ is.NoError(err)
+ is.Len(list, 3)
+
+ // Lex order means one, three, two
+ is.Equal("one", list[0].Name)
+ is.Equal("three", list[1].Name)
+ is.Equal("two", list[2].Name)
+}
+
+func TestList_LimitOffset(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Limit = 2
+ lister.Offset = 1
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ l, err := lister.Run()
+ is.NoError(err)
+ list, err := releaseListToV1List(l)
+ is.NoError(err)
+ is.Len(list, 2)
+
+ // Lex order means one, three, two
+ is.Equal("three", list[0].Name)
+ is.Equal("two", list[1].Name)
+}
+
+func TestList_LimitOffsetOutOfBounds(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Limit = 2
+ lister.Offset = 3 // Last item is index 2
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ list, err := lister.Run()
+ is.NoError(err)
+ is.Len(list, 0)
+
+ lister.Limit = 10
+ lister.Offset = 1
+ list, err = lister.Run()
+ is.NoError(err)
+ is.Len(list, 2)
+}
+
+func TestList_StateMask(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ makeMeSomeReleases(t, lister.cfg.Releases)
+ oner, err := lister.cfg.Releases.Get("one", 1)
+ is.NoError(err)
+
+ var one release.Release
+ switch v := oner.(type) {
+ case release.Release:
+ one = v
+ case *release.Release:
+ one = *v
+ default:
+ t.Fatal("unsupported release type")
+ }
+
+ one.SetStatus(common.StatusUninstalled, "uninstalled")
+ err = lister.cfg.Releases.Update(one)
+ is.NoError(err)
+
+ res, err := lister.Run()
+ is.NoError(err)
+ is.Len(res, 3)
+
+ ac0, err := ri.NewAccessor(res[0])
+ is.NoError(err)
+ ac1, err := ri.NewAccessor(res[1])
+ is.NoError(err)
+ ac2, err := ri.NewAccessor(res[2])
+ is.NoError(err)
+
+ is.Equal("one", ac0.Name())
+ is.Equal("three", ac1.Name())
+ is.Equal("two", ac2.Name())
+
+ lister.StateMask = ListUninstalled
+ res, err = lister.Run()
+ is.NoError(err)
+ is.Len(res, 1)
+ ac0, err = ri.NewAccessor(res[0])
+ is.NoError(err)
+ is.Equal("one", ac0.Name())
+
+ lister.StateMask |= ListDeployed
+ res, err = lister.Run()
+ is.NoError(err)
+ is.Len(res, 3)
+}
+
+func TestList_StateMaskWithStaleRevisions(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.StateMask = ListFailed
+
+ makeMeSomeReleasesWithStaleFailure(t, lister.cfg.Releases)
+
+ res, err := lister.Run()
+
+ is.NoError(err)
+ is.Len(res, 1)
+
+ // "dirty" release should _not_ be present as most recent
+ // release is deployed despite failed release in past
+ ac0, err := ri.NewAccessor(res[0])
+ is.NoError(err)
+ is.Equal("failed", ac0.Name())
+}
+
+func makeMeSomeReleasesWithStaleFailure(t *testing.T, store *storage.Storage) {
+ t.Helper()
+ one := namedReleaseStub("clean", common.StatusDeployed)
+ one.Namespace = "default"
+ one.Version = 1
+
+ two := namedReleaseStub("dirty", common.StatusDeployed)
+ two.Namespace = "default"
+ two.Version = 1
+
+ three := namedReleaseStub("dirty", common.StatusFailed)
+ three.Namespace = "default"
+ three.Version = 2
+
+ four := namedReleaseStub("dirty", common.StatusDeployed)
+ four.Namespace = "default"
+ four.Version = 3
+
+ five := namedReleaseStub("failed", common.StatusFailed)
+ five.Namespace = "default"
+ five.Version = 1
+
+ for _, rel := range []*release.Release{one, two, three, four, five} {
+ if err := store.Create(rel); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ all, err := store.ListReleases()
+ assert.NoError(t, err)
+ assert.Len(t, all, 5, "sanity test: five items added")
+}
+
+func TestList_Filter(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Filter = "th."
+ makeMeSomeReleases(t, lister.cfg.Releases)
+
+ res, err := lister.Run()
+ is.NoError(err)
+ is.Len(res, 1)
+ ac0, err := ri.NewAccessor(res[0])
+ is.NoError(err)
+ is.Equal("three", ac0.Name())
+}
+
+func TestList_FilterFailsCompile(t *testing.T) {
+ is := assert.New(t)
+ lister := newListFixture(t)
+ lister.Filter = "t[h.{{{"
+ makeMeSomeReleases(t, lister.cfg.Releases)
+
+ _, err := lister.Run()
+ is.Error(err)
+}
+
+func makeMeSomeReleases(t *testing.T, store *storage.Storage) {
+ t.Helper()
+ one := releaseStub()
+ one.Name = "one"
+ one.Namespace = "default"
+ one.Version = 1
+ two := releaseStub()
+ two.Name = "two"
+ two.Namespace = "default"
+ two.Version = 2
+ three := releaseStub()
+ three.Name = "three"
+ three.Namespace = "default"
+ three.Version = 3
+
+ for _, rel := range []*release.Release{one, two, three} {
+ if err := store.Create(rel); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ all, err := store.ListReleases()
+ assert.NoError(t, err)
+ assert.Len(t, all, 3, "sanity test: three items added")
+}
+
+func TestFilterLatestReleases(t *testing.T) {
+ t.Run("should filter old versions of the same release", func(t *testing.T) {
+ r1 := releaseStub()
+ r1.Name = "r"
+ r1.Version = 1
+ r2 := releaseStub()
+ r2.Name = "r"
+ r2.Version = 2
+ another := releaseStub()
+ another.Name = "another"
+ another.Version = 1
+
+ filteredList := filterLatestReleases([]*release.Release{r1, r2, another})
+ expectedFilteredList := []*release.Release{r2, another}
+
+ assert.ElementsMatch(t, expectedFilteredList, filteredList)
+ })
+
+ t.Run("should not filter out any version across namespaces", func(t *testing.T) {
+ r1 := releaseStub()
+ r1.Name = "r"
+ r1.Namespace = "default"
+ r1.Version = 1
+ r2 := releaseStub()
+ r2.Name = "r"
+ r2.Namespace = "testing"
+ r2.Version = 2
+
+ filteredList := filterLatestReleases([]*release.Release{r1, r2})
+ expectedFilteredList := []*release.Release{r1, r2}
+
+ assert.ElementsMatch(t, expectedFilteredList, filteredList)
+ })
+}
+
+func TestSelectorList(t *testing.T) {
+ r1 := releaseStub()
+ r1.Name = "r1"
+ r1.Version = 1
+ r1.Labels = map[string]string{"key": "value1"}
+ r2 := releaseStub()
+ r2.Name = "r2"
+ r2.Version = 1
+ r2.Labels = map[string]string{"key": "value2"}
+ r3 := releaseStub()
+ r3.Name = "r3"
+ r3.Version = 1
+ r3.Labels = map[string]string{}
+
+ lister := newListFixture(t)
+ for _, rel := range []*release.Release{r1, r2, r3} {
+ if err := lister.cfg.Releases.Create(rel); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ t.Run("should fail selector parsing", func(t *testing.T) {
+ is := assert.New(t)
+ lister.Selector = "a?=b"
+
+ _, err := lister.Run()
+ is.Error(err)
+ })
+
+ t.Run("should select one release with matching label", func(t *testing.T) {
+ lister.Selector = "key==value1"
+ res, _ := lister.Run()
+
+ expectedFilteredList := []*release.Release{r1}
+ assert.ElementsMatch(t, expectedFilteredList, res)
+ })
+
+ t.Run("should select two releases with non matching label", func(t *testing.T) {
+ lister.Selector = "key!=value1"
+ res, _ := lister.Run()
+
+ expectedFilteredList := []*release.Release{r2, r3}
+ assert.ElementsMatch(t, expectedFilteredList, res)
+ })
+}
+
+func TestListRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ lister := NewList(config)
+ result, err := lister.Run()
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "connection refused")
+}
diff --git a/helm/pkg/action/package.go b/helm/pkg/action/package.go
new file mode 100644
index 000000000..0ab49538c
--- /dev/null
+++ b/helm/pkg/action/package.go
@@ -0,0 +1,256 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/Masterminds/semver/v3"
+ "golang.org/x/term"
+ "sigs.k8s.io/yaml"
+
+ ci "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/loader"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+// Package is the action for packaging a chart.
+//
+// It provides the implementation of 'helm package'.
+type Package struct {
+ Sign bool
+ Key string
+ Keyring string
+ PassphraseFile string
+ cachedPassphrase []byte
+ Version string
+ AppVersion string
+ Destination string
+ DependencyUpdate bool
+
+ RepositoryConfig string
+ RepositoryCache string
+ PlainHTTP bool
+ Username string
+ Password string
+ CertFile string
+ KeyFile string
+ CaFile string
+ InsecureSkipTLSVerify bool
+}
+
+const (
+ passPhraseFileStdin = "-"
+)
+
+// NewPackage creates a new Package object with the given configuration.
+func NewPackage() *Package {
+ return &Package{}
+}
+
+// Run executes 'helm package' against the given chart and returns the path to the packaged chart.
+func (p *Package) Run(path string, _ map[string]interface{}) (string, error) {
+ chrt, err := loader.LoadDir(path)
+ if err != nil {
+ return "", err
+ }
+ var ch *chart.Chart
+ switch c := chrt.(type) {
+ case *chart.Chart:
+ ch = c
+ case chart.Chart:
+ ch = &c
+ default:
+ return "", errors.New("invalid chart apiVersion")
+ }
+
+ ac, err := ci.NewAccessor(ch)
+ if err != nil {
+ return "", err
+ }
+
+ // If version is set, modify the version.
+ if p.Version != "" {
+ ch.Metadata.Version = p.Version
+ }
+
+ if err := validateVersion(ch.Metadata.Version); err != nil {
+ return "", err
+ }
+
+ if p.AppVersion != "" {
+ ch.Metadata.AppVersion = p.AppVersion
+ }
+
+ if reqs := ac.MetaDependencies(); len(reqs) > 0 {
+ if err := CheckDependencies(ch, reqs); err != nil {
+ return "", err
+ }
+ }
+
+ var dest string
+ if p.Destination == "." {
+ // Save to the current working directory.
+ dest, err = os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ } else {
+ // Otherwise save to set destination
+ dest = p.Destination
+ }
+
+ name, err := chartutil.Save(ch, dest)
+ if err != nil {
+ return "", fmt.Errorf("failed to save: %w", err)
+ }
+
+ if p.Sign {
+ err = p.Clearsign(name)
+ }
+
+ return name, err
+}
+
+// validateVersion Verify that version is a Version, and error out if it is not.
+func validateVersion(ver string) error {
+ if _, err := semver.NewVersion(ver); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Clearsign signs a chart
+func (p *Package) Clearsign(filename string) error {
+ // Load keyring
+ signer, err := provenance.NewFromKeyring(p.Keyring, p.Key)
+ if err != nil {
+ return err
+ }
+
+ passphraseFetcher := promptUser
+ if p.PassphraseFile != "" {
+ passphraseFetcher, err = p.passphraseFileFetcher(p.PassphraseFile, os.Stdin)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := signer.DecryptKey(passphraseFetcher); err != nil {
+ return err
+ }
+
+ // Load the chart archive to extract metadata
+ chrt, err := loader.LoadFile(filename)
+ if err != nil {
+ return fmt.Errorf("failed to load chart for signing: %w", err)
+ }
+ var ch *chart.Chart
+ switch c := chrt.(type) {
+ case *chart.Chart:
+ ch = c
+ case chart.Chart:
+ ch = &c
+ default:
+ return errors.New("invalid chart apiVersion")
+ }
+
+ // Marshal chart metadata to YAML bytes
+ metadataBytes, err := yaml.Marshal(ch.Metadata)
+ if err != nil {
+ return fmt.Errorf("failed to marshal chart metadata: %w", err)
+ }
+
+ // Read the chart archive file
+ archiveData, err := os.ReadFile(filename)
+ if err != nil {
+ return fmt.Errorf("failed to read chart archive: %w", err)
+ }
+
+ // Use the generic provenance signing function
+ sig, err := signer.ClearSign(archiveData, filepath.Base(filename), metadataBytes)
+ if err != nil {
+ return err
+ }
+
+ return os.WriteFile(filename+".prov", []byte(sig), 0644)
+}
+
+// promptUser implements provenance.PassphraseFetcher
+func promptUser(name string) ([]byte, error) {
+ fmt.Printf("Password for key %q > ", name)
+ // syscall.Stdin is not an int in all environments and needs to be coerced
+ // into one there (e.g., Windows)
+ pw, err := term.ReadPassword(int(syscall.Stdin))
+ fmt.Println()
+ return pw, err
+}
+
+func (p *Package) passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
+ // When reading from stdin we cache the passphrase here. If we are
+ // packaging multiple charts, we reuse the cached passphrase. This
+ // allows giving the passphrase once on stdin without failing with
+ // complaints about stdin already being closed.
+ //
+ // An alternative to this would be to omit file.Close() for stdin
+ // below and require the user to provide the same passphrase once
+ // per chart on stdin, but that does not seem very user-friendly.
+
+ if p.cachedPassphrase == nil {
+ file, err := openPassphraseFile(passphraseFile, stdin)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ reader := bufio.NewReader(file)
+ passphrase, _, err := reader.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ p.cachedPassphrase = passphrase
+
+ return func(_ string) ([]byte, error) {
+ return passphrase, nil
+ }, nil
+ }
+
+ return func(_ string) ([]byte, error) {
+ return p.cachedPassphrase, nil
+ }, nil
+}
+
+func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
+ if passphraseFile == passPhraseFileStdin {
+ stat, err := stdin.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if (stat.Mode() & os.ModeNamedPipe) == 0 {
+ return nil, errors.New("specified reading passphrase from stdin, without input on stdin")
+ }
+ return stdin, nil
+ }
+ return os.Open(passphraseFile)
+}
diff --git a/helm/pkg/action/package_test.go b/helm/pkg/action/package_test.go
new file mode 100644
index 000000000..84dcb71c1
--- /dev/null
+++ b/helm/pkg/action/package_test.go
@@ -0,0 +1,171 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "os"
+ "path"
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+func TestPassphraseFileFetcher(t *testing.T) {
+ secret := "secret"
+ directory := ensure.TempFile(t, "passphrase-file", []byte(secret))
+ testPkg := NewPackage()
+
+ fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
+ if err != nil {
+ t.Fatal("Unable to create passphraseFileFetcher", err)
+ }
+
+ passphrase, err := fetcher("key")
+ if err != nil {
+ t.Fatal("Unable to fetch passphrase")
+ }
+
+ if string(passphrase) != secret {
+ t.Errorf("Expected %s got %s", secret, string(passphrase))
+ }
+}
+
+func TestPassphraseFileFetcher_WithLineBreak(t *testing.T) {
+ secret := "secret"
+ directory := ensure.TempFile(t, "passphrase-file", []byte(secret+"\n\n."))
+ testPkg := NewPackage()
+
+ fetcher, err := testPkg.passphraseFileFetcher(path.Join(directory, "passphrase-file"), nil)
+ if err != nil {
+ t.Fatal("Unable to create passphraseFileFetcher", err)
+ }
+
+ passphrase, err := fetcher("key")
+ if err != nil {
+ t.Fatal("Unable to fetch passphrase")
+ }
+
+ if string(passphrase) != secret {
+ t.Errorf("Expected %s got %s", secret, string(passphrase))
+ }
+}
+
+func TestPassphraseFileFetcher_WithInvalidStdin(t *testing.T) {
+ directory := t.TempDir()
+ testPkg := NewPackage()
+
+ stdin, err := os.CreateTemp(directory, "non-existing")
+ if err != nil {
+ t.Fatal("Unable to create test file", err)
+ }
+
+ if _, err := testPkg.passphraseFileFetcher("-", stdin); err == nil {
+ t.Error("Expected passphraseFileFetcher returning an error")
+ }
+}
+
+func TestPassphraseFileFetcher_WithStdinAndMultipleFetches(t *testing.T) {
+ testPkg := NewPackage()
+ stdin, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal("Unable to create pipe", err)
+ }
+
+ passphrase := "secret-from-stdin"
+
+ go func() {
+ _, err = w.Write([]byte(passphrase + "\n"))
+ require.NoError(t, err)
+ }()
+
+ for range 4 {
+ fetcher, err := testPkg.passphraseFileFetcher("-", stdin)
+ if err != nil {
+ t.Errorf("Expected passphraseFileFetcher to not return an error, but got %v", err)
+ }
+
+ pass, err := fetcher("key")
+ if err != nil {
+ t.Errorf("Expected passphraseFileFetcher invocation to succeed, failed with %v", err)
+ }
+
+ if string(pass) != string(passphrase) {
+ t.Errorf("Expected multiple passphrase fetch to return %q, got %q", passphrase, pass)
+ }
+ }
+}
+
+func TestValidateVersion(t *testing.T) {
+ type args struct {
+ ver string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr error
+ }{
+ {
+ "normal semver version",
+ args{
+ ver: "1.1.3-23658",
+ },
+ nil,
+ },
+ {
+ "Pre version number starting with 0",
+ args{
+ ver: "1.1.3-023658",
+ },
+ semver.ErrSegmentStartsZero,
+ },
+ {
+ "Invalid version number",
+ args{
+ ver: "1.1.3.sd.023658",
+ },
+ semver.ErrInvalidSemVer,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validateVersion(tt.args.ver); err != nil {
+ if err != tt.wantErr {
+ t.Errorf("Expected {%v}, got {%v}", tt.wantErr, err)
+ }
+
+ }
+ })
+ }
+}
+
+func TestRun_ErrorPath(t *testing.T) {
+ client := NewPackage()
+ _, err := client.Run("err-path", nil)
+ require.Error(t, err)
+}
+
+func TestRun(t *testing.T) {
+ chartPath := "testdata/charts/chart-with-schema"
+ client := NewPackage()
+ filename, err := client.Run(chartPath, nil)
+ require.NoError(t, err)
+ require.Equal(t, "empty-0.1.0.tgz", filename)
+ require.NoError(t, os.Remove(filename))
+}
diff --git a/helm/pkg/action/pull.go b/helm/pkg/action/pull.go
new file mode 100644
index 000000000..dd051167b
--- /dev/null
+++ b/helm/pkg/action/pull.go
@@ -0,0 +1,175 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// Pull is the action for checking a given release's information.
+//
+// It provides the implementation of 'helm pull'.
+type Pull struct {
+ ChartPathOptions
+
+ Settings *cli.EnvSettings // TODO: refactor this out of pkg/action
+
+ Devel bool
+ Untar bool
+ VerifyLater bool
+ UntarDir string
+ DestDir string
+ cfg *Configuration
+}
+
+type PullOpt func(*Pull)
+
+func WithConfig(cfg *Configuration) PullOpt {
+ return func(p *Pull) {
+ p.cfg = cfg
+ }
+}
+
+// NewPull creates a new Pull with configuration options.
+func NewPull(opts ...PullOpt) *Pull {
+ p := &Pull{}
+ for _, fn := range opts {
+ fn(p)
+ }
+
+ return p
+}
+
+// SetRegistryClient sets the registry client on the pull configuration object.
+func (p *Pull) SetRegistryClient(client *registry.Client) {
+ p.cfg.RegistryClient = client
+}
+
+// Run executes 'helm pull' against the given release.
+func (p *Pull) Run(chartRef string) (string, error) {
+ var out strings.Builder
+
+ c := downloader.ChartDownloader{
+ Out: &out,
+ Keyring: p.Keyring,
+ Verify: downloader.VerifyNever,
+ Getters: getter.All(p.Settings),
+ Options: []getter.Option{
+ getter.WithBasicAuth(p.Username, p.Password),
+ getter.WithPassCredentialsAll(p.PassCredentialsAll),
+ getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile),
+ getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSVerify),
+ getter.WithPlainHTTP(p.PlainHTTP),
+ },
+ RegistryClient: p.cfg.RegistryClient,
+ RepositoryConfig: p.Settings.RepositoryConfig,
+ RepositoryCache: p.Settings.RepositoryCache,
+ ContentCache: p.Settings.ContentCache,
+ }
+
+ if registry.IsOCI(chartRef) {
+ c.Options = append(c.Options,
+ getter.WithRegistryClient(p.cfg.RegistryClient))
+ c.RegistryClient = p.cfg.RegistryClient
+ }
+
+ if p.Verify {
+ c.Verify = downloader.VerifyAlways
+ } else if p.VerifyLater {
+ c.Verify = downloader.VerifyLater
+ }
+
+ // If untar is set, we fetch to a tempdir, then untar and copy after
+ // verification.
+ dest := p.DestDir
+ if p.Untar {
+ var err error
+ dest, err = os.MkdirTemp("", "helm-")
+ if err != nil {
+ return out.String(), fmt.Errorf("failed to untar: %w", err)
+ }
+ defer os.RemoveAll(dest)
+ }
+
+ downloadSourceRef := chartRef
+ if p.RepoURL != "" {
+ chartURL, err := repo.FindChartInRepoURL(
+ p.RepoURL,
+ chartRef,
+ getter.All(p.Settings),
+ repo.WithChartVersion(p.Version),
+ repo.WithClientTLS(p.CertFile, p.KeyFile, p.CaFile),
+ repo.WithUsernamePassword(p.Username, p.Password),
+ repo.WithInsecureSkipTLSVerify(p.InsecureSkipTLSVerify),
+ repo.WithPassCredentialsAll(p.PassCredentialsAll),
+ )
+ if err != nil {
+ return out.String(), err
+ }
+ downloadSourceRef = chartURL
+ }
+
+ saved, v, err := c.DownloadTo(downloadSourceRef, p.Version, dest)
+ if err != nil {
+ return out.String(), err
+ }
+
+ if p.Verify {
+ for name := range v.SignedBy.Identities {
+ fmt.Fprintf(&out, "Signed by: %v\n", name)
+ }
+ fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", v.SignedBy.PrimaryKey.Fingerprint)
+ fmt.Fprintf(&out, "Chart Hash Verified: %s\n", v.FileHash)
+ }
+
+ // After verification, untar the chart into the requested directory.
+ if p.Untar {
+ ud := p.UntarDir
+ if !filepath.IsAbs(ud) {
+ ud = filepath.Join(p.DestDir, ud)
+ }
+ // Let udCheck to check conflict file/dir without replacing ud when untarDir is the current directory(.).
+ udCheck := ud
+ if udCheck == "." {
+ _, udCheck = filepath.Split(chartRef)
+ } else {
+ _, chartName := filepath.Split(chartRef)
+ udCheck = filepath.Join(udCheck, chartName)
+ }
+
+ if _, err := os.Stat(udCheck); err != nil {
+ if err := os.MkdirAll(udCheck, 0755); err != nil {
+ return out.String(), fmt.Errorf("failed to untar (mkdir): %w", err)
+ }
+ } else {
+ return out.String(), fmt.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck)
+ }
+
+ return out.String(), chartutil.ExpandFile(ud, saved)
+ }
+ return out.String(), nil
+}
diff --git a/helm/pkg/action/pull_test.go b/helm/pkg/action/pull_test.go
new file mode 100644
index 000000000..ba212973e
--- /dev/null
+++ b/helm/pkg/action/pull_test.go
@@ -0,0 +1,80 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+func TestNewPull(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewPull(WithConfig(config))
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
+
+func TestPullSetRegistryClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewPull(WithConfig(config))
+
+ registryClient := ®istry.Client{}
+ client.SetRegistryClient(registryClient)
+ assert.Equal(t, registryClient, client.cfg.RegistryClient)
+}
+
+func TestPullRun_ChartNotFound(t *testing.T) {
+ srv, err := startLocalServerForTests(t, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ config := actionConfigFixture(t)
+ client := NewPull(WithConfig(config))
+ client.Settings = cli.New()
+ client.RepoURL = srv.URL
+
+ chartRef := "nginx"
+ _, err = client.Run(chartRef)
+ require.ErrorContains(t, err, "404 Not Found")
+}
+
+func startLocalServerForTests(t *testing.T, handler http.Handler) (*httptest.Server, error) {
+ t.Helper()
+ if handler == nil {
+ fileBytes, err := os.ReadFile("../repo/v1/testdata/local-index.yaml")
+ if err != nil {
+ return nil, err
+ }
+ handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ _, err = w.Write(fileBytes)
+ require.NoError(t, err)
+ })
+ }
+
+ return httptest.NewServer(handler), nil
+}
diff --git a/helm/pkg/action/push.go b/helm/pkg/action/push.go
new file mode 100644
index 000000000..0c7148f65
--- /dev/null
+++ b/helm/pkg/action/push.go
@@ -0,0 +1,112 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io"
+ "strings"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/pusher"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/uploader"
+)
+
+// Push is the action for uploading a chart.
+//
+// It provides the implementation of 'helm push'.
+type Push struct {
+ Settings *cli.EnvSettings
+ cfg *Configuration
+ certFile string
+ keyFile string
+ caFile string
+ insecureSkipTLSVerify bool
+ plainHTTP bool
+ out io.Writer
+}
+
+// PushOpt is a type of function that sets options for a push action.
+type PushOpt func(*Push)
+
+// WithPushConfig sets the cfg field on the push configuration object.
+func WithPushConfig(cfg *Configuration) PushOpt {
+ return func(p *Push) {
+ p.cfg = cfg
+ }
+}
+
+// WithTLSClientConfig sets the certFile, keyFile, and caFile fields on the push configuration object.
+func WithTLSClientConfig(certFile, keyFile, caFile string) PushOpt {
+ return func(p *Push) {
+ p.certFile = certFile
+ p.keyFile = keyFile
+ p.caFile = caFile
+ }
+}
+
+// WithInsecureSkipTLSVerify determines if a TLS Certificate will be checked
+func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) PushOpt {
+ return func(p *Push) {
+ p.insecureSkipTLSVerify = insecureSkipTLSVerify
+ }
+}
+
+// WithPlainHTTP configures the use of plain HTTP connections.
+func WithPlainHTTP(plainHTTP bool) PushOpt {
+ return func(p *Push) {
+ p.plainHTTP = plainHTTP
+ }
+}
+
+// WithPushOptWriter sets the registryOut field on the push configuration object.
+func WithPushOptWriter(out io.Writer) PushOpt {
+ return func(p *Push) {
+ p.out = out
+ }
+}
+
+// NewPushWithOpts creates a new push, with configuration options.
+func NewPushWithOpts(opts ...PushOpt) *Push {
+ p := &Push{}
+ for _, fn := range opts {
+ fn(p)
+ }
+ return p
+}
+
+// Run executes 'helm push' against the given chart archive.
+func (p *Push) Run(chartRef string, remote string) (string, error) {
+ var out strings.Builder
+
+ c := uploader.ChartUploader{
+ Out: &out,
+ Pushers: pusher.All(p.Settings),
+ Options: []pusher.Option{
+ pusher.WithTLSClientConfig(p.certFile, p.keyFile, p.caFile),
+ pusher.WithInsecureSkipTLSVerify(p.insecureSkipTLSVerify),
+ pusher.WithPlainHTTP(p.plainHTTP),
+ },
+ }
+
+ if registry.IsOCI(remote) {
+ // Don't use the default registry client if tls options are set.
+ c.Options = append(c.Options, pusher.WithRegistryClient(p.cfg.RegistryClient))
+ }
+
+ return out.String(), c.UploadTo(chartRef, remote)
+}
diff --git a/helm/pkg/action/push_test.go b/helm/pkg/action/push_test.go
new file mode 100644
index 000000000..35c6f3efc
--- /dev/null
+++ b/helm/pkg/action/push_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewPushWithPushConfig(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewPushWithOpts(WithPushConfig(config))
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
+
+func TestNewPushWithTLSClientConfig(t *testing.T) {
+ certFile := "certFile"
+ keyFile := "keyFile"
+ caFile := "caFile"
+ client := NewPushWithOpts(WithTLSClientConfig(certFile, keyFile, caFile))
+
+ assert.NotNil(t, client)
+ assert.Equal(t, certFile, client.certFile)
+ assert.Equal(t, keyFile, client.keyFile)
+ assert.Equal(t, caFile, client.caFile)
+}
+
+func TestNewPushWithInsecureSkipTLSVerify(t *testing.T) {
+ client := NewPushWithOpts(WithInsecureSkipTLSVerify(true))
+
+ assert.NotNil(t, client)
+ assert.Equal(t, true, client.insecureSkipTLSVerify)
+}
+
+func TestNewPushWithPlainHTTP(t *testing.T) {
+ client := NewPushWithOpts(WithPlainHTTP(true))
+
+ assert.NotNil(t, client)
+ assert.Equal(t, true, client.plainHTTP)
+}
+
+func TestNewPushWithPushOptWriter(t *testing.T) {
+ buf := new(bytes.Buffer)
+ client := NewPushWithOpts(WithPushOptWriter(buf))
+
+ assert.NotNil(t, client)
+ assert.Equal(t, buf, client.out)
+}
diff --git a/helm/pkg/action/registry_login.go b/helm/pkg/action/registry_login.go
new file mode 100644
index 000000000..fd9d4bfc6
--- /dev/null
+++ b/helm/pkg/action/registry_login.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io"
+
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// RegistryLogin performs a registry login operation.
+type RegistryLogin struct {
+ cfg *Configuration
+ certFile string
+ keyFile string
+ caFile string
+ insecure bool
+ plainHTTP bool
+}
+
+type RegistryLoginOpt func(*RegistryLogin) error
+
+// WithCertFile specifies the path to the certificate file to use for TLS.
+func WithCertFile(certFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.certFile = certFile
+ return nil
+ }
+}
+
+// WithInsecure specifies whether to verify certificates.
+func WithInsecure(insecure bool) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.insecure = insecure
+ return nil
+ }
+}
+
+// WithKeyFile specifies the path to the key file to use for TLS.
+func WithKeyFile(keyFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.keyFile = keyFile
+ return nil
+ }
+}
+
+// WithCAFile specifies the path to the CA file to use for TLS.
+func WithCAFile(caFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.caFile = caFile
+ return nil
+ }
+}
+
+// WithPlainHTTPLogin use http rather than https for login.
+func WithPlainHTTPLogin(isPlain bool) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.plainHTTP = isPlain
+ return nil
+ }
+}
+
+// NewRegistryLogin creates a new RegistryLogin object with the given configuration.
+func NewRegistryLogin(cfg *Configuration) *RegistryLogin {
+ return &RegistryLogin{
+ cfg: cfg,
+ }
+}
+
+// Run executes the registry login operation
+func (a *RegistryLogin) Run(_ io.Writer, hostname string, username string, password string, opts ...RegistryLoginOpt) error {
+ for _, opt := range opts {
+ if err := opt(a); err != nil {
+ return err
+ }
+ }
+
+ return a.cfg.RegistryClient.Login(
+ hostname,
+ registry.LoginOptBasicAuth(username, password),
+ registry.LoginOptInsecure(a.insecure),
+ registry.LoginOptTLSClientConfig(a.certFile, a.keyFile, a.caFile),
+ registry.LoginOptPlainText(a.plainHTTP),
+ )
+}
diff --git a/helm/pkg/action/registry_login_test.go b/helm/pkg/action/registry_login_test.go
new file mode 100644
index 000000000..de2450d9d
--- /dev/null
+++ b/helm/pkg/action/registry_login_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewRegistryLogin(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogin(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
+
+func TestWithCertFile(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogin(config)
+
+ certFile := "testdata/cert.pem"
+ opt := WithCertFile(certFile)
+
+ assert.Nil(t, opt(client))
+ assert.Equal(t, certFile, client.certFile)
+}
+
+func TestWithInsecure(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogin(config)
+
+ opt := WithInsecure(true)
+
+ assert.Nil(t, opt(client))
+ assert.Equal(t, true, client.insecure)
+}
+
+func TestWithKeyFile(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogin(config)
+
+ keyFile := "testdata/key.pem"
+ opt := WithKeyFile(keyFile)
+
+ assert.Nil(t, opt(client))
+ assert.Equal(t, keyFile, client.keyFile)
+}
+
+func TestWithCAFile(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogin(config)
+
+ caFile := "testdata/ca.pem"
+ opt := WithCAFile(caFile)
+
+ assert.Nil(t, opt(client))
+ assert.Equal(t, caFile, client.caFile)
+}
+
+func TestWithPlainHTTPLogin(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogin(config)
+
+ opt := WithPlainHTTPLogin(true)
+
+ assert.Nil(t, opt(client))
+ assert.Equal(t, true, client.plainHTTP)
+}
diff --git a/helm/pkg/action/registry_logout.go b/helm/pkg/action/registry_logout.go
new file mode 100644
index 000000000..7ce92defc
--- /dev/null
+++ b/helm/pkg/action/registry_logout.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io"
+)
+
+// RegistryLogout performs a registry login operation.
+type RegistryLogout struct {
+ cfg *Configuration
+}
+
+// NewRegistryLogout creates a new RegistryLogout object with the given configuration.
+func NewRegistryLogout(cfg *Configuration) *RegistryLogout {
+ return &RegistryLogout{
+ cfg: cfg,
+ }
+}
+
+// Run executes the registry logout operation
+func (a *RegistryLogout) Run(_ io.Writer, hostname string) error {
+ return a.cfg.RegistryClient.Logout(hostname)
+}
diff --git a/helm/pkg/action/registry_logout_test.go b/helm/pkg/action/registry_logout_test.go
new file mode 100644
index 000000000..669d9c9ba
--- /dev/null
+++ b/helm/pkg/action/registry_logout_test.go
@@ -0,0 +1,31 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewRegistryLogout(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRegistryLogout(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
diff --git a/helm/pkg/action/release_testing.go b/helm/pkg/action/release_testing.go
new file mode 100644
index 000000000..043a41236
--- /dev/null
+++ b/helm/pkg/action/release_testing.go
@@ -0,0 +1,154 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "slices"
+ "sort"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
+ ri "helm.sh/helm/v4/pkg/release"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+const (
+ ExcludeNameFilter = "!name"
+ IncludeNameFilter = "name"
+)
+
+// ReleaseTesting is the action for testing a release.
+//
+// It provides the implementation of 'helm test'.
+type ReleaseTesting struct {
+ cfg *Configuration
+ Timeout time.Duration
+ WaitOptions []kube.WaitOption
+ // Used for fetching logs from test pods
+ Namespace string
+ Filters map[string][]string
+}
+
+// NewReleaseTesting creates a new ReleaseTesting object with the given configuration.
+func NewReleaseTesting(cfg *Configuration) *ReleaseTesting {
+ return &ReleaseTesting{
+ cfg: cfg,
+ Filters: map[string][]string{},
+ }
+}
+
+// Run executes 'helm test' against the given release.
+func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownFunc, error) {
+ if err := r.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, shutdownNoOp, err
+ }
+
+ if err := chartutil.ValidateReleaseName(name); err != nil {
+ return nil, shutdownNoOp, fmt.Errorf("releaseTest: Release name is invalid: %s", name)
+ }
+
+ // finds the non-deleted release with the given name
+ reli, err := r.cfg.Releases.Last(name)
+ if err != nil {
+ return reli, shutdownNoOp, err
+ }
+
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return reli, shutdownNoOp, err
+ }
+
+ skippedHooks := []*release.Hook{}
+ executingHooks := []*release.Hook{}
+ if len(r.Filters[ExcludeNameFilter]) != 0 {
+ for _, h := range rel.Hooks {
+ if slices.Contains(r.Filters[ExcludeNameFilter], h.Name) {
+ skippedHooks = append(skippedHooks, h)
+ } else {
+ executingHooks = append(executingHooks, h)
+ }
+ }
+ rel.Hooks = executingHooks
+ }
+ if len(r.Filters[IncludeNameFilter]) != 0 {
+ executingHooks = nil
+ for _, h := range rel.Hooks {
+ if slices.Contains(r.Filters[IncludeNameFilter], h.Name) {
+ executingHooks = append(executingHooks, h)
+ } else {
+ skippedHooks = append(skippedHooks, h)
+ }
+ }
+ rel.Hooks = executingHooks
+ }
+
+ serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply)
+ shutdown, err := r.cfg.execHookWithDelayedShutdown(rel, release.HookTest, kube.StatusWatcherStrategy, r.WaitOptions, r.Timeout, serverSideApply)
+
+ if err != nil {
+ rel.Hooks = append(skippedHooks, rel.Hooks...)
+ r.cfg.Releases.Update(reli)
+ return reli, shutdown, err
+ }
+
+ rel.Hooks = append(skippedHooks, rel.Hooks...)
+ return reli, shutdown, r.cfg.Releases.Update(reli)
+}
+
+// GetPodLogs will write the logs for all test pods in the given release into
+// the given writer. These can be immediately output to the user or captured for
+// other uses
+func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
+ client, err := r.cfg.KubernetesClientSet()
+ if err != nil {
+ return fmt.Errorf("unable to get kubernetes client to fetch pod logs: %w", err)
+ }
+
+ hooksByWight := append([]*release.Hook{}, rel.Hooks...)
+ sort.Stable(hookByWeight(hooksByWight))
+ for _, h := range hooksByWight {
+ for _, e := range h.Events {
+ if e == release.HookTest {
+ if slices.Contains(r.Filters[ExcludeNameFilter], h.Name) {
+ continue
+ }
+ if len(r.Filters[IncludeNameFilter]) > 0 && !slices.Contains(r.Filters[IncludeNameFilter], h.Name) {
+ continue
+ }
+ req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{})
+ logReader, err := req.Stream(context.Background())
+ if err != nil {
+ return fmt.Errorf("unable to get pod logs for %s: %w", h.Name, err)
+ }
+
+ fmt.Fprintf(out, "POD LOGS: %s\n", h.Name)
+ _, err = io.Copy(out, logReader)
+ fmt.Fprintln(out)
+ if err != nil {
+ return fmt.Errorf("unable to write pod logs for %s: %w", h.Name, err)
+ }
+ }
+ }
+ }
+ return nil
+}
diff --git a/helm/pkg/action/release_testing_test.go b/helm/pkg/action/release_testing_test.go
new file mode 100644
index 000000000..ab35e104a
--- /dev/null
+++ b/helm/pkg/action/release_testing_test.go
@@ -0,0 +1,119 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestNewReleaseTesting(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewReleaseTesting(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
+
+func TestReleaseTestingRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewReleaseTesting(config)
+ result, _, err := client.Run("")
+ assert.Nil(t, result)
+ assert.Error(t, err)
+}
+
+func TestReleaseTestingGetPodLogs_FilterEvents(t *testing.T) {
+ config := actionConfigFixture(t)
+ require.NoError(t, config.Init(cli.New().RESTClientGetter(), "", os.Getenv("HELM_DRIVER")))
+ client := NewReleaseTesting(config)
+ client.Filters[ExcludeNameFilter] = []string{"event-1"}
+ client.Filters[IncludeNameFilter] = []string{"event-3"}
+
+ hooks := []*release.Hook{
+ {
+ Name: "event-1",
+ Events: []release.HookEvent{release.HookTest},
+ },
+ {
+ Name: "event-2",
+ Events: []release.HookEvent{release.HookTest},
+ },
+ }
+
+ out := &bytes.Buffer{}
+ require.NoError(t, client.GetPodLogs(out, &release.Release{Hooks: hooks}))
+
+ assert.Empty(t, out.String())
+}
+
+func TestReleaseTestingGetPodLogs_PodRetrievalError(t *testing.T) {
+ config := actionConfigFixture(t)
+ require.NoError(t, config.Init(cli.New().RESTClientGetter(), "", os.Getenv("HELM_DRIVER")))
+ client := NewReleaseTesting(config)
+
+ hooks := []*release.Hook{
+ {
+ Name: "event-1",
+ Events: []release.HookEvent{release.HookTest},
+ },
+ }
+
+ require.ErrorContains(t, client.GetPodLogs(&bytes.Buffer{}, &release.Release{Hooks: hooks}), "unable to get pod logs")
+}
+
+func TestReleaseTesting_WaitOptionsPassedDownstream(t *testing.T) {
+ is := assert.New(t)
+ config := actionConfigFixture(t)
+
+ // Create a release with a test hook
+ rel := releaseStub()
+ rel.Name = "wait-options-test-release"
+ rel.ApplyMethod = "csa"
+ require.NoError(t, config.Releases.Create(rel))
+
+ client := NewReleaseTesting(config)
+
+ // Use WithWaitContext as a marker WaitOption that we can track
+ ctx := context.Background()
+ client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+ // Access the underlying FailingKubeClient to check recorded options
+ failer := config.KubeClient.(*kubefake.FailingKubeClient)
+
+ _, _, err := client.Run(rel.Name)
+ is.NoError(err)
+
+ // Verify that WaitOptions were passed to GetWaiter
+ is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/helm/pkg/action/resource_policy.go b/helm/pkg/action/resource_policy.go
new file mode 100644
index 000000000..fcea98ad6
--- /dev/null
+++ b/helm/pkg/action/resource_policy.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "strings"
+
+ "helm.sh/helm/v4/pkg/kube"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+)
+
+func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) {
+ for _, m := range manifests {
+ if m.Head.Metadata == nil || m.Head.Metadata.Annotations == nil || len(m.Head.Metadata.Annotations) == 0 {
+ remaining = append(remaining, m)
+ continue
+ }
+
+ resourcePolicyType, ok := m.Head.Metadata.Annotations[kube.ResourcePolicyAnno]
+ if !ok {
+ remaining = append(remaining, m)
+ continue
+ }
+
+ resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType))
+ if resourcePolicyType == kube.KeepPolicy {
+ keep = append(keep, m)
+ }
+
+ }
+ return keep, remaining
+}
diff --git a/helm/pkg/action/rollback.go b/helm/pkg/action/rollback.go
new file mode 100644
index 000000000..03150532e
--- /dev/null
+++ b/helm/pkg/action/rollback.go
@@ -0,0 +1,305 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+// Rollback is the action for rolling back to a given release.
+//
+// It provides the implementation of 'helm rollback'.
+type Rollback struct {
+ cfg *Configuration
+
+ Version int
+ Timeout time.Duration
+ WaitStrategy kube.WaitStrategy
+ WaitOptions []kube.WaitOption
+ WaitForJobs bool
+ DisableHooks bool
+ // DryRunStrategy can be set to prepare, but not execute the operation and whether or not to interact with the remote cluster
+ DryRunStrategy DryRunStrategy
+ // ForceReplace will, if set to `true`, ignore certain warnings and perform the rollback anyway.
+ //
+ // This should be used with caution.
+ ForceReplace bool
+ // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager")
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+ ForceConflicts bool
+ // ServerSideApply enables changes to be applied via Kubernetes server-side apply
+ // Can be the string: "true", "false" or "auto"
+ // When "auto", sever-side usage will be based upon the releases previous usage
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+ ServerSideApply string
+ CleanupOnFail bool
+ MaxHistory int // MaxHistory limits the maximum number of revisions saved per release
+}
+
+// NewRollback creates a new Rollback object with the given configuration.
+func NewRollback(cfg *Configuration) *Rollback {
+ return &Rollback{
+ cfg: cfg,
+ DryRunStrategy: DryRunNone,
+ }
+}
+
+// Run executes 'helm rollback' against the given release.
+func (r *Rollback) Run(name string) error {
+ if err := r.cfg.KubeClient.IsReachable(); err != nil {
+ return err
+ }
+
+ r.cfg.Releases.MaxHistory = r.MaxHistory
+
+ r.cfg.Logger().Debug("preparing rollback", "name", name)
+ currentRelease, targetRelease, serverSideApply, err := r.prepareRollback(name)
+ if err != nil {
+ return err
+ }
+
+ if !isDryRun(r.DryRunStrategy) {
+ r.cfg.Logger().Debug("creating rolled back release", "name", name)
+ if err := r.cfg.Releases.Create(targetRelease); err != nil {
+ return err
+ }
+ }
+
+ r.cfg.Logger().Debug("performing rollback", "name", name)
+ if _, err := r.performRollback(currentRelease, targetRelease, serverSideApply); err != nil {
+ return err
+ }
+
+ if !isDryRun(r.DryRunStrategy) {
+ r.cfg.Logger().Debug("updating status for rolled back release", "name", name)
+ if err := r.cfg.Releases.Update(targetRelease); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// prepareRollback finds the previous release and prepares a new release object with
+// the previous release's configuration
+func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, bool, error) {
+ if err := chartutil.ValidateReleaseName(name); err != nil {
+ return nil, nil, false, fmt.Errorf("prepareRollback: Release name is invalid: %s", name)
+ }
+
+ if r.Version < 0 {
+ return nil, nil, false, errInvalidRevision
+ }
+
+ currentReleasei, err := r.cfg.Releases.Last(name)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ currentRelease, err := releaserToV1Release(currentReleasei)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ previousVersion := r.Version
+ if r.Version == 0 {
+ previousVersion = currentRelease.Version - 1
+ }
+
+ historyReleases, err := r.cfg.Releases.History(name)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ // Check if the history version to be rolled back exists
+ previousVersionExist := false
+ for _, historyReleasei := range historyReleases {
+ historyRelease, err := releaserToV1Release(historyReleasei)
+ if err != nil {
+ return nil, nil, false, err
+ }
+ version := historyRelease.Version
+ if previousVersion == version {
+ previousVersionExist = true
+ break
+ }
+ }
+ if !previousVersionExist {
+ return nil, nil, false, fmt.Errorf("release has no %d version", previousVersion)
+ }
+
+ r.cfg.Logger().Debug("rolling back", "name", name, "currentVersion", currentRelease.Version, "targetVersion", previousVersion)
+
+ previousReleasei, err := r.cfg.Releases.Get(name, previousVersion)
+ if err != nil {
+ return nil, nil, false, err
+ }
+ previousRelease, err := releaserToV1Release(previousReleasei)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ serverSideApply, err := getUpgradeServerSideValue(r.ServerSideApply, previousRelease.ApplyMethod)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ // Store a new release object with previous release's configuration
+ targetRelease := &release.Release{
+ Name: name,
+ Namespace: currentRelease.Namespace,
+ Chart: previousRelease.Chart,
+ Config: previousRelease.Config,
+ Info: &release.Info{
+ FirstDeployed: currentRelease.Info.FirstDeployed,
+ LastDeployed: time.Now(),
+ Status: common.StatusPendingRollback,
+ Notes: previousRelease.Info.Notes,
+ // Because we lose the reference to previous version elsewhere, we set the
+ // message here, and only override it later if we experience failure.
+ Description: fmt.Sprintf("Rollback to %d", previousVersion),
+ },
+ Version: currentRelease.Version + 1,
+ Labels: previousRelease.Labels,
+ Manifest: previousRelease.Manifest,
+ Hooks: previousRelease.Hooks,
+ ApplyMethod: string(determineReleaseSSApplyMethod(serverSideApply)),
+ }
+
+ return currentRelease, targetRelease, serverSideApply, nil
+}
+
+func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release, serverSideApply bool) (*release.Release, error) {
+ if isDryRun(r.DryRunStrategy) {
+ r.cfg.Logger().Debug("dry run", "name", targetRelease.Name)
+ return targetRelease, nil
+ }
+
+ current, err := r.cfg.KubeClient.Build(bytes.NewBufferString(currentRelease.Manifest), false)
+ if err != nil {
+ return targetRelease, fmt.Errorf("unable to build kubernetes objects from current release manifest: %w", err)
+ }
+ target, err := r.cfg.KubeClient.Build(bytes.NewBufferString(targetRelease.Manifest), false)
+ if err != nil {
+ return targetRelease, fmt.Errorf("unable to build kubernetes objects from new release manifest: %w", err)
+ }
+
+ // pre-rollback hooks
+
+ if !r.DisableHooks {
+ if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil {
+ return targetRelease, err
+ }
+ } else {
+ r.cfg.Logger().Debug("rollback hooks disabled", "name", targetRelease.Name)
+ }
+
+ // It is safe to use "forceOwnership" here because these are resources currently rendered by the chart.
+ err = target.Visit(setMetadataVisitor(targetRelease.Name, targetRelease.Namespace, true))
+ if err != nil {
+ return targetRelease, fmt.Errorf("unable to set metadata visitor from target release: %w", err)
+ }
+ results, err := r.cfg.KubeClient.Update(
+ current,
+ target,
+ kube.ClientUpdateOptionForceReplace(r.ForceReplace),
+ kube.ClientUpdateOptionServerSideApply(serverSideApply, r.ForceConflicts),
+ kube.ClientUpdateOptionThreeWayMergeForUnstructured(false),
+ kube.ClientUpdateOptionUpgradeClientSideFieldManager(true))
+
+ if err != nil {
+ msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
+ r.cfg.Logger().Warn(msg)
+ currentRelease.Info.Status = common.StatusSuperseded
+ targetRelease.Info.Status = common.StatusFailed
+ targetRelease.Info.Description = msg
+ r.cfg.recordRelease(currentRelease)
+ r.cfg.recordRelease(targetRelease)
+ if r.CleanupOnFail {
+ r.cfg.Logger().Debug("cleanup on fail set, cleaning up resources", "count", len(results.Created))
+ _, errs := r.cfg.KubeClient.Delete(results.Created, metav1.DeletePropagationBackground)
+ if errs != nil {
+ return targetRelease, fmt.Errorf(
+ "an error occurred while cleaning up resources. original rollback error: %w",
+ fmt.Errorf("unable to cleanup resources: %w", joinErrors(errs, ", ")))
+ }
+ r.cfg.Logger().Debug("resource cleanup complete")
+ }
+ return targetRelease, err
+ }
+
+ var waiter kube.Waiter
+ if c, supportsOptions := r.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(r.WaitStrategy, r.WaitOptions...)
+ } else {
+ waiter, err = r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("unable to get waiter: %w", err)
+ }
+ if r.WaitForJobs {
+ if err := waiter.WaitWithJobs(target, r.Timeout); err != nil {
+ targetRelease.SetStatus(common.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
+ r.cfg.recordRelease(currentRelease)
+ r.cfg.recordRelease(targetRelease)
+ return targetRelease, fmt.Errorf("release %s failed: %w", targetRelease.Name, err)
+ }
+ } else {
+ if err := waiter.Wait(target, r.Timeout); err != nil {
+ targetRelease.SetStatus(common.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
+ r.cfg.recordRelease(currentRelease)
+ r.cfg.recordRelease(targetRelease)
+ return targetRelease, fmt.Errorf("release %s failed: %w", targetRelease.Name, err)
+ }
+ }
+
+ // post-rollback hooks
+ if !r.DisableHooks {
+ if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil {
+ return targetRelease, err
+ }
+ }
+
+ deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name)
+ if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) {
+ return nil, err
+ }
+ // Supersede all previous deployments, see issue #2941.
+ for _, reli := range deployed {
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return nil, err
+ }
+ r.cfg.Logger().Debug("superseding previous deployment", "version", rel.Version)
+ rel.Info.Status = common.StatusSuperseded
+ r.cfg.recordRelease(rel)
+ }
+
+ targetRelease.Info.Status = common.StatusDeployed
+
+ return targetRelease, nil
+}
diff --git a/helm/pkg/action/rollback_test.go b/helm/pkg/action/rollback_test.go
new file mode 100644
index 000000000..deb6c7c80
--- /dev/null
+++ b/helm/pkg/action/rollback_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+)
+
+func TestNewRollback(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewRollback(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+}
+
+func TestRollbackRun_UnreachableKubeClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewRollback(config)
+ assert.Error(t, client.Run(""))
+}
+
+func TestRollback_WaitOptionsPassedDownstream(t *testing.T) {
+ is := assert.New(t)
+ config := actionConfigFixture(t)
+
+ // Create a deployed release and a second version to roll back to
+ rel := releaseStub()
+ rel.Name = "wait-options-rollback"
+ rel.Info.Status = "deployed"
+ rel.ApplyMethod = "csa"
+ require.NoError(t, config.Releases.Create(rel))
+
+ rel2 := releaseStub()
+ rel2.Name = "wait-options-rollback"
+ rel2.Version = 2
+ rel2.Info.Status = "deployed"
+ rel2.ApplyMethod = "csa"
+ require.NoError(t, config.Releases.Create(rel2))
+
+ client := NewRollback(config)
+ client.Version = 1
+ client.WaitStrategy = kube.StatusWatcherStrategy
+ client.ServerSideApply = "auto"
+
+ // Use WithWaitContext as a marker WaitOption that we can track
+ ctx := context.Background()
+ client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+ // Access the underlying FailingKubeClient to check recorded options
+ failer := config.KubeClient.(*kubefake.FailingKubeClient)
+
+ err := client.Run(rel.Name)
+ is.NoError(err)
+
+ // Verify that WaitOptions were passed to GetWaiter
+ is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/helm/pkg/action/show.go b/helm/pkg/action/show.go
new file mode 100644
index 000000000..4195d69a5
--- /dev/null
+++ b/helm/pkg/action/show.go
@@ -0,0 +1,156 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "k8s.io/cli-runtime/pkg/printers"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// ShowOutputFormat is the format of the output of `helm show`
+type ShowOutputFormat string
+
+const (
+ // ShowAll is the format which shows all the information of a chart
+ ShowAll ShowOutputFormat = "all"
+ // ShowChart is the format which only shows the chart's definition
+ ShowChart ShowOutputFormat = "chart"
+ // ShowValues is the format which only shows the chart's values
+ ShowValues ShowOutputFormat = "values"
+ // ShowReadme is the format which only shows the chart's README
+ ShowReadme ShowOutputFormat = "readme"
+ // ShowCRDs is the format which only shows the chart's CRDs
+ ShowCRDs ShowOutputFormat = "crds"
+)
+
+var readmeFileNames = []string{"readme.md", "readme.txt", "readme"}
+
+func (o ShowOutputFormat) String() string {
+ return string(o)
+}
+
+// Show is the action for checking a given release's information.
+//
+// It provides the implementation of 'helm show' and its respective subcommands.
+type Show struct {
+ ChartPathOptions
+ Devel bool
+ OutputFormat ShowOutputFormat
+ JSONPathTemplate string
+ chart *chart.Chart // for testing
+}
+
+// NewShow creates a new Show object with the given configuration.
+func NewShow(output ShowOutputFormat, cfg *Configuration) *Show {
+ sh := &Show{
+ OutputFormat: output,
+ }
+ sh.registryClient = cfg.RegistryClient
+
+ return sh
+}
+
+// SetRegistryClient sets the registry client to use when pulling a chart from a registry.
+func (s *Show) SetRegistryClient(client *registry.Client) {
+ s.registryClient = client
+}
+
+// Run executes 'helm show' against the given release.
+func (s *Show) Run(chartpath string) (string, error) {
+ if s.chart == nil {
+ chrt, err := loader.Load(chartpath)
+ if err != nil {
+ return "", err
+ }
+ s.chart = chrt
+ }
+ cf, err := yaml.Marshal(s.chart.Metadata)
+ if err != nil {
+ return "", err
+ }
+
+ var out strings.Builder
+ if s.OutputFormat == ShowChart || s.OutputFormat == ShowAll {
+ fmt.Fprintf(&out, "%s\n", cf)
+ }
+
+ if (s.OutputFormat == ShowValues || s.OutputFormat == ShowAll) && s.chart.Values != nil {
+ if s.OutputFormat == ShowAll {
+ fmt.Fprintln(&out, "---")
+ }
+ if s.JSONPathTemplate != "" {
+ printer, err := printers.NewJSONPathPrinter(s.JSONPathTemplate)
+ if err != nil {
+ return "", fmt.Errorf("error parsing jsonpath %s: %w", s.JSONPathTemplate, err)
+ }
+ printer.Execute(&out, s.chart.Values)
+ } else {
+ for _, f := range s.chart.Raw {
+ if f.Name == chartutil.ValuesfileName {
+ fmt.Fprintln(&out, string(f.Data))
+ }
+ }
+ }
+ }
+
+ if s.OutputFormat == ShowReadme || s.OutputFormat == ShowAll {
+ readme := findReadme(s.chart.Files)
+ if readme != nil {
+ if s.OutputFormat == ShowAll {
+ fmt.Fprintln(&out, "---")
+ }
+ fmt.Fprintf(&out, "%s\n", readme.Data)
+ }
+ }
+
+ if s.OutputFormat == ShowCRDs || s.OutputFormat == ShowAll {
+ crds := s.chart.CRDObjects()
+ if len(crds) > 0 {
+ for _, crd := range crds {
+ if !bytes.HasPrefix(crd.File.Data, []byte("---")) {
+ fmt.Fprintln(&out, "---")
+ }
+ fmt.Fprintf(&out, "%s\n", string(crd.File.Data))
+ }
+ }
+ }
+ return out.String(), nil
+}
+
+func findReadme(files []*common.File) (file *common.File) {
+ for _, file := range files {
+ for _, n := range readmeFileNames {
+ if file == nil {
+ continue
+ }
+ if strings.EqualFold(file.Name, n) {
+ return file
+ }
+ }
+ }
+ return nil
+}
diff --git a/helm/pkg/action/show_test.go b/helm/pkg/action/show_test.go
new file mode 100644
index 000000000..6e270ac6d
--- /dev/null
+++ b/helm/pkg/action/show_test.go
@@ -0,0 +1,182 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+func TestShow(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewShow(ShowAll, config)
+ modTime := time.Now()
+ client.chart = &chart.Chart{
+ Metadata: &chart.Metadata{Name: "alpine"},
+ Files: []*common.File{
+ {Name: "README.md", ModTime: modTime, Data: []byte("README\n")},
+ {Name: "crds/ignoreme.txt", ModTime: modTime, Data: []byte("error")},
+ {Name: "crds/foo.yaml", ModTime: modTime, Data: []byte("---\nfoo\n")},
+ {Name: "crds/bar.json", ModTime: modTime, Data: []byte("---\nbar\n")},
+ {Name: "crds/baz.yaml", ModTime: modTime, Data: []byte("baz\n")},
+ },
+ Raw: []*common.File{
+ {Name: "values.yaml", ModTime: modTime, Data: []byte("VALUES\n")},
+ },
+ Values: map[string]interface{}{},
+ }
+
+ output, err := client.Run("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := `name: alpine
+
+---
+VALUES
+
+---
+README
+
+---
+foo
+
+---
+bar
+
+---
+baz
+
+`
+ if output != expect {
+ t.Errorf("Expected\n%q\nGot\n%q\n", expect, output)
+ }
+}
+
+func TestShowNoValues(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewShow(ShowAll, config)
+ client.chart = new(chart.Chart)
+
+ // Regression tests for missing values. See issue #1024.
+ client.OutputFormat = ShowValues
+ output, err := client.Run("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(output) != 0 {
+ t.Errorf("expected empty values buffer, got %s", output)
+ }
+}
+
+func TestShowValuesByJsonPathFormat(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewShow(ShowValues, config)
+ client.JSONPathTemplate = "{$.nestedKey.simpleKey}"
+ client.chart = buildChart(withSampleValues())
+ output, err := client.Run("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := "simpleValue"
+ if output != expect {
+ t.Errorf("Expected\n%q\nGot\n%q\n", expect, output)
+ }
+}
+
+func TestShowCRDs(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewShow(ShowCRDs, config)
+ modTime := time.Now()
+ client.chart = &chart.Chart{
+ Metadata: &chart.Metadata{Name: "alpine"},
+ Files: []*common.File{
+ {Name: "crds/ignoreme.txt", ModTime: modTime, Data: []byte("error")},
+ {Name: "crds/foo.yaml", ModTime: modTime, Data: []byte("---\nfoo\n")},
+ {Name: "crds/bar.json", ModTime: modTime, Data: []byte("---\nbar\n")},
+ {Name: "crds/baz.yaml", ModTime: modTime, Data: []byte("baz\n")},
+ },
+ }
+
+ output, err := client.Run("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := `---
+foo
+
+---
+bar
+
+---
+baz
+
+`
+ if output != expect {
+ t.Errorf("Expected\n%q\nGot\n%q\n", expect, output)
+ }
+}
+
+func TestShowNoReadme(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewShow(ShowAll, config)
+ modTime := time.Now()
+ client.chart = &chart.Chart{
+ Metadata: &chart.Metadata{Name: "alpine"},
+ Files: []*common.File{
+ {Name: "crds/ignoreme.txt", ModTime: modTime, Data: []byte("error")},
+ {Name: "crds/foo.yaml", ModTime: modTime, Data: []byte("---\nfoo\n")},
+ {Name: "crds/bar.json", ModTime: modTime, Data: []byte("---\nbar\n")},
+ },
+ }
+
+ output, err := client.Run("")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := `name: alpine
+
+---
+foo
+
+---
+bar
+
+`
+ if output != expect {
+ t.Errorf("Expected\n%q\nGot\n%q\n", expect, output)
+ }
+}
+
+func TestShowSetRegistryClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewShow(ShowAll, config)
+
+ registryClient := ®istry.Client{}
+ client.SetRegistryClient(registryClient)
+ assert.Equal(t, registryClient, client.registryClient)
+}
diff --git a/helm/pkg/action/status.go b/helm/pkg/action/status.go
new file mode 100644
index 000000000..2e6a1992c
--- /dev/null
+++ b/helm/pkg/action/status.go
@@ -0,0 +1,83 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+
+ "helm.sh/helm/v4/pkg/kube"
+ ri "helm.sh/helm/v4/pkg/release"
+)
+
+// Status is the action for checking the deployment status of releases.
+//
+// It provides the implementation of 'helm status'.
+type Status struct {
+ cfg *Configuration
+
+ Version int
+
+ // ShowResourcesTable is used with ShowResources. When true this will cause
+ // the resulting objects to be retrieved as a kind=table.
+ ShowResourcesTable bool
+}
+
+// NewStatus creates a new Status object with the given configuration.
+func NewStatus(cfg *Configuration) *Status {
+ return &Status{
+ cfg: cfg,
+ }
+}
+
+// Run executes 'helm status' against the given release.
+func (s *Status) Run(name string) (ri.Releaser, error) {
+ if err := s.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ reli, err := s.cfg.releaseContent(name, s.Version)
+ if err != nil {
+ return nil, err
+ }
+
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return nil, err
+ }
+
+ var resources kube.ResourceList
+ if s.ShowResourcesTable {
+ resources, err = s.cfg.KubeClient.BuildTable(bytes.NewBufferString(rel.Manifest), false)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ resources, err = s.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ resp, err := s.cfg.KubeClient.Get(resources, true)
+ if err != nil {
+ return nil, err
+ }
+
+ rel.Info.Resources = resp
+
+ return rel, nil
+}
diff --git a/helm/pkg/action/status_test.go b/helm/pkg/action/status_test.go
new file mode 100644
index 000000000..674715aff
--- /dev/null
+++ b/helm/pkg/action/status_test.go
@@ -0,0 +1,143 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestNewStatus(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewStatus(config)
+
+ assert.NotNil(t, client)
+ assert.Equal(t, config, client.cfg)
+ assert.Equal(t, 0, client.Version)
+}
+
+func TestStatusRun(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.BuildDummy = true
+ config.KubeClient = &failingKubeClient
+ client := NewStatus(config)
+ client.ShowResourcesTable = true
+
+ releaseName := "test-release"
+ require.NoError(t, configureReleaseContent(config, releaseName))
+ releaser, err := client.Run(releaseName)
+ require.NoError(t, err)
+
+ result, err := releaserToV1Release(releaser)
+ require.NoError(t, err)
+ assert.Equal(t, releaseName, result.Name)
+ assert.Equal(t, 1, result.Version)
+}
+
+func TestStatusRun_KubeClientNotReachable(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewStatus(config)
+
+ result, err := client.Run("")
+ assert.Nil(t, result)
+ assert.Error(t, err)
+}
+
+func TestStatusRun_KubeClientBuildTableError(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.BuildTableError = errors.New("build table error")
+ config.KubeClient = &failingKubeClient
+
+ releaseName := "test-release"
+ require.NoError(t, configureReleaseContent(config, releaseName))
+
+ client := NewStatus(config)
+ client.ShowResourcesTable = true
+
+ result, err := client.Run(releaseName)
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "build table error")
+}
+
+func TestStatusRun_KubeClientBuildError(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.BuildError = errors.New("build error")
+ config.KubeClient = &failingKubeClient
+
+ releaseName := "test-release"
+ require.NoError(t, configureReleaseContent(config, releaseName))
+
+ client := NewStatus(config)
+ client.ShowResourcesTable = false
+
+ result, err := client.Run(releaseName)
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "build error")
+}
+
+func TestStatusRun_KubeClientGetError(t *testing.T) {
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.BuildError = errors.New("get error")
+ config.KubeClient = &failingKubeClient
+
+ releaseName := "test-release"
+ require.NoError(t, configureReleaseContent(config, releaseName))
+ client := NewStatus(config)
+
+ result, err := client.Run(releaseName)
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "get error")
+}
+
+func configureReleaseContent(cfg *Configuration, releaseName string) error {
+ rel := &release.Release{
+ Name: releaseName,
+ Info: &release.Info{
+ Status: rcommon.StatusDeployed,
+ },
+ Manifest: testManifest,
+ Version: 1,
+ Namespace: "default",
+ }
+
+ return cfg.Releases.Create(rel)
+}
+
+const testManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ namespace: default
+ name: test-application
+`
diff --git a/helm/pkg/action/testdata/charts/chart-missing-deps/Chart.yaml b/helm/pkg/action/testdata/charts/chart-missing-deps/Chart.yaml
new file mode 100755
index 000000000..ba10ee803
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-missing-deps/Chart.yaml
@@ -0,0 +1,2 @@
+name: chart-with-missing-deps
+version: 2.1.8
diff --git a/helm/pkg/action/testdata/charts/chart-missing-deps/requirements.lock b/helm/pkg/action/testdata/charts/chart-missing-deps/requirements.lock
new file mode 100755
index 000000000..dcda2b142
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-missing-deps/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: mariadb
+ repository: https://charts.helm.sh/stable/
+ version: 4.3.1
+digest: sha256:82a0e5374376169d2ecf7d452c18a2ed93507f5d17c3393a1457f9ffad7e9b26
+generated: 2018-08-02T22:07:51.905271776Z
diff --git a/helm/pkg/action/testdata/charts/chart-missing-deps/requirements.yaml b/helm/pkg/action/testdata/charts/chart-missing-deps/requirements.yaml
new file mode 100755
index 000000000..fef7d0b7f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-missing-deps/requirements.yaml
@@ -0,0 +1,7 @@
+dependencies:
+- name: mariadb
+ version: 4.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - wordpress-database
diff --git a/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies-2.1.8.tgz b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies-2.1.8.tgz
new file mode 100644
index 000000000..7a22b1d82
Binary files /dev/null and b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies-2.1.8.tgz differ
diff --git a/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/Chart.yaml b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/Chart.yaml
new file mode 100755
index 000000000..1d16590b6
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/Chart.yaml
@@ -0,0 +1,2 @@
+name: chart-with-compressed-dependencies
+version: 2.1.8
diff --git a/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/charts/mariadb-4.3.1.tgz b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/charts/mariadb-4.3.1.tgz
new file mode 100644
index 000000000..5b38fa1c3
Binary files /dev/null and b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/charts/mariadb-4.3.1.tgz differ
diff --git a/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/requirements.lock b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/requirements.lock
new file mode 100755
index 000000000..dcda2b142
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: mariadb
+ repository: https://charts.helm.sh/stable/
+ version: 4.3.1
+digest: sha256:82a0e5374376169d2ecf7d452c18a2ed93507f5d17c3393a1457f9ffad7e9b26
+generated: 2018-08-02T22:07:51.905271776Z
diff --git a/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/requirements.yaml b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/requirements.yaml
new file mode 100755
index 000000000..fef7d0b7f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-compressed-dependencies/requirements.yaml
@@ -0,0 +1,7 @@
+dependencies:
+- name: mariadb
+ version: 4.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - wordpress-database
diff --git a/helm/pkg/action/testdata/charts/chart-with-no-templates-dir/Chart.yaml b/helm/pkg/action/testdata/charts/chart-with-no-templates-dir/Chart.yaml
new file mode 100644
index 000000000..d3458f6a2
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-no-templates-dir/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: chart-with-no-templates-dir
+description: an example chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema-negative/Chart.yaml b/helm/pkg/action/testdata/charts/chart-with-schema-negative/Chart.yaml
new file mode 100644
index 000000000..395d24f6a
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema-negative/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Empty testing chart
+home: https://k8s.io/helm
+name: empty
+sources:
+- https://github.com/kubernetes/helm
+version: 0.1.0
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema-negative/templates/empty.yaml b/helm/pkg/action/testdata/charts/chart-with-schema-negative/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema-negative/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema-negative/values.schema.json b/helm/pkg/action/testdata/charts/chart-with-schema-negative/values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema-negative/values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema-negative/values.yaml b/helm/pkg/action/testdata/charts/chart-with-schema-negative/values.yaml
new file mode 100644
index 000000000..5a1250bff
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema-negative/values.yaml
@@ -0,0 +1,14 @@
+firstname: John
+lastname: Doe
+age: -5
+likesCoffee: true
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema/Chart.yaml b/helm/pkg/action/testdata/charts/chart-with-schema/Chart.yaml
new file mode 100644
index 000000000..395d24f6a
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Empty testing chart
+home: https://k8s.io/helm
+name: empty
+sources:
+- https://github.com/kubernetes/helm
+version: 0.1.0
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema/extra-values.yaml b/helm/pkg/action/testdata/charts/chart-with-schema/extra-values.yaml
new file mode 100644
index 000000000..76c290c4f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema/extra-values.yaml
@@ -0,0 +1,2 @@
+age: -5
+employmentInfo: null
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema/templates/empty.yaml b/helm/pkg/action/testdata/charts/chart-with-schema/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema/values.schema.json b/helm/pkg/action/testdata/charts/chart-with-schema/values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema/values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/pkg/action/testdata/charts/chart-with-schema/values.yaml b/helm/pkg/action/testdata/charts/chart-with-schema/values.yaml
new file mode 100644
index 000000000..042dea664
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-schema/values.yaml
@@ -0,0 +1,17 @@
+firstname: John
+lastname: Doe
+age: 25
+likesCoffee: true
+employmentInfo:
+ title: Software Developer
+ salary: 100000
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies-2.1.8.tgz b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies-2.1.8.tgz
new file mode 100644
index 000000000..ad9e68179
Binary files /dev/null and b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies-2.1.8.tgz differ
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/.helmignore b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/.helmignore
new file mode 100755
index 000000000..e2cf7941f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/.helmignore
@@ -0,0 +1,5 @@
+.git
+# OWNERS file for Kubernetes
+OWNERS
+# example production yaml
+values-production.yaml
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/Chart.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/Chart.yaml
new file mode 100755
index 000000000..4d8569c89
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/Chart.yaml
@@ -0,0 +1,20 @@
+appVersion: 4.9.8
+description: Web publishing platform for building blogs and websites.
+engine: gotpl
+home: http://www.wordpress.com/
+icon: https://bitnami.com/assets/stacks/wordpress/img/wordpress-stack-220x234.png
+keywords:
+- wordpress
+- cms
+- blog
+- http
+- web
+- application
+- php
+maintainers:
+- email: containers@bitnami.com
+ name: bitnami-bot
+name: chart-with-uncompressed-dependencies
+sources:
+- https://github.com/bitnami/bitnami-docker-wordpress
+version: 2.1.8
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/README.md b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/README.md
new file mode 100755
index 000000000..341a1ad93
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/README.md
@@ -0,0 +1,3 @@
+# WordPress
+
+This is a testing mock, and is not operational.
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/.helmignore b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/.helmignore
new file mode 100755
index 000000000..6b8710a71
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/.helmignore
@@ -0,0 +1 @@
+.git
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/Chart.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/Chart.yaml
new file mode 100755
index 000000000..cefc15836
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/Chart.yaml
@@ -0,0 +1,21 @@
+appVersion: 10.1.34
+description: Fast, reliable, scalable, and easy to use open-source relational database
+ system. MariaDB Server is intended for mission-critical, heavy-load production systems
+ as well as for embedding into mass-deployed software. Highly available MariaDB cluster.
+engine: gotpl
+home: https://mariadb.org
+icon: https://bitnami.com/assets/stacks/mariadb/img/mariadb-stack-220x234.png
+keywords:
+- mariadb
+- mysql
+- database
+- sql
+- prometheus
+maintainers:
+- email: containers@bitnami.com
+ name: bitnami-bot
+name: mariadb
+sources:
+- https://github.com/bitnami/bitnami-docker-mariadb
+- https://github.com/prometheus/mysqld_exporter
+version: 4.3.1
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/README.md b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/README.md
new file mode 100755
index 000000000..3463b8b6d
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/README.md
@@ -0,0 +1,143 @@
+# MariaDB
+
+[MariaDB](https://mariadb.org) is one of the most popular database servers in the world. It’s made by the original developers of MySQL and guaranteed to stay open source. Notable users include Wikipedia, Facebook and Google.
+
+MariaDB is developed as open source software and as a relational database it provides an SQL interface for accessing data. The latest versions of MariaDB also include GIS and JSON features.
+
+## TL;DR
+
+```bash
+$ helm install stable/mariadb
+```
+
+## Introduction
+
+This chart bootstraps a [MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.4+ with Beta APIs enabled
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release stable/mariadb
+```
+
+The command deploys MariaDB on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the MariaDB chart and their default values.
+
+| Parameter | Description | Default |
+|-------------------------------------------|-----------------------------------------------------|-------------------------------------------------------------------|
+| `image.registry` | MariaDB image registry | `docker.io` |
+| `image.repository` | MariaDB Image name | `bitnami/mariadb` |
+| `image.tag` | MariaDB Image tag | `{VERSION}` |
+| `image.pullPolicy` | MariaDB image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` |
+| `image.pullSecrets` | Specify image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `service.type` | Kubernetes service type | `ClusterIP` |
+| `service.port` | MySQL service port | `3306` |
+| `rootUser.password` | Password for the `root` user | _random 10 character alphanumeric string_ |
+| `rootUser.forcePassword` | Force users to specify a password | `false` |
+| `db.user` | Username of new user to create | `nil` |
+| `db.password` | Password for the new user | _random 10 character alphanumeric string if `db.user` is defined_ |
+| `db.name` | Name for new database to create | `my_database` |
+| `replication.enabled` | MariaDB replication enabled | `true` |
+| `replication.user` | MariaDB replication user | `replicator` |
+| `replication.password` | MariaDB replication user password | _random 10 character alphanumeric string_ |
+| `master.antiAffinity` | Master pod anti-affinity policy | `soft` |
+| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
+| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
+| `master.persistence.storageClass` | Persistent Volume Storage Class | `` |
+| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
+| `master.persistence.size` | Persistent Volume Size | `8Gi` |
+| `master.config` | Config file for the MariaDB Master server | `_default values in the values.yaml file_` |
+| `master.resources` | CPU/Memory resource requests/limits for master node | `{}` |
+| `master.livenessProbe.enabled` | Turn on and off liveness probe (master) | `true` |
+| `master.livenessProbe.initialDelaySeconds`| Delay before liveness probe is initiated (master) | `120` |
+| `master.livenessProbe.periodSeconds` | How often to perform the probe (master) | `10` |
+| `master.livenessProbe.timeoutSeconds` | When the probe times out (master) | `1` |
+| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (master)| `1` |
+| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (master) | `3` |
+| `master.readinessProbe.enabled` | Turn on and off readiness probe (master) | `true` |
+| `master.readinessProbe.initialDelaySeconds`| Delay before readiness probe is initiated (master) | `15` |
+| `master.readinessProbe.periodSeconds` | How often to perform the probe (master) | `10` |
+| `master.readinessProbe.timeoutSeconds` | When the probe times out (master) | `1` |
+| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (master)| `1` |
+| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (master) | `3` |
+| `slave.replicas` | Desired number of slave replicas | `1` |
+| `slave.antiAffinity` | Slave pod anti-affinity policy | `soft` |
+| `slave.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
+| `slave.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
+| `slave.persistence.storageClass` | Persistent Volume Storage Class | `` |
+| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
+| `slave.persistence.size` | Persistent Volume Size | `8Gi` |
+| `slave.config` | Config file for the MariaDB Slave replicas | `_default values in the values.yaml file_` |
+| `slave.resources` | CPU/Memory resource requests/limits for slave node | `{}` |
+| `slave.livenessProbe.enabled` | Turn on and off liveness probe (slave) | `true` |
+| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (slave) | `120` |
+| `slave.livenessProbe.periodSeconds` | How often to perform the probe (slave) | `10` |
+| `slave.livenessProbe.timeoutSeconds` | When the probe times out (slave) | `1` |
+| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (slave) | `1` |
+| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave) | `3` |
+| `slave.readinessProbe.enabled` | Turn on and off readiness probe (slave) | `true` |
+| `slave.readinessProbe.initialDelaySeconds`| Delay before readiness probe is initiated (slave) | `15` |
+| `slave.readinessProbe.periodSeconds` | How often to perform the probe (slave) | `10` |
+| `slave.readinessProbe.timeoutSeconds` | When the probe times out (slave) | `1` |
+| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (slave) | `1` |
+| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave) | `3` |
+| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
+| `metrics.image.registry` | Exporter image registry | `docker.io` |
+`metrics.image.repository` | Exporter image name | `prom/mysqld-exporter` |
+| `metrics.image.tag` | Exporter image tag | `v0.10.0` |
+| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` |
+| `metrics.resources` | Exporter resource requests/limit | `nil` |
+
+The above parameters map to the env variables defined in [bitnami/mariadb](http://github.com/bitnami/bitnami-docker-mariadb). For more information please refer to the [bitnami/mariadb](http://github.com/bitnami/bitnami-docker-mariadb) image documentation.
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+ --set root.password=secretpassword,user.database=app_database \
+ stable/mariadb
+```
+
+The above command sets the MariaDB `root` account password to `secretpassword`. Additionally it creates a database named `my_database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/mariadb
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Initialize a fresh instance
+
+The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+## Persistence
+
+The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image stores the MariaDB data and configurations at the `/bitnami/mariadb` path of the container.
+
+The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning, by default. An existing PersistentVolumeClaim can be defined.
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/files/docker-entrypoint-initdb.d/README.md b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/files/docker-entrypoint-initdb.d/README.md
new file mode 100755
index 000000000..aaddde303
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/files/docker-entrypoint-initdb.d/README.md
@@ -0,0 +1,3 @@
+You can copy here your custom .sh, .sql or .sql.gz file so they are executed during the first boot of the image.
+
+More info in the [bitnami-docker-mariadb](https://github.com/bitnami/bitnami-docker-mariadb#initializing-a-new-instance) repository.
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/NOTES.txt b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/NOTES.txt
new file mode 100755
index 000000000..4ba3b668a
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/NOTES.txt
@@ -0,0 +1,35 @@
+
+Please be patient while the chart is being deployed
+
+Tip:
+
+ Watch the deployment status using the command: kubectl get pods -w --namespace {{ .Release.Namespace }} -l release={{ .Release.Name }}
+
+Services:
+
+ echo Master: {{ template "mariadb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }}
+{{- if .Values.replication.enabled }}
+ echo Slave: {{ template "slave.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }}
+{{- end }}
+
+Administrator credentials:
+
+ Username: root
+ Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode)
+
+To connect to your database
+
+ 1. Run a pod that you can use as a client:
+
+ kubectl run {{ template "mariadb.fullname" . }}-client --rm --tty -i --image {{ template "mariadb.image" . }} --namespace {{ .Release.Namespace }} --command -- bash
+
+ 2. To connect to master service (read/write):
+
+ mysql -h {{ template "mariadb.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local -uroot -p {{ .Values.db.name }}
+
+{{- if .Values.replication.enabled }}
+
+ 3. To connect to slave service (read-only):
+
+ mysql -h {{ template "slave.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local -uroot -p {{ .Values.db.name }}
+{{- end }}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/_helpers.tpl b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/_helpers.tpl
new file mode 100755
index 000000000..5afe380ff
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/_helpers.tpl
@@ -0,0 +1,53 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "mariadb.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "mariadb.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "master.fullname" -}}
+{{- if .Values.replication.enabled -}}
+{{- printf "%s-%s" .Release.Name "mariadb-master" | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name "mariadb" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+
+{{- define "slave.fullname" -}}
+{{- printf "%s-%s" .Release.Name "mariadb-slave" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "mariadb.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the proper image name
+*/}}
+{{- define "mariadb.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+
+{{/*
+Return the proper image name
+*/}}
+{{- define "metrics.image" -}}
+{{- $registryName := .Values.metrics.image.registry -}}
+{{- $repositoryName := .Values.metrics.image.repository -}}
+{{- $tag := .Values.metrics.image.tag | toString -}}
+{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/initialization-configmap.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/initialization-configmap.yaml
new file mode 100755
index 000000000..7bb969627
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/initialization-configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "master.fullname" . }}-init-scripts
+ labels:
+ app: {{ template "mariadb.name" . }}
+ component: "master"
+ chart: {{ template "mariadb.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*").AsConfig | indent 2 }}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-configmap.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-configmap.yaml
new file mode 100755
index 000000000..880a10198
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-configmap.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.master.config }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "master.fullname" . }}
+ labels:
+ app: {{ template "mariadb.name" . }}
+ component: "master"
+ chart: {{ template "mariadb.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+ my.cnf: |-
+{{ .Values.master.config | indent 4 }}
+{{- end -}}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-statefulset.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-statefulset.yaml
new file mode 100755
index 000000000..0d74f01ff
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-statefulset.yaml
@@ -0,0 +1,187 @@
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "master.fullname" . }}
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ chart: {{ template "mariadb.chart" . }}
+ component: "master"
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ serviceName: "{{ template "master.fullname" . }}"
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ component: "master"
+ release: "{{ .Release.Name }}"
+ chart: {{ template "mariadb.chart" . }}
+ spec:
+ securityContext:
+ runAsUser: 1001
+ fsGroup: 1001
+ {{- if eq .Values.master.antiAffinity "hard" }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ app: "{{ template "mariadb.name" . }}"
+ release: "{{ .Release.Name }}"
+ {{- else if eq .Values.master.antiAffinity "soft" }}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ app: "{{ template "mariadb.name" . }}"
+ release: "{{ .Release.Name }}"
+ {{- end }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ containers:
+ - name: "mariadb"
+ image: {{ template "mariadb.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ env:
+ - name: MARIADB_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-root-password
+ {{- if .Values.db.user }}
+ - name: MARIADB_USER
+ value: "{{ .Values.db.user }}"
+ - name: MARIADB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-password
+ {{- end }}
+ - name: MARIADB_DATABASE
+ value: "{{ .Values.db.name }}"
+ {{- if .Values.replication.enabled }}
+ - name: MARIADB_REPLICATION_MODE
+ value: "master"
+ - name: MARIADB_REPLICATION_USER
+ value: "{{ .Values.replication.user }}"
+ - name: MARIADB_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-replication-password
+ {{- end }}
+ ports:
+ - name: mysql
+ containerPort: 3306
+ {{- if .Values.master.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
+ initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.master.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
+ initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
+ {{- end }}
+ resources:
+{{ toYaml .Values.master.resources | indent 10 }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/mariadb
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+{{- if .Values.master.config }}
+ - name: config
+ mountPath: /opt/bitnami/mariadb/conf/my.cnf
+ subPath: my.cnf
+{{- end }}
+{{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ env:
+ - name: MARIADB_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-root-password
+ command: [ 'sh', '-c', 'DATA_SOURCE_NAME="root:$MARIADB_ROOT_PASSWORD@(localhost:3306)/" /bin/mysqld_exporter' ]
+ ports:
+ - name: metrics
+ containerPort: 9104
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: metrics
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: metrics
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ resources:
+{{ toYaml .Values.metrics.resources | indent 10 }}
+{{- end }}
+ volumes:
+ {{- if .Values.master.config }}
+ - name: config
+ configMap:
+ name: {{ template "master.fullname" . }}
+ {{- end }}
+ - name: custom-init-scripts
+ configMap:
+ name: {{ template "master.fullname" . }}-init-scripts
+{{- if .Values.master.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ chart: {{ template "mariadb.chart" . }}
+ component: "master"
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ spec:
+ accessModes:
+ {{- range .Values.master.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.master.persistence.size | quote }}
+ {{- if .Values.master.persistence.storageClass }}
+ {{- if (eq "-" .Values.master.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: {{ .Values.master.persistence.storageClass | quote }}
+ {{- end }}
+ {{- end }}
+{{- else }}
+ - name: "data"
+ emptyDir: {}
+{{- end }}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-svc.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-svc.yaml
new file mode 100755
index 000000000..460ec328e
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/master-svc.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "mariadb.fullname" . }}
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ component: "master"
+ chart: {{ template "mariadb.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- if .Values.metrics.enabled }}
+ annotations:
+{{ toYaml .Values.metrics.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - name: mysql
+ port: {{ .Values.service.port }}
+ targetPort: mysql
+{{- if .Values.metrics.enabled }}
+ - name: metrics
+ port: 9104
+ targetPort: metrics
+{{- end }}
+ selector:
+ app: "{{ template "mariadb.name" . }}"
+ component: "master"
+ release: "{{ .Release.Name }}"
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/secrets.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/secrets.yaml
new file mode 100755
index 000000000..17999d609
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/secrets.yaml
@@ -0,0 +1,38 @@
+{{- if (not .Values.rootUser.existingSecret) -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "mariadb.fullname" . }}
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ chart: {{ template "mariadb.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+type: Opaque
+data:
+ {{- if .Values.rootUser.password }}
+ mariadb-root-password: "{{ .Values.rootUser.password | b64enc }}"
+ {{- else if (not .Values.rootUser.forcePassword) }}
+ mariadb-root-password: "{{ randAlphaNum 10 | b64enc }}"
+ {{ else }}
+ mariadb-root-password: {{ required "A MariaDB Root Password is required!" .Values.rootUser.password }}
+ {{- end }}
+ {{- if .Values.db.user }}
+ {{- if .Values.db.password }}
+ mariadb-password: "{{ .Values.db.password | b64enc }}"
+ {{- else if (not .Values.db.forcePassword) }}
+ mariadb-password: "{{ randAlphaNum 10 | b64enc }}"
+ {{- else }}
+ mariadb-password: {{ required "A MariaDB Database Password is required!" .Values.db.password }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.replication.enabled }}
+ {{- if .Values.replication.password }}
+ mariadb-replication-password: "{{ .Values.replication.password | b64enc }}"
+ {{- else if (not .Values.replication.forcePassword) }}
+ mariadb-replication-password: "{{ randAlphaNum 10 | b64enc }}"
+ {{- else }}
+ mariadb-replication-password: {{ required "A MariaDB Replication Password is required!" .Values.replication.password }}
+ {{- end }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-configmap.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-configmap.yaml
new file mode 100755
index 000000000..056cf5c07
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-configmap.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.replication.enabled .Values.slave.config }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "slave.fullname" . }}
+ labels:
+ app: {{ template "mariadb.name" . }}
+ component: "slave"
+ chart: {{ template "mariadb.chart" . }}
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+data:
+ my.cnf: |-
+{{ .Values.slave.config | indent 4 }}
+{{- end }}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-statefulset.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-statefulset.yaml
new file mode 100755
index 000000000..aa67d4a70
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-statefulset.yaml
@@ -0,0 +1,193 @@
+{{- if .Values.replication.enabled }}
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "slave.fullname" . }}
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ chart: {{ template "mariadb.chart" . }}
+ component: "slave"
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ serviceName: "{{ template "slave.fullname" . }}"
+ replicas: {{ .Values.slave.replicas }}
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ component: "slave"
+ release: "{{ .Release.Name }}"
+ chart: {{ template "mariadb.chart" . }}
+ spec:
+ securityContext:
+ runAsUser: 1001
+ fsGroup: 1001
+ {{- if eq .Values.slave.antiAffinity "hard" }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ app: "{{ template "mariadb.name" . }}"
+ release: "{{ .Release.Name }}"
+ {{- else if eq .Values.slave.antiAffinity "soft" }}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ app: "{{ template "mariadb.name" . }}"
+ release: "{{ .Release.Name }}"
+ {{- end }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ containers:
+ - name: "mariadb"
+ image: {{ template "mariadb.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ env:
+ - name: MARIADB_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-root-password
+ {{- if .Values.db.user }}
+ - name: MARIADB_USER
+ value: "{{ .Values.db.user }}"
+ - name: MARIADB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-password
+ {{- end }}
+ - name: MARIADB_DATABASE
+ value: "{{ .Values.db.name }}"
+ - name: MARIADB_REPLICATION_MODE
+ value: "slave"
+ - name: MARIADB_MASTER_HOST
+ value: {{ template "mariadb.fullname" . }}
+ - name: MARIADB_MASTER_PORT
+ value: "3306"
+ - name: MARIADB_MASTER_USER
+ value: "root"
+ - name: MARIADB_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-root-password
+ - name: MARIADB_REPLICATION_USER
+ value: "{{ .Values.replication.user }}"
+ - name: MARIADB_REPLICATION_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-replication-password
+ ports:
+ - name: mysql
+ containerPort: 3306
+ {{- if .Values.slave.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
+ initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.slave.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.slave.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
+ initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.slave.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }}
+ {{- end }}
+ resources:
+{{ toYaml .Values.slave.resources | indent 10 }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/mariadb
+{{- if .Values.slave.config }}
+ - name: config
+ mountPath: /opt/bitnami/mariadb/conf/my.cnf
+ subPath: my.cnf
+{{- end }}
+{{- if .Values.metrics.enabled }}
+ - name: metrics
+ image: {{ template "metrics.image" . }}
+ imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+ env:
+ - name: MARIADB_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-root-password
+ command: [ 'sh', '-c', 'DATA_SOURCE_NAME="root:$MARIADB_ROOT_PASSWORD@(localhost:3306)/" /bin/mysqld_exporter' ]
+ ports:
+ - name: metrics
+ containerPort: 9104
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: metrics
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: metrics
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ resources:
+{{ toYaml .Values.metrics.resources | indent 10 }}
+{{- end }}
+ volumes:
+ {{- if .Values.slave.config }}
+ - name: config
+ configMap:
+ name: {{ template "slave.fullname" . }}
+ {{- end }}
+{{- if .Values.slave.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ chart: {{ template "mariadb.chart" . }}
+ component: "slave"
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+ spec:
+ accessModes:
+ {{- range .Values.slave.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.slave.persistence.size | quote }}
+ {{- if .Values.slave.persistence.storageClass }}
+ {{- if (eq "-" .Values.slave.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: {{ .Values.slave.persistence.storageClass | quote }}
+ {{- end }}
+ {{- end }}
+{{- else }}
+ - name: "data"
+ emptyDir: {}
+{{- end }}
+{{- end }}
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-svc.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-svc.yaml
new file mode 100755
index 000000000..fa551371f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/slave-svc.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.replication.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "slave.fullname" . }}
+ labels:
+ app: "{{ template "mariadb.name" . }}"
+ chart: {{ template "mariadb.chart" . }}
+ component: "slave"
+ release: {{ .Release.Name | quote }}
+ heritage: {{ .Release.Service | quote }}
+{{- if .Values.metrics.enabled }}
+ annotations:
+{{ toYaml .Values.metrics.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - name: mysql
+ port: {{ .Values.service.port }}
+ targetPort: mysql
+{{- if .Values.metrics.enabled }}
+ - name: metrics
+ port: 9104
+ targetPort: metrics
+{{- end }}
+ selector:
+ app: "{{ template "mariadb.name" . }}"
+ component: "slave"
+ release: "{{ .Release.Name }}"
+{{- end }}
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/test-runner.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/test-runner.yaml
new file mode 100755
index 000000000..99a85d4aa
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/test-runner.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ template "mariadb.fullname" . }}-test-{{ randAlphaNum 5 | lower }}"
+ annotations:
+ "helm.sh/hook": test-success
+spec:
+ initContainers:
+ - name: "test-framework"
+ image: "dduportal/bats:0.4.0"
+ command:
+ - "bash"
+ - "-c"
+ - |
+ set -ex
+ # copy bats to tools dir
+ cp -R /usr/local/libexec/ /tools/bats/
+ volumeMounts:
+ - mountPath: /tools
+ name: tools
+ containers:
+ - name: mariadb-test
+ image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
+ env:
+ - name: MARIADB_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "mariadb.fullname" . }}
+ key: mariadb-root-password
+ volumeMounts:
+ - mountPath: /tests
+ name: tests
+ readOnly: true
+ - mountPath: /tools
+ name: tools
+ volumes:
+ - name: tests
+ configMap:
+ name: {{ template "mariadb.fullname" . }}-tests
+ - name: tools
+ emptyDir: {}
+ restartPolicy: Never
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/tests.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/tests.yaml
new file mode 100755
index 000000000..957f3fd1e
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/templates/tests.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "mariadb.fullname" . }}-tests
+data:
+ run.sh: |-
+ @test "Testing MariaDB is accessible" {
+ mysql -h {{ template "mariadb.fullname" . }} -uroot -p$MARIADB_ROOT_PASSWORD -e 'show databases;'
+ }
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/values.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/values.yaml
new file mode 100755
index 000000000..ce2414e9f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/charts/mariadb/values.yaml
@@ -0,0 +1,233 @@
+## Bitnami MariaDB image
+## ref: https://hub.docker.com/r/bitnami/mariadb/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/mariadb
+ tag: 10.1.34-debian-9
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistrKeySecretName
+
+service:
+ ## Kubernetes service type
+ type: ClusterIP
+ port: 3306
+
+rootUser:
+ ## MariaDB admin password
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run
+ ##
+ password:
+ ## Use existing secret (ignores root, db and replication passwords)
+ # existingSecret:
+ ##
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not force, a random password will be generated.
+ forcePassword: false
+
+db:
+ ## MariaDB username and password
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-user-on-first-run
+ ##
+ user:
+ password:
+ ## Password is ignored if existingSecret is specified.
+ ## Database to create
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-on-first-run
+ ##
+ name: my_database
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not force, a random password will be generated.
+ forcePassword: false
+
+replication:
+ ## Enable replication. This enables the creation of replicas of MariaDB. If false, only a
+ ## master deployment would be created
+ enabled: true
+ ##
+ ## MariaDB replication user
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster
+ ##
+ user: replicator
+ ## MariaDB replication user password
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster
+ ##
+ password:
+ ## Password is ignored if existingSecret is specified.
+ ##
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not force, a random password will be generated.
+ forcePassword: false
+
+master:
+ antiAffinity: soft
+ ## Enable persistence using Persistent Volume Claims
+ ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## If true, use a Persistent Volume Claim, If false, use emptyDir
+ ##
+ enabled: true
+ ## Persistent Volume Storage Class
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ ## Persistent Volume Claim annotations
+ ##
+ annotations:
+ ## Persistent Volume Access Mode
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## Persistent Volume size
+ ##
+ size: 8Gi
+ ##
+
+ ## Configure MySQL with a custom my.cnf file
+ ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
+ ##
+ config: |-
+ [mysqld]
+ skip-name-resolve
+ explicit_defaults_for_timestamp
+ basedir=/opt/bitnami/mariadb
+ port=3306
+ socket=/opt/bitnami/mariadb/tmp/mysql.sock
+ tmpdir=/opt/bitnami/mariadb/tmp
+ max_allowed_packet=16M
+ bind-address=0.0.0.0
+ pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
+ log-error=/opt/bitnami/mariadb/logs/mysqld.log
+ character-set-server=UTF8
+ collation-server=utf8_general_ci
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mariadb/tmp/mysql.sock
+ default-character-set=UTF8
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mariadb/tmp/mysql.sock
+ pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
+
+ ## Configure master resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+ livenessProbe:
+ enabled: true
+ ##
+ ## Initializing the database could take some time
+ initialDelaySeconds: 120
+ ##
+ ## Default Kubernetes values
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 15
+ ##
+ ## Default Kubernetes values
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+slave:
+ replicas: 1
+ antiAffinity: soft
+ persistence:
+ ## If true, use a Persistent Volume Claim, If false, use emptyDir
+ ##
+ enabled: true
+ # storageClass: "-"
+ annotations:
+ accessModes:
+ - ReadWriteOnce
+ ## Persistent Volume size
+ ##
+ size: 8Gi
+ ##
+
+ ## Configure MySQL slave with a custom my.cnf file
+ ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
+ ##
+ config: |-
+ [mysqld]
+ skip-name-resolve
+ explicit_defaults_for_timestamp
+ basedir=/opt/bitnami/mariadb
+ port=3306
+ socket=/opt/bitnami/mariadb/tmp/mysql.sock
+ tmpdir=/opt/bitnami/mariadb/tmp
+ max_allowed_packet=16M
+ bind-address=0.0.0.0
+ pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
+ log-error=/opt/bitnami/mariadb/logs/mysqld.log
+ character-set-server=UTF8
+ collation-server=utf8_general_ci
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mariadb/tmp/mysql.sock
+ default-character-set=UTF8
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mariadb/tmp/mysql.sock
+ pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
+
+ ##
+ ## Configure slave resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+ livenessProbe:
+ enabled: true
+ ##
+ ## Initializing the database could take some time
+ initialDelaySeconds: 120
+ ##
+ ## Default Kubernetes values
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 15
+ ##
+ ## Default Kubernetes values
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+metrics:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: prom/mysqld-exporter
+ tag: v0.10.0
+ pullPolicy: IfNotPresent
+ resources: {}
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9104"
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/requirements.lock b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/requirements.lock
new file mode 100755
index 000000000..dcda2b142
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: mariadb
+ repository: https://charts.helm.sh/stable/
+ version: 4.3.1
+digest: sha256:82a0e5374376169d2ecf7d452c18a2ed93507f5d17c3393a1457f9ffad7e9b26
+generated: 2018-08-02T22:07:51.905271776Z
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/requirements.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/requirements.yaml
new file mode 100755
index 000000000..fef7d0b7f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/requirements.yaml
@@ -0,0 +1,7 @@
+dependencies:
+- name: mariadb
+ version: 4.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - wordpress-database
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/templates/NOTES.txt b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/templates/NOTES.txt
new file mode 100755
index 000000000..75ed9b64f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/templates/NOTES.txt
@@ -0,0 +1 @@
+Placeholder.
diff --git a/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/values.yaml b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/values.yaml
new file mode 100755
index 000000000..98c70aad4
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/chart-with-uncompressed-dependencies/values.yaml
@@ -0,0 +1,254 @@
+## Bitnami WordPress image version
+## ref: https://hub.docker.com/r/bitnami/wordpress/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/wordpress
+ tag: 4.9.8-debian-9
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistrKeySecretName
+
+## User of the application
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+wordpressUsername: user
+
+## Application password
+## Defaults to a random 10-character alphanumeric string if not set
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+# wordpressPassword:
+
+## Admin email
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+wordpressEmail: user@example.com
+
+## First name
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+wordpressFirstName: FirstName
+
+## Last name
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+wordpressLastName: LastName
+
+## Blog name
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+wordpressBlogName: User's Blog!
+
+## Table prefix
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+##
+wordpressTablePrefix: wp_
+
+## Set to `yes` to allow the container to be started with blank passwords
+## ref: https://github.com/bitnami/bitnami-docker-wordpress#environment-variables
+allowEmptyPassword: yes
+
+## SMTP mail delivery configuration
+## ref: https://github.com/bitnami/bitnami-docker-wordpress/#smtp-configuration
+##
+# smtpHost:
+# smtpPort:
+# smtpUser:
+# smtpPassword:
+# smtpUsername:
+# smtpProtocol:
+
+replicaCount: 1
+
+externalDatabase:
+## All of these values are only used when mariadb.enabled is set to false
+ ## Database host
+ host: localhost
+
+ ## non-root Username for WordPress Database
+ user: bn_wordpress
+
+ ## Database password
+ password: ""
+
+ ## Database name
+ database: bitnami_wordpress
+
+ ## Database port number
+ port: 3306
+
+##
+## MariaDB chart configuration
+##
+mariadb:
+ ## Whether to deploy a mariadb server to satisfy the applications database requirements. To use an external database set this to false and configure the externalDatabase parameters
+ enabled: true
+ ## Disable MariaDB replication
+ replication:
+ enabled: false
+
+ ## Create a database and a database user
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
+ ##
+ db:
+ name: bitnami_wordpress
+ user: bn_wordpress
+ ## If the password is not specified, mariadb will generate a random password
+ ##
+ # password:
+
+ ## MariaDB admin password
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
+ ##
+ # rootUser:
+ # password:
+
+ ## Enable persistence using Persistent Volume Claims
+ ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ master:
+ persistence:
+ enabled: true
+ ## mariadb data Persistent Volume Storage Class
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ accessMode: ReadWriteOnce
+ size: 8Gi
+
+## Kubernetes configuration
+## For minikube, set this to NodePort, elsewhere use LoadBalancer or ClusterIP
+##
+serviceType: LoadBalancer
+##
+## serviceType: NodePort
+## nodePorts:
+## http:
+## https:
+nodePorts:
+ http: ""
+ https: ""
+## Enable client source IP preservation
+## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+##
+serviceExternalTrafficPolicy: Cluster
+
+## Allow health checks to be pointed at the https port
+healthcheckHttps: false
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+livenessProbe:
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+readinessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+## Configure the ingress resource that allows you to access the
+## WordPress installation. Set up the URL
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+ ## Set to true to enable ingress record generation
+ enabled: false
+
+ ## The list of hostnames to be covered with this ingress record.
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+ hosts:
+ - name: wordpress.local
+
+ ## Set this to true in order to enable TLS on the ingress record
+ ## A side effect of this will be that the backend wordpress service will be connected at port 443
+ tls: false
+
+ ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+ tlsSecret: wordpress.local-tls
+
+ ## Ingress annotations done as key:value pairs
+ ## If you're using kube-lego, you will want to add:
+ ## kubernetes.io/tls-acme: true
+ ##
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
+ ##
+ ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+ annotations:
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: true
+
+ secrets:
+ ## If you're providing your own certificates, please use this to add the certificates as secrets
+ ## key and certificate should start with -----BEGIN CERTIFICATE----- or
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ##
+ ## name should line up with a tlsSecret set further up
+ ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
+ ##
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ # - name: wordpress.local-tls
+ # key:
+ # certificate:
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+ enabled: true
+ ## wordpress data Persistent Volume Storage Class
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ ##
+ ## If you want to reuse an existing claim, you can pass the name of the PVC using
+ ## the existingClaim variable
+ # existingClaim: your-claim
+ accessMode: ReadWriteOnce
+ size: 10Gi
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ requests:
+ memory: 512Mi
+ cpu: 300m
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
diff --git a/helm/pkg/action/testdata/charts/compressedchart-0.1.0.tar.gz b/helm/pkg/action/testdata/charts/compressedchart-0.1.0.tar.gz
new file mode 100644
index 000000000..3c9c24d76
Binary files /dev/null and b/helm/pkg/action/testdata/charts/compressedchart-0.1.0.tar.gz differ
diff --git a/helm/pkg/action/testdata/charts/compressedchart-0.1.0.tgz b/helm/pkg/action/testdata/charts/compressedchart-0.1.0.tgz
new file mode 100644
index 000000000..3c9c24d76
Binary files /dev/null and b/helm/pkg/action/testdata/charts/compressedchart-0.1.0.tgz differ
diff --git a/helm/pkg/action/testdata/charts/compressedchart-0.2.0.tgz b/helm/pkg/action/testdata/charts/compressedchart-0.2.0.tgz
new file mode 100644
index 000000000..16a644a79
Binary files /dev/null and b/helm/pkg/action/testdata/charts/compressedchart-0.2.0.tgz differ
diff --git a/helm/pkg/action/testdata/charts/compressedchart-0.3.0.tgz b/helm/pkg/action/testdata/charts/compressedchart-0.3.0.tgz
new file mode 100644
index 000000000..051bd6fd9
Binary files /dev/null and b/helm/pkg/action/testdata/charts/compressedchart-0.3.0.tgz differ
diff --git a/helm/pkg/action/testdata/charts/compressedchart-with-hyphens-0.1.0.tgz b/helm/pkg/action/testdata/charts/compressedchart-with-hyphens-0.1.0.tgz
new file mode 100644
index 000000000..379210a92
Binary files /dev/null and b/helm/pkg/action/testdata/charts/compressedchart-with-hyphens-0.1.0.tgz differ
diff --git a/helm/pkg/action/testdata/charts/corrupted-compressed-chart.tgz b/helm/pkg/action/testdata/charts/corrupted-compressed-chart.tgz
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/action/testdata/charts/decompressedchart/Chart.yaml b/helm/pkg/action/testdata/charts/decompressedchart/Chart.yaml
new file mode 100644
index 000000000..92ba4d88f
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/decompressedchart/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: decompressedchart
+version: 0.1.0
diff --git a/helm/pkg/action/testdata/charts/decompressedchart/values.yaml b/helm/pkg/action/testdata/charts/decompressedchart/values.yaml
new file mode 100644
index 000000000..a940d1fd9
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/decompressedchart/values.yaml
@@ -0,0 +1,4 @@
+# Default values for decompressedchart.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+ name: my-decompressed-chart
diff --git a/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/Chart.yaml b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/Chart.yaml
new file mode 100644
index 000000000..e33c97e8c
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: multiplecharts-lint-chart-1
+version: "1"
+icon: ""
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/templates/configmap.yaml b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/templates/configmap.yaml
new file mode 100644
index 000000000..88ebf2468
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/templates/configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+metadata:
+ name: multicharttest-chart1-configmap
+data:
+ dat: |
+ {{ .Values.config | indent 4 }}
diff --git a/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/values.yaml b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/values.yaml
new file mode 100644
index 000000000..aafb09e4b
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-1/values.yaml
@@ -0,0 +1 @@
+config: "Test"
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/Chart.yaml b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/Chart.yaml
new file mode 100644
index 000000000..b27de2754
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: multiplecharts-lint-chart-2
+version: "1"
+icon: ""
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/templates/configmap.yaml b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/templates/configmap.yaml
new file mode 100644
index 000000000..8484bfe6a
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/templates/configmap.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+metadata:
+ name: multicharttest-chart2-configmap
+data:
+ {{ toYaml .Values.config | indent 4 }}
diff --git a/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/values.yaml b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/values.yaml
new file mode 100644
index 000000000..9139f486e
--- /dev/null
+++ b/helm/pkg/action/testdata/charts/multiplecharts-lint-chart-2/values.yaml
@@ -0,0 +1,2 @@
+config:
+ test: "Test"
\ No newline at end of file
diff --git a/helm/pkg/action/testdata/charts/pre-release-chart-0.1.0-alpha.tgz b/helm/pkg/action/testdata/charts/pre-release-chart-0.1.0-alpha.tgz
new file mode 100644
index 000000000..5d5770fed
Binary files /dev/null and b/helm/pkg/action/testdata/charts/pre-release-chart-0.1.0-alpha.tgz differ
diff --git a/helm/pkg/action/testdata/output/list-compressed-deps-tgz.txt b/helm/pkg/action/testdata/output/list-compressed-deps-tgz.txt
new file mode 100644
index 000000000..6cc526b70
--- /dev/null
+++ b/helm/pkg/action/testdata/output/list-compressed-deps-tgz.txt
@@ -0,0 +1,3 @@
+NAME VERSION REPOSITORY STATUS
+mariadb 4.x.x https://kubernetes-charts.storage.googleapis.com/ unpacked
+
diff --git a/helm/pkg/action/testdata/output/list-compressed-deps.txt b/helm/pkg/action/testdata/output/list-compressed-deps.txt
new file mode 100644
index 000000000..08597f31e
--- /dev/null
+++ b/helm/pkg/action/testdata/output/list-compressed-deps.txt
@@ -0,0 +1,3 @@
+NAME VERSION REPOSITORY STATUS
+mariadb 4.x.x https://charts.helm.sh/stable/ ok
+
diff --git a/helm/pkg/action/testdata/output/list-missing-deps.txt b/helm/pkg/action/testdata/output/list-missing-deps.txt
new file mode 100644
index 000000000..03051251e
--- /dev/null
+++ b/helm/pkg/action/testdata/output/list-missing-deps.txt
@@ -0,0 +1,3 @@
+NAME VERSION REPOSITORY STATUS
+mariadb 4.x.x https://charts.helm.sh/stable/ missing
+
diff --git a/helm/pkg/action/testdata/output/list-uncompressed-deps-tgz.txt b/helm/pkg/action/testdata/output/list-uncompressed-deps-tgz.txt
new file mode 100644
index 000000000..6cc526b70
--- /dev/null
+++ b/helm/pkg/action/testdata/output/list-uncompressed-deps-tgz.txt
@@ -0,0 +1,3 @@
+NAME VERSION REPOSITORY STATUS
+mariadb 4.x.x https://kubernetes-charts.storage.googleapis.com/ unpacked
+
diff --git a/helm/pkg/action/testdata/output/list-uncompressed-deps.txt b/helm/pkg/action/testdata/output/list-uncompressed-deps.txt
new file mode 100644
index 000000000..bc59e825c
--- /dev/null
+++ b/helm/pkg/action/testdata/output/list-uncompressed-deps.txt
@@ -0,0 +1,3 @@
+NAME VERSION REPOSITORY STATUS
+mariadb 4.x.x https://charts.helm.sh/stable/ unpacked
+
diff --git a/helm/pkg/action/testdata/rbac.txt b/helm/pkg/action/testdata/rbac.txt
new file mode 100644
index 000000000..0cb15b868
--- /dev/null
+++ b/helm/pkg/action/testdata/rbac.txt
@@ -0,0 +1,25 @@
+---
+# Source: hello/templates/rbac
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: schedule-agents
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "pods/log"]
+ verbs: ["*"]
+---
+# Source: hello/templates/rbac
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: schedule-agents
+ namespace: spaced
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: schedule-agents
+subjects:
+- kind: ServiceAccount
+ name: schedule-agents
+ namespace: spaced
diff --git a/helm/pkg/action/uninstall.go b/helm/pkg/action/uninstall.go
new file mode 100644
index 000000000..79156991c
--- /dev/null
+++ b/helm/pkg/action/uninstall.go
@@ -0,0 +1,303 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "strings"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
+ releasei "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+// Uninstall is the action for uninstalling releases.
+//
+// It provides the implementation of 'helm uninstall'.
+type Uninstall struct {
+ cfg *Configuration
+
+ DisableHooks bool
+ DryRun bool
+ IgnoreNotFound bool
+ KeepHistory bool
+ WaitStrategy kube.WaitStrategy
+ WaitOptions []kube.WaitOption
+ DeletionPropagation string
+ Timeout time.Duration
+ Description string
+}
+
+// NewUninstall creates a new Uninstall object with the given configuration.
+func NewUninstall(cfg *Configuration) *Uninstall {
+ return &Uninstall{
+ cfg: cfg,
+ }
+}
+
+// Run uninstalls the given release.
+func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) {
+ if err := u.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ var waiter kube.Waiter
+ var err error
+ if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...)
+ } else {
+ waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if u.DryRun {
+ ri, err := u.cfg.releaseContent(name, 0)
+
+ if err != nil {
+ if u.IgnoreNotFound && errors.Is(err, driver.ErrReleaseNotFound) {
+ return nil, nil
+ }
+ return &releasei.UninstallReleaseResponse{}, err
+ }
+ r, err := releaserToV1Release(ri)
+ if err != nil {
+ return nil, err
+ }
+ return &releasei.UninstallReleaseResponse{Release: r}, nil
+ }
+
+ if err := chartutil.ValidateReleaseName(name); err != nil {
+ return nil, fmt.Errorf("uninstall: Release name is invalid: %s", name)
+ }
+
+ relsi, err := u.cfg.Releases.History(name)
+ if err != nil {
+ if u.IgnoreNotFound {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("uninstall: Release not loaded: %s: %w", name, err)
+ }
+ if len(relsi) < 1 {
+ return nil, errMissingRelease
+ }
+
+ rels, err := releaseListToV1List(relsi)
+ if err != nil {
+ return nil, err
+ }
+
+ releaseutil.SortByRevision(rels)
+ rel := rels[len(rels)-1]
+
+ // TODO: Are there any cases where we want to force a delete even if it's
+ // already marked deleted?
+ if rel.Info.Status == common.StatusUninstalled {
+ if !u.KeepHistory {
+ if err := u.purgeReleases(rels...); err != nil {
+ return nil, fmt.Errorf("uninstall: Failed to purge the release: %w", err)
+ }
+ return &releasei.UninstallReleaseResponse{Release: rel}, nil
+ }
+ return nil, fmt.Errorf("the release named %q is already deleted", name)
+ }
+
+ u.cfg.Logger().Debug("uninstall: deleting release", "name", name)
+ rel.Info.Status = common.StatusUninstalling
+ rel.Info.Deleted = time.Now()
+ rel.Info.Description = "Deletion in progress (or silently failed)"
+ res := &releasei.UninstallReleaseResponse{Release: rel}
+
+ if !u.DisableHooks {
+ serverSideApply := true
+ if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil {
+ return res, err
+ }
+ } else {
+ u.cfg.Logger().Debug("delete hooks disabled", "release", name)
+ }
+
+ // From here on out, the release is currently considered to be in StatusUninstalling
+ // state.
+ if err := u.cfg.Releases.Update(rel); err != nil {
+ u.cfg.Logger().Debug("uninstall: Failed to store updated release", slog.Any("error", err))
+ }
+
+ deletedResources, kept, errs := u.deleteRelease(rel)
+ if errs != nil {
+ u.cfg.Logger().Debug("uninstall: Failed to delete release", slog.Any("error", errs))
+ return nil, fmt.Errorf("failed to delete release: %s", name)
+ }
+
+ if kept != "" {
+ kept = "These resources were kept due to the resource policy:\n" + kept
+ }
+ res.Info = kept
+
+ if err := waiter.WaitForDelete(deletedResources, u.Timeout); err != nil {
+ errs = append(errs, err)
+ }
+
+ if !u.DisableHooks {
+ serverSideApply := true
+ if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ rel.Info.Status = common.StatusUninstalled
+ if len(u.Description) > 0 {
+ rel.Info.Description = u.Description
+ } else {
+ rel.Info.Description = "Uninstallation complete"
+ }
+
+ if !u.KeepHistory {
+ u.cfg.Logger().Debug("purge requested", "release", name)
+ err := u.purgeReleases(rels...)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("uninstall: Failed to purge the release: %w", err))
+ }
+
+ // Return the errors that occurred while deleting the release, if any
+ if len(errs) > 0 {
+ return res, fmt.Errorf("uninstallation completed with %d error(s): %w", len(errs), joinErrors(errs, "; "))
+ }
+
+ return res, nil
+ }
+
+ if err := u.cfg.Releases.Update(rel); err != nil {
+ u.cfg.Logger().Debug("uninstall: Failed to store updated release", slog.Any("error", err))
+ }
+
+ // Supersede all previous deployments, see issue #12556 (which is a
+ // variation on #2941).
+ deployed, err := u.cfg.Releases.DeployedAll(name)
+ if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) {
+ return nil, err
+ }
+ for _, reli := range deployed {
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return nil, err
+ }
+
+ u.cfg.Logger().Debug("superseding previous deployment", "version", rel.Version)
+ rel.Info.Status = common.StatusSuperseded
+ if err := u.cfg.Releases.Update(rel); err != nil {
+ u.cfg.Logger().Debug("uninstall: Failed to store updated release", slog.Any("error", err))
+ }
+ }
+
+ if len(errs) > 0 {
+ return res, fmt.Errorf("uninstallation completed with %d error(s): %w", len(errs), joinErrors(errs, "; "))
+ }
+ return res, nil
+}
+
+func (u *Uninstall) purgeReleases(rels ...*release.Release) error {
+ for _, rel := range rels {
+ if _, err := u.cfg.Releases.Delete(rel.Name, rel.Version); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type joinedErrors struct {
+ errs []error
+ sep string
+}
+
+func joinErrors(errs []error, sep string) error {
+ return &joinedErrors{
+ errs: errs,
+ sep: sep,
+ }
+}
+
+func (e *joinedErrors) Error() string {
+ errs := make([]string, 0, len(e.errs))
+ for _, err := range e.errs {
+ errs = append(errs, err.Error())
+ }
+ return strings.Join(errs, e.sep)
+}
+
+func (e *joinedErrors) Unwrap() []error {
+ return e.errs
+}
+
+// deleteRelease deletes the release and returns list of delete resources and manifests that were kept in the deletion process
+func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, string, []error) {
+ var errs []error
+
+ manifests := releaseutil.SplitManifests(rel.Manifest)
+ _, files, err := releaseutil.SortManifests(manifests, nil, releaseutil.UninstallOrder)
+ if err != nil {
+ // We could instead just delete everything in no particular order.
+ // FIXME: One way to delete at this point would be to try a label-based
+ // deletion. The problem with this is that we could get a false positive
+ // and delete something that was not legitimately part of this release.
+ return nil, rel.Manifest, []error{fmt.Errorf("corrupted release record. You must manually delete the resources: %w", err)}
+ }
+
+ filesToKeep, filesToDelete := filterManifestsToKeep(files)
+ var kept strings.Builder
+ for _, f := range filesToKeep {
+ fmt.Fprintf(&kept, "[%s] %s\n", f.Head.Kind, f.Head.Metadata.Name)
+ }
+
+ var builder strings.Builder
+ for _, file := range filesToDelete {
+ builder.WriteString("\n---\n" + file.Content)
+ }
+
+ resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false)
+ if err != nil {
+ return nil, "", []error{fmt.Errorf("unable to build kubernetes objects for delete: %w", err)}
+ }
+ if len(resources) > 0 {
+ _, errs = u.cfg.KubeClient.Delete(resources, parseCascadingFlag(u.DeletionPropagation))
+ }
+ return resources, kept.String(), errs
+}
+
+func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
+ switch cascadingFlag {
+ case "orphan":
+ return v1.DeletePropagationOrphan
+ case "foreground":
+ return v1.DeletePropagationForeground
+ case "background":
+ return v1.DeletePropagationBackground
+ default:
+ slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
+ return v1.DeletePropagationBackground
+ }
+}
diff --git a/helm/pkg/action/uninstall_test.go b/helm/pkg/action/uninstall_test.go
new file mode 100644
index 000000000..b5a76d983
--- /dev/null
+++ b/helm/pkg/action/uninstall_test.go
@@ -0,0 +1,209 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+func uninstallAction(t *testing.T) *Uninstall {
+ t.Helper()
+ config := actionConfigFixture(t)
+ unAction := NewUninstall(config)
+ return unAction
+}
+
+func TestUninstallRelease_dryRun_ignoreNotFound(t *testing.T) {
+ unAction := uninstallAction(t)
+ unAction.DryRun = true
+ unAction.IgnoreNotFound = true
+
+ is := assert.New(t)
+ res, err := unAction.Run("release-non-exist")
+ is.Nil(res)
+ is.NoError(err)
+}
+
+func TestUninstallRelease_ignoreNotFound(t *testing.T) {
+ unAction := uninstallAction(t)
+ unAction.DryRun = false
+ unAction.IgnoreNotFound = true
+
+ is := assert.New(t)
+ res, err := unAction.Run("release-non-exist")
+ is.Nil(res)
+ is.NoError(err)
+}
+func TestUninstallRelease_deleteRelease(t *testing.T) {
+ is := assert.New(t)
+
+ unAction := uninstallAction(t)
+ unAction.DisableHooks = true
+ unAction.DryRun = false
+ unAction.KeepHistory = true
+
+ rel := releaseStub()
+ rel.Name = "keep-secret"
+ rel.Manifest = `{
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "metadata": {
+ "name": "secret",
+ "annotations": {
+ "helm.sh/resource-policy": "keep"
+ }
+ },
+ "type": "Opaque",
+ "data": {
+ "password": "password"
+ }
+ }`
+ require.NoError(t, unAction.cfg.Releases.Create(rel))
+ res, err := unAction.Run(rel.Name)
+ is.NoError(err)
+ expected := `These resources were kept due to the resource policy:
+[Secret] secret
+`
+ is.Contains(res.Info, expected)
+}
+
+func TestUninstallRelease_Wait(t *testing.T) {
+ is := assert.New(t)
+
+ unAction := uninstallAction(t)
+ unAction.DisableHooks = true
+ unAction.DryRun = false
+ unAction.WaitStrategy = kube.StatusWatcherStrategy
+
+ rel := releaseStub()
+ rel.Name = "come-fail-away"
+ rel.Manifest = `{
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "metadata": {
+ "name": "secret"
+ },
+ "type": "Opaque",
+ "data": {
+ "password": "password"
+ }
+ }`
+ require.NoError(t, unAction.cfg.Releases.Create(rel))
+ failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitForDeleteError = fmt.Errorf("U timed out")
+ unAction.cfg.KubeClient = failer
+ resi, err := unAction.Run(rel.Name)
+ is.Error(err)
+ is.Contains(err.Error(), "U timed out")
+ res, err := releaserToV1Release(resi.Release)
+ is.NoError(err)
+ is.Equal(res.Info.Status, common.StatusUninstalled)
+}
+
+func TestUninstallRelease_Cascade(t *testing.T) {
+ is := assert.New(t)
+
+ unAction := uninstallAction(t)
+ unAction.DisableHooks = true
+ unAction.DryRun = false
+ unAction.WaitStrategy = kube.HookOnlyStrategy
+ unAction.DeletionPropagation = "foreground"
+
+ rel := releaseStub()
+ rel.Name = "come-fail-away"
+ rel.Manifest = `{
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "metadata": {
+ "name": "secret"
+ },
+ "type": "Opaque",
+ "data": {
+ "password": "password"
+ }
+ }`
+ require.NoError(t, unAction.cfg.Releases.Create(rel))
+ failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.DeleteError = fmt.Errorf("Uninstall with cascade failed")
+ failer.BuildDummy = true
+ unAction.cfg.KubeClient = failer
+ _, err := unAction.Run(rel.Name)
+ require.Error(t, err)
+ is.Contains(err.Error(), "failed to delete release: come-fail-away")
+}
+
+func TestUninstallRun_UnreachableKubeClient(t *testing.T) {
+ t.Helper()
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewUninstall(config)
+ result, err := client.Run("")
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "connection refused")
+}
+
+func TestUninstall_WaitOptionsPassedDownstream(t *testing.T) {
+ is := assert.New(t)
+
+ unAction := uninstallAction(t)
+ unAction.DisableHooks = true
+ unAction.DryRun = false
+ unAction.WaitStrategy = kube.StatusWatcherStrategy
+
+ // Use WithWaitContext as a marker WaitOption that we can track
+ ctx := context.Background()
+ unAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+ rel := releaseStub()
+ rel.Name = "wait-options-uninstall"
+ rel.Manifest = `{
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "metadata": {
+ "name": "secret"
+ },
+ "type": "Opaque",
+ "data": {
+ "password": "password"
+ }
+ }`
+ require.NoError(t, unAction.cfg.Releases.Create(rel))
+
+ // Access the underlying FailingKubeClient to check recorded options
+ failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+
+ _, err := unAction.Run(rel.Name)
+ is.NoError(err)
+
+ // Verify that WaitOptions were passed to GetWaiter
+ is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/helm/pkg/action/upgrade.go b/helm/pkg/action/upgrade.go
new file mode 100644
index 000000000..4b99be603
--- /dev/null
+++ b/helm/pkg/action/upgrade.go
@@ -0,0 +1,675 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "strings"
+ "sync"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ chartv2 "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
+ "helm.sh/helm/v4/pkg/postrenderer"
+ "helm.sh/helm/v4/pkg/registry"
+ ri "helm.sh/helm/v4/pkg/release"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+// Upgrade is the action for upgrading releases.
+//
+// It provides the implementation of 'helm upgrade'.
+type Upgrade struct {
+ cfg *Configuration
+
+ ChartPathOptions
+
+ // Install is a purely informative flag that indicates whether this upgrade was done in "install" mode.
+ //
+ // Applications may use this to determine whether this Upgrade operation was done as part of a
+ // pure upgrade (Upgrade.Install == false) or as part of an install-or-upgrade operation
+ // (Upgrade.Install == true).
+ //
+ // Setting this to `true` will NOT cause `Upgrade` to perform an install if the release does not exist.
+ // That process must be handled by creating an Install action directly. See cmd/upgrade.go for an
+ // example of how this flag is used.
+ Install bool
+ // Devel indicates that the operation is done in devel mode.
+ Devel bool
+ // Namespace is the namespace in which this operation should be performed.
+ Namespace string
+ // SkipCRDs skips installing CRDs when install flag is enabled during upgrade
+ SkipCRDs bool
+ // Timeout is the timeout for this operation
+ Timeout time.Duration
+ // WaitStrategy determines what type of waiting should be done
+ WaitStrategy kube.WaitStrategy
+ // WaitOptions are additional options for waiting on resources
+ WaitOptions []kube.WaitOption
+ // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
+ WaitForJobs bool
+ // DisableHooks disables hook processing if set to true.
+ DisableHooks bool
+ // DryRunStrategy can be set to prepare, but not execute the operation and whether or not to interact with the remote cluster
+ DryRunStrategy DryRunStrategy
+ // HideSecret can be set to true when DryRun is enabled in order to hide
+ // Kubernetes Secrets in the output. It cannot be used outside of DryRun.
+ HideSecret bool
+ // ForceReplace will, if set to `true`, ignore certain warnings and perform the upgrade anyway.
+ //
+ // This should be used with caution.
+ ForceReplace bool
+ // ForceConflicts causes server-side apply to force conflicts ("Overwrite value, become sole manager")
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+ ForceConflicts bool
+ // ServerSideApply enables changes to be applied via Kubernetes server-side apply
+ // Can be the string: "true", "false" or "auto"
+ // When "auto", sever-side usage will be based upon the releases previous usage
+ // see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+ ServerSideApply string
+ // ResetValues will reset the values to the chart's built-ins rather than merging with existing.
+ ResetValues bool
+ // ReuseValues will reuse the user's last supplied values.
+ ReuseValues bool
+ // ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values.
+ ResetThenReuseValues bool
+ // MaxHistory limits the maximum number of revisions saved per release
+ MaxHistory int
+ // RollbackOnFailure enables rolling back the upgraded release on failure
+ RollbackOnFailure bool
+ // CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
+ CleanupOnFail bool
+ // SubNotes determines whether sub-notes are rendered in the chart.
+ SubNotes bool
+ // HideNotes determines whether notes are output during upgrade
+ HideNotes bool
+ // SkipSchemaValidation determines if JSON schema validation is disabled.
+ SkipSchemaValidation bool
+ // Description is the description of this operation
+ Description string
+ Labels map[string]string
+ // PostRenderer is an optional post-renderer
+ //
+ // If this is non-nil, then after templates are rendered, they will be sent to the
+ // post renderer before sending to the Kubernetes API server.
+ PostRenderer postrenderer.PostRenderer
+ // DisableOpenAPIValidation controls whether OpenAPI validation is enforced.
+ DisableOpenAPIValidation bool
+ // Get missing dependencies
+ DependencyUpdate bool
+ // Lock to control raceconditions when the process receives a SIGTERM
+ Lock sync.Mutex
+ // Enable DNS lookups when rendering templates
+ EnableDNS bool
+ // TakeOwnership will skip the check for helm annotations and adopt all existing resources.
+ TakeOwnership bool
+}
+
+type resultMessage struct {
+ r *release.Release
+ e error
+}
+
+// NewUpgrade creates a new Upgrade object with the given configuration.
+func NewUpgrade(cfg *Configuration) *Upgrade {
+ up := &Upgrade{
+ cfg: cfg,
+ ServerSideApply: "auto",
+ DryRunStrategy: DryRunNone,
+ }
+ up.registryClient = cfg.RegistryClient
+
+ return up
+}
+
+// SetRegistryClient sets the registry client to use when fetching charts.
+func (u *Upgrade) SetRegistryClient(client *registry.Client) {
+ u.registryClient = client
+}
+
+// Run executes the upgrade on the given release.
+func (u *Upgrade) Run(name string, chart chart.Charter, vals map[string]interface{}) (ri.Releaser, error) {
+ ctx := context.Background()
+ return u.RunWithContext(ctx, name, chart, vals)
+}
+
+// RunWithContext executes the upgrade on the given release with context.
+func (u *Upgrade) RunWithContext(ctx context.Context, name string, ch chart.Charter, vals map[string]interface{}) (ri.Releaser, error) {
+ if err := u.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+
+ var chrt *chartv2.Chart
+ switch c := ch.(type) {
+ case *chartv2.Chart:
+ chrt = c
+ case chartv2.Chart:
+ chrt = &c
+ default:
+ return nil, errors.New("invalid chart apiVersion")
+ }
+
+ // Make sure wait is set if RollbackOnFailure. This makes it so
+ // the user doesn't have to specify both
+ if u.WaitStrategy == kube.HookOnlyStrategy && u.RollbackOnFailure {
+ u.WaitStrategy = kube.StatusWatcherStrategy
+ }
+
+ if err := chartutil.ValidateReleaseName(name); err != nil {
+ return nil, fmt.Errorf("release name is invalid: %s", name)
+ }
+
+ u.cfg.Logger().Debug("preparing upgrade", "name", name)
+ currentRelease, upgradedRelease, serverSideApply, err := u.prepareUpgrade(name, chrt, vals)
+ if err != nil {
+ return nil, err
+ }
+
+ u.cfg.Releases.MaxHistory = u.MaxHistory
+
+ u.cfg.Logger().Debug("performing update", "name", name)
+ res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease, serverSideApply)
+ if err != nil {
+ return res, err
+ }
+
+ // Do not update for dry runs
+ if !isDryRun(u.DryRunStrategy) {
+ u.cfg.Logger().Debug("updating status for upgraded release", "name", name)
+ if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
+ return res, err
+ }
+ }
+
+ return res, nil
+}
+
+// prepareUpgrade builds an upgraded release for an upgrade operation.
+func (u *Upgrade) prepareUpgrade(name string, chart *chartv2.Chart, vals map[string]interface{}) (*release.Release, *release.Release, bool, error) {
+ if chart == nil {
+ return nil, nil, false, errMissingChart
+ }
+
+ // HideSecret must be used with dry run. Otherwise, return an error.
+ if !isDryRun(u.DryRunStrategy) && u.HideSecret {
+ return nil, nil, false, errors.New("hiding Kubernetes secrets requires a dry-run mode")
+ }
+
+ // finds the last non-deleted release with the given name
+ lastReleasei, err := u.cfg.Releases.Last(name)
+ if err != nil {
+ // to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist
+ if errors.Is(err, driver.ErrReleaseNotFound) {
+ return nil, nil, false, driver.NewErrNoDeployedReleases(name)
+ }
+ return nil, nil, false, err
+ }
+
+ lastRelease, err := releaserToV1Release(lastReleasei)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ // Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock.
+ if lastRelease.Info.Status.IsPending() {
+ return nil, nil, false, errPending
+ }
+
+ var currentRelease *release.Release
+ if lastRelease.Info.Status == rcommon.StatusDeployed {
+ // no need to retrieve the last deployed release from storage as the last release is deployed
+ currentRelease = lastRelease
+ } else {
+ // finds the deployed release with the given name
+ currentReleasei, err := u.cfg.Releases.Deployed(name)
+ var cerr error
+ currentRelease, cerr = releaserToV1Release(currentReleasei)
+ if cerr != nil {
+ return nil, nil, false, err
+ }
+ if err != nil {
+ if errors.Is(err, driver.ErrNoDeployedReleases) &&
+ (lastRelease.Info.Status == rcommon.StatusFailed || lastRelease.Info.Status == rcommon.StatusSuperseded) {
+ currentRelease = lastRelease
+ } else {
+ return nil, nil, false, err
+ }
+ }
+
+ }
+
+ // determine if values will be reused
+ vals, err = u.reuseValues(chart, currentRelease, vals)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ if err := chartutil.ProcessDependencies(chart, vals); err != nil {
+ return nil, nil, false, err
+ }
+
+ // Increment revision count. This is passed to templates, and also stored on
+ // the release object.
+ revision := lastRelease.Version + 1
+
+ options := common.ReleaseOptions{
+ Name: name,
+ Namespace: currentRelease.Namespace,
+ Revision: revision,
+ IsUpgrade: true,
+ }
+
+ caps, err := u.cfg.getCapabilities()
+ if err != nil {
+ return nil, nil, false, err
+ }
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, vals, options, caps, u.SkipSchemaValidation)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, interactWithServer(u.DryRunStrategy), u.EnableDNS, u.HideSecret)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ if driver.ContainsSystemLabels(u.Labels) {
+ return nil, nil, false, fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ }
+
+ serverSideApply, err := getUpgradeServerSideValue(u.ServerSideApply, lastRelease.ApplyMethod)
+ if err != nil {
+ return nil, nil, false, err
+ }
+
+ u.cfg.Logger().Debug("determined release apply method", slog.Bool("server_side_apply", serverSideApply), slog.String("previous_release_apply_method", lastRelease.ApplyMethod))
+
+ // Store an upgraded release.
+ upgradedRelease := &release.Release{
+ Name: name,
+ Namespace: currentRelease.Namespace,
+ Chart: chart,
+ Config: vals,
+ Info: &release.Info{
+ FirstDeployed: currentRelease.Info.FirstDeployed,
+ LastDeployed: Timestamper(),
+ Status: rcommon.StatusPendingUpgrade,
+ Description: "Preparing upgrade", // This should be overwritten later.
+ },
+ Version: revision,
+ Manifest: manifestDoc.String(),
+ Hooks: hooks,
+ Labels: mergeCustomLabels(lastRelease.Labels, u.Labels),
+ ApplyMethod: string(determineReleaseSSApplyMethod(serverSideApply)),
+ }
+
+ if len(notesTxt) > 0 {
+ upgradedRelease.Info.Notes = notesTxt
+ }
+ err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation)
+ return currentRelease, upgradedRelease, serverSideApply, err
+}
+
+func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release, serverSideApply bool) (*release.Release, error) {
+ current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false)
+ if err != nil {
+ // Checking for removed Kubernetes API error so can provide a more informative error message to the user
+ // Ref: https://github.com/helm/helm/issues/7219
+ if strings.Contains(err.Error(), "unable to recognize \"\": no matches for kind") {
+ return upgradedRelease, fmt.Errorf("current release manifest contains removed kubernetes api(s) for this "+
+ "kubernetes version and it is therefore unable to build the kubernetes "+
+ "objects for performing the diff. error from kubernetes: %w", err)
+ }
+ return upgradedRelease, fmt.Errorf("unable to build kubernetes objects from current release manifest: %w", err)
+ }
+ target, err := u.cfg.KubeClient.Build(bytes.NewBufferString(upgradedRelease.Manifest), !u.DisableOpenAPIValidation)
+ if err != nil {
+ return upgradedRelease, fmt.Errorf("unable to build kubernetes objects from new release manifest: %w", err)
+ }
+
+ // It is safe to use force only on target because these are resources currently rendered by the chart.
+ err = target.Visit(setMetadataVisitor(upgradedRelease.Name, upgradedRelease.Namespace, true))
+ if err != nil {
+ return upgradedRelease, err
+ }
+
+ // Do a basic diff using gvk + name to figure out what new resources are being created so we can validate they don't already exist
+ existingResources := make(map[string]bool)
+ for _, r := range current {
+ existingResources[objectKey(r)] = true
+ }
+
+ var toBeCreated kube.ResourceList
+ for _, r := range target {
+ if !existingResources[objectKey(r)] {
+ toBeCreated = append(toBeCreated, r)
+ }
+ }
+
+ var toBeUpdated kube.ResourceList
+ if u.TakeOwnership {
+ toBeUpdated, err = requireAdoption(toBeCreated)
+ } else {
+ toBeUpdated, err = existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("unable to continue with update: %w", err)
+ }
+
+ toBeUpdated.Visit(func(r *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+ current.Append(r)
+ return nil
+ })
+
+ if isDryRun(u.DryRunStrategy) {
+ u.cfg.Logger().Debug("dry run for release", "name", upgradedRelease.Name)
+ if len(u.Description) > 0 {
+ upgradedRelease.Info.Description = u.Description
+ } else {
+ upgradedRelease.Info.Description = "Dry run complete"
+ }
+ return upgradedRelease, nil
+ }
+
+ u.cfg.Logger().Debug("creating upgraded release", "name", upgradedRelease.Name)
+ if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
+ return nil, err
+ }
+ rChan := make(chan resultMessage)
+ ctxChan := make(chan resultMessage)
+ doneChan := make(chan interface{})
+ defer close(doneChan)
+ go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease, serverSideApply)
+ go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease)
+
+ select {
+ case result := <-rChan:
+ return result.r, result.e
+ case result := <-ctxChan:
+ return result.r, result.e
+ }
+}
+
+// Function used to lock the Mutex, this is important for the case when RollbackOnFailure is set.
+// In that case the upgrade will finish before the rollback is finished so it is necessary to wait for the rollback to finish.
+// The rollback will be trigger by the function failRelease
+func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Release, created kube.ResourceList, err error) {
+ u.Lock.Lock()
+ if err != nil {
+ rel, err = u.failRelease(rel, created, err)
+ }
+ c <- resultMessage{r: rel, e: err}
+ u.Lock.Unlock()
+}
+
+// Setup listener for SIGINT and SIGTERM
+func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c chan<- resultMessage, upgradedRelease *release.Release) {
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+
+ // when RollbackOnFailure is set, the ongoing release finish first and doesn't give time for the rollback happens.
+ u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, err)
+ case <-done:
+ return
+ }
+}
+
+func isReleaseApplyMethodClientSideApply(applyMethod string) bool {
+ return applyMethod == "" || applyMethod == string(release.ApplyMethodClientSideApply)
+}
+
+func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release, serverSideApply bool) {
+ // pre-upgrade hooks
+
+ if !u.DisableHooks {
+ if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil {
+ u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
+ return
+ }
+ } else {
+ u.cfg.Logger().Debug("upgrade hooks disabled", "name", upgradedRelease.Name)
+ }
+
+ upgradeClientSideFieldManager := isReleaseApplyMethodClientSideApply(originalRelease.ApplyMethod) && serverSideApply // Update client-side field manager if transitioning from client-side to server-side apply
+ results, err := u.cfg.KubeClient.Update(
+ current,
+ target,
+ kube.ClientUpdateOptionForceReplace(u.ForceReplace),
+ kube.ClientUpdateOptionServerSideApply(serverSideApply, u.ForceConflicts),
+ kube.ClientUpdateOptionUpgradeClientSideFieldManager(upgradeClientSideFieldManager))
+ if err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
+ }
+
+ var waiter kube.Waiter
+ if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+ waiter, err = c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...)
+ } else {
+ waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
+ }
+ if err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
+ }
+ if u.WaitForJobs {
+ if err := waiter.WaitWithJobs(target, u.Timeout); err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
+ }
+ } else {
+ if err := waiter.Wait(target, u.Timeout); err != nil {
+ u.cfg.recordRelease(originalRelease)
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
+ return
+ }
+ }
+
+ // post-upgrade hooks
+ if !u.DisableHooks {
+ if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil {
+ u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
+ return
+ }
+ }
+
+ originalRelease.Info.Status = rcommon.StatusSuperseded
+ u.cfg.recordRelease(originalRelease)
+
+ upgradedRelease.Info.Status = rcommon.StatusDeployed
+ if len(u.Description) > 0 {
+ upgradedRelease.Info.Description = u.Description
+ } else {
+ upgradedRelease.Info.Description = "Upgrade complete"
+ }
+ u.reportToPerformUpgrade(c, upgradedRelease, nil, nil)
+}
+
+func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
+ msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
+ u.cfg.Logger().Warn(
+ "upgrade failed",
+ slog.String("name", rel.Name),
+ slog.Any("error", err),
+ )
+
+ rel.Info.Status = rcommon.StatusFailed
+ rel.Info.Description = msg
+ u.cfg.recordRelease(rel)
+ if u.CleanupOnFail && len(created) > 0 {
+ u.cfg.Logger().Debug("cleanup on fail set", "cleaning_resources", len(created))
+ _, errs := u.cfg.KubeClient.Delete(created, metav1.DeletePropagationBackground)
+ if errs != nil {
+ return rel, fmt.Errorf(
+ "an error occurred while cleaning up resources. original upgrade error: %w: %w",
+ err,
+ fmt.Errorf(
+ "unable to cleanup resources: %w",
+ joinErrors(errs, ", "),
+ ),
+ )
+ }
+ u.cfg.Logger().Debug("resource cleanup complete")
+ }
+
+ if u.RollbackOnFailure {
+ u.cfg.Logger().Debug("Upgrade failed and rollback-on-failure is set, rolling back to previous successful release")
+
+ // As a protection, get the last successful release before rollback.
+ // If there are no successful releases, bail out
+ hist := NewHistory(u.cfg)
+ fullHistory, herr := hist.Run(rel.Name)
+ if herr != nil {
+ return rel, fmt.Errorf("an error occurred while finding last successful release. original upgrade error: %w: %w", err, herr)
+ }
+
+ fullHistoryV1, herr := releaseListToV1List(fullHistory)
+ if herr != nil {
+ return nil, herr
+ }
+ // There isn't a way to tell if a previous release was successful, but
+ // generally failed releases do not get superseded unless the next
+ // release is successful, so this should be relatively safe
+ filteredHistory := releaseutil.FilterFunc(func(r *release.Release) bool {
+ return r.Info.Status == rcommon.StatusSuperseded || r.Info.Status == rcommon.StatusDeployed
+ }).Filter(fullHistoryV1)
+ if len(filteredHistory) == 0 {
+ return rel, fmt.Errorf("unable to find a previously successful release when attempting to rollback. original upgrade error: %w", err)
+ }
+
+ releaseutil.Reverse(filteredHistory, releaseutil.SortByRevision)
+
+ rollin := NewRollback(u.cfg)
+ rollin.Version = filteredHistory[0].Version
+ rollin.WaitStrategy = u.WaitStrategy
+ rollin.WaitOptions = u.WaitOptions
+ rollin.WaitForJobs = u.WaitForJobs
+ rollin.DisableHooks = u.DisableHooks
+ rollin.ForceReplace = u.ForceReplace
+ rollin.ForceConflicts = u.ForceConflicts
+ rollin.ServerSideApply = u.ServerSideApply
+ rollin.Timeout = u.Timeout
+ if rollErr := rollin.Run(rel.Name); rollErr != nil {
+ return rel, fmt.Errorf("an error occurred while rolling back the release. original upgrade error: %w: %w", err, rollErr)
+ }
+ return rel, fmt.Errorf("release %s failed, and has been rolled back due to rollback-on-failure being set: %w", rel.Name, err)
+ }
+
+ return rel, err
+}
+
+// reuseValues copies values from the current release to a new release if the
+// new release does not have any values.
+//
+// If the request already has values, or if there are no values in the current
+// release, this does nothing.
+//
+// This is skipped if the u.ResetValues flag is set, in which case the
+// request values are not altered.
+func (u *Upgrade) reuseValues(chart *chartv2.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
+ if u.ResetValues {
+ // If ResetValues is set, we completely ignore current.Config.
+ u.cfg.Logger().Debug("resetting values to the chart's original version")
+ return newVals, nil
+ }
+
+ // If the ReuseValues flag is set, we always copy the old values over the new config's values.
+ if u.ReuseValues {
+ u.cfg.Logger().Debug("reusing the old release's values")
+
+ // We have to regenerate the old coalesced values:
+ oldVals, err := util.CoalesceValues(current.Chart, current.Config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to rebuild old values: %w", err)
+ }
+
+ newVals = util.CoalesceTables(newVals, current.Config)
+
+ chart.Values = oldVals
+
+ return newVals, nil
+ }
+
+ // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values.
+ if u.ResetThenReuseValues {
+ u.cfg.Logger().Debug("merging values from old release to new values")
+
+ newVals = util.CoalesceTables(newVals, current.Config)
+
+ return newVals, nil
+ }
+
+ if len(newVals) == 0 && len(current.Config) > 0 {
+ u.cfg.Logger().Debug("copying values from old release", "name", current.Name, "version", current.Version)
+ newVals = current.Config
+ }
+ return newVals, nil
+}
+
+func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool) error {
+ _, err := c.Build(bytes.NewReader(manifest), openAPIValidation)
+ return err
+}
+
+func objectKey(r *resource.Info) string {
+ gvk := r.Object.GetObjectKind().GroupVersionKind()
+ return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
+}
+
+func mergeCustomLabels(current, desired map[string]string) map[string]string {
+ labels := mergeStrStrMaps(current, desired)
+ for k, v := range labels {
+ if v == "null" {
+ delete(labels, k)
+ }
+ }
+ return labels
+}
+
+func getUpgradeServerSideValue(serverSideOption string, releaseApplyMethod string) (bool, error) {
+ switch serverSideOption {
+ case "auto":
+ return releaseApplyMethod == "ssa", nil
+ case "false":
+ return false, nil
+ case "true":
+ return true, nil
+ default:
+ return false, fmt.Errorf("invalid/unknown release server-side apply method: %s", serverSideOption)
+ }
+}
diff --git a/helm/pkg/action/upgrade_test.go b/helm/pkg/action/upgrade_test.go
new file mode 100644
index 000000000..848e8a682
--- /dev/null
+++ b/helm/pkg/action/upgrade_test.go
@@ -0,0 +1,804 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/kube"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+func upgradeAction(t *testing.T) *Upgrade {
+ t.Helper()
+ config := actionConfigFixture(t)
+ upAction := NewUpgrade(config)
+ upAction.Namespace = "spaced"
+
+ return upAction
+}
+
+func TestUpgradeRelease_Success(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "previous-release"
+ rel.Info.Status = common.StatusDeployed
+ req.NoError(upAction.cfg.Releases.Create(rel))
+
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
+ vals := map[string]interface{}{}
+
+ ctx, done := context.WithCancel(t.Context())
+ resi, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals)
+ req.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Equal(res.Info.Status, common.StatusDeployed)
+ done()
+
+ // Detecting previous bug where context termination after successful release
+ // caused release to fail.
+ time.Sleep(time.Millisecond * 100)
+ lastReleasei, err := upAction.cfg.Releases.Last(rel.Name)
+ req.NoError(err)
+ lastRelease, err := releaserToV1Release(lastReleasei)
+ req.NoError(err)
+ is.Equal(lastRelease.Info.Status, common.StatusDeployed)
+}
+
+func TestUpgradeRelease_Wait(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "come-fail-away"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ upAction.cfg.KubeClient = failer
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
+ vals := map[string]interface{}{}
+
+ resi, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "I timed out")
+ is.Equal(res.Info.Status, common.StatusFailed)
+}
+
+func TestUpgradeRelease_WaitForJobs(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "come-fail-away"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ upAction.cfg.KubeClient = failer
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
+ upAction.WaitForJobs = true
+ vals := map[string]interface{}{}
+
+ resi, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "I timed out")
+ is.Equal(res.Info.Status, common.StatusFailed)
+}
+
+func TestUpgradeRelease_CleanupOnFail(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "come-fail-away"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitError = fmt.Errorf("I timed out")
+ failer.DeleteError = fmt.Errorf("I tried to delete nil")
+ upAction.cfg.KubeClient = failer
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
+ upAction.CleanupOnFail = true
+ vals := map[string]interface{}{}
+
+ resi, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.Error(err)
+ is.NotContains(err.Error(), "unable to cleanup resources")
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "I timed out")
+ is.Equal(res.Info.Status, common.StatusFailed)
+}
+
+func TestUpgradeRelease_RollbackOnFailure(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ t.Run("rollback-on-failure rollback succeeds", func(t *testing.T) {
+ upAction := upgradeAction(t)
+
+ rel := releaseStub()
+ rel.Name = "nuketown"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ // We can't make Update error because then the rollback won't work
+ failer.WatchUntilReadyError = fmt.Errorf("arming key removed")
+ upAction.cfg.KubeClient = failer
+ upAction.RollbackOnFailure = true
+ vals := map[string]interface{}{}
+
+ resi, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.Error(err)
+ is.Contains(err.Error(), "arming key removed")
+ is.Contains(err.Error(), "rollback-on-failure")
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ // Now make sure it is actually upgraded
+ updatedResi, err := upAction.cfg.Releases.Get(res.Name, 3)
+ is.NoError(err)
+ updatedRes, err := releaserToV1Release(updatedResi)
+ is.NoError(err)
+ // Should have rolled back to the previous
+ is.Equal(updatedRes.Info.Status, common.StatusDeployed)
+ })
+
+ t.Run("rollback-on-failure uninstall fails", func(t *testing.T) {
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "fallout"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.UpdateError = fmt.Errorf("update fail")
+ upAction.cfg.KubeClient = failer
+ upAction.RollbackOnFailure = true
+ vals := map[string]interface{}{}
+
+ _, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.Error(err)
+ is.Contains(err.Error(), "update fail")
+ is.Contains(err.Error(), "an error occurred while rolling back the release")
+ })
+}
+
+func TestUpgradeRelease_ReuseValues(t *testing.T) {
+ is := assert.New(t)
+
+ t.Run("reuse values should work with values", func(t *testing.T) {
+ upAction := upgradeAction(t)
+
+ existingValues := map[string]interface{}{
+ "name": "value",
+ "maxHeapSize": "128m",
+ "replicas": 2,
+ }
+ newValues := map[string]interface{}{
+ "name": "newValue",
+ "maxHeapSize": "512m",
+ "cpu": "12m",
+ }
+ expectedValues := map[string]interface{}{
+ "name": "newValue",
+ "maxHeapSize": "512m",
+ "cpu": "12m",
+ "replicas": 2,
+ }
+
+ rel := releaseStub()
+ rel.Name = "nuketown"
+ rel.Info.Status = common.StatusDeployed
+ rel.Config = existingValues
+
+ err := upAction.cfg.Releases.Create(rel)
+ is.NoError(err)
+
+ upAction.ReuseValues = true
+ // setting newValues and upgrading
+ resi, err := upAction.Run(rel.Name, buildChart(), newValues)
+ is.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ // Now make sure it is actually upgraded
+ updatedResi, err := upAction.cfg.Releases.Get(res.Name, 2)
+ is.NoError(err)
+
+ if updatedResi == nil {
+ is.Fail("Updated Release is nil")
+ return
+ }
+ updatedRes, err := releaserToV1Release(updatedResi)
+ is.NoError(err)
+
+ is.Equal(common.StatusDeployed, updatedRes.Info.Status)
+ is.Equal(expectedValues, updatedRes.Config)
+ })
+
+ t.Run("reuse values should not install disabled charts", func(t *testing.T) {
+ upAction := upgradeAction(t)
+ chartDefaultValues := map[string]interface{}{
+ "subchart": map[string]interface{}{
+ "enabled": true,
+ },
+ }
+ dependency := chart.Dependency{
+ Name: "subchart",
+ Version: "0.1.0",
+ Repository: "http://some-repo.com",
+ Condition: "subchart.enabled",
+ }
+ sampleChart := buildChart(
+ withName("sample"),
+ withValues(chartDefaultValues),
+ withMetadataDependency(dependency),
+ )
+ now := time.Now()
+ existingValues := map[string]interface{}{
+ "subchart": map[string]interface{}{
+ "enabled": false,
+ },
+ }
+ rel := &release.Release{
+ Name: "nuketown",
+ Info: &release.Info{
+ FirstDeployed: now,
+ LastDeployed: now,
+ Status: common.StatusDeployed,
+ Description: "Named Release Stub",
+ },
+ Chart: sampleChart,
+ Config: existingValues,
+ Version: 1,
+ }
+ err := upAction.cfg.Releases.Create(rel)
+ is.NoError(err)
+
+ upAction.ReuseValues = true
+ sampleChartWithSubChart := buildChart(
+ withName(sampleChart.Name()),
+ withValues(sampleChart.Values),
+ withDependency(withName("subchart")),
+ withMetadataDependency(dependency),
+ )
+ // reusing values and upgrading
+ resi, err := upAction.Run(rel.Name, sampleChartWithSubChart, map[string]interface{}{})
+ is.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ // Now get the upgraded release
+ updatedResi, err := upAction.cfg.Releases.Get(res.Name, 2)
+ is.NoError(err)
+
+ if updatedResi == nil {
+ is.Fail("Updated Release is nil")
+ return
+ }
+ updatedRes, err := releaserToV1Release(updatedResi)
+ is.NoError(err)
+
+ is.Equal(common.StatusDeployed, updatedRes.Info.Status)
+ is.Equal(0, len(updatedRes.Chart.Dependencies()), "expected 0 dependencies")
+
+ expectedValues := map[string]interface{}{
+ "subchart": map[string]interface{}{
+ "enabled": false,
+ },
+ }
+ is.Equal(expectedValues, updatedRes.Config)
+ })
+}
+
+func TestUpgradeRelease_ResetThenReuseValues(t *testing.T) {
+ is := assert.New(t)
+
+ t.Run("reset then reuse values should work with values", func(t *testing.T) {
+ upAction := upgradeAction(t)
+
+ existingValues := map[string]interface{}{
+ "name": "value",
+ "maxHeapSize": "128m",
+ "replicas": 2,
+ }
+ newValues := map[string]interface{}{
+ "name": "newValue",
+ "maxHeapSize": "512m",
+ "cpu": "12m",
+ }
+ newChartValues := map[string]interface{}{
+ "memory": "256m",
+ }
+ expectedValues := map[string]interface{}{
+ "name": "newValue",
+ "maxHeapSize": "512m",
+ "cpu": "12m",
+ "replicas": 2,
+ }
+
+ rel := releaseStub()
+ rel.Name = "nuketown"
+ rel.Info.Status = common.StatusDeployed
+ rel.Config = existingValues
+
+ err := upAction.cfg.Releases.Create(rel)
+ is.NoError(err)
+
+ upAction.ResetThenReuseValues = true
+ // setting newValues and upgrading
+ resi, err := upAction.Run(rel.Name, buildChart(withValues(newChartValues)), newValues)
+ is.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ // Now make sure it is actually upgraded
+ updatedResi, err := upAction.cfg.Releases.Get(res.Name, 2)
+ is.NoError(err)
+
+ if updatedResi == nil {
+ is.Fail("Updated Release is nil")
+ return
+ }
+ updatedRes, err := releaserToV1Release(updatedResi)
+ is.NoError(err)
+
+ is.Equal(common.StatusDeployed, updatedRes.Info.Status)
+ is.Equal(expectedValues, updatedRes.Config)
+ is.Equal(newChartValues, updatedRes.Chart.Values)
+ })
+}
+
+func TestUpgradeRelease_Pending(t *testing.T) {
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "come-fail-away"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+ rel2 := releaseStub()
+ rel2.Name = "come-fail-away"
+ rel2.Info.Status = common.StatusPendingUpgrade
+ rel2.Version = 2
+ require.NoError(t, upAction.cfg.Releases.Create(rel2))
+
+ vals := map[string]interface{}{}
+
+ _, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.Contains(err.Error(), "progress", err)
+}
+
+func TestUpgradeRelease_Interrupted_Wait(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "interrupted-release"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitDuration = 10 * time.Second
+ upAction.cfg.KubeClient = failer
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
+ vals := map[string]interface{}{}
+
+ ctx, cancel := context.WithCancel(t.Context())
+ time.AfterFunc(time.Second, cancel)
+
+ resi, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals)
+
+ req.Error(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Contains(res.Info.Description, "Upgrade \"interrupted-release\" failed: context canceled")
+ is.Equal(res.Info.Status, common.StatusFailed)
+}
+
+func TestUpgradeRelease_Interrupted_RollbackOnFailure(t *testing.T) {
+
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "interrupted-release"
+ rel.Info.Status = common.StatusDeployed
+ require.NoError(t, upAction.cfg.Releases.Create(rel))
+
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+ failer.WaitDuration = 5 * time.Second
+ upAction.cfg.KubeClient = failer
+ upAction.RollbackOnFailure = true
+ vals := map[string]interface{}{}
+
+ ctx, cancel := context.WithCancel(t.Context())
+ time.AfterFunc(time.Second, cancel)
+
+ resi, err := upAction.RunWithContext(ctx, rel.Name, buildChart(), vals)
+
+ req.Error(err)
+ is.Contains(err.Error(), "release interrupted-release failed, and has been rolled back due to rollback-on-failure being set: context canceled")
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ // Now make sure it is actually upgraded
+ updatedResi, err := upAction.cfg.Releases.Get(res.Name, 3)
+ is.NoError(err)
+ updatedRes, err := releaserToV1Release(updatedResi)
+ is.NoError(err)
+ // Should have rolled back to the previous
+ is.Equal(updatedRes.Info.Status, common.StatusDeployed)
+}
+
+func TestMergeCustomLabels(t *testing.T) {
+ tests := [][3]map[string]string{
+ {nil, nil, map[string]string{}},
+ {map[string]string{}, map[string]string{}, map[string]string{}},
+ {map[string]string{"k1": "v1", "k2": "v2"}, nil, map[string]string{"k1": "v1", "k2": "v2"}},
+ {nil, map[string]string{"k1": "v1", "k2": "v2"}, map[string]string{"k1": "v1", "k2": "v2"}},
+ {map[string]string{"k1": "v1", "k2": "v2"}, map[string]string{"k1": "null", "k2": "v3"}, map[string]string{"k2": "v3"}},
+ }
+ for _, test := range tests {
+ if output := mergeCustomLabels(test[0], test[1]); !reflect.DeepEqual(test[2], output) {
+ t.Errorf("Expected {%v}, got {%v}", test[2], output)
+ }
+ }
+}
+
+func TestUpgradeRelease_Labels(t *testing.T) {
+ is := assert.New(t)
+ upAction := upgradeAction(t)
+
+ rel := releaseStub()
+ rel.Name = "labels"
+ // It's needed to check that suppressed release would keep original labels
+ rel.Labels = map[string]string{
+ "key1": "val1",
+ "key2": "val2.1",
+ }
+ rel.Info.Status = common.StatusDeployed
+
+ err := upAction.cfg.Releases.Create(rel)
+ is.NoError(err)
+
+ upAction.Labels = map[string]string{
+ "key1": "null",
+ "key2": "val2.2",
+ "key3": "val3",
+ }
+ // setting newValues and upgrading
+ resi, err := upAction.Run(rel.Name, buildChart(), nil)
+ is.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+
+ // Now make sure it is actually upgraded and labels were merged
+ updatedResi, err := upAction.cfg.Releases.Get(res.Name, 2)
+ is.NoError(err)
+
+ if updatedResi == nil {
+ is.Fail("Updated Release is nil")
+ return
+ }
+ updatedRes, err := releaserToV1Release(updatedResi)
+ is.NoError(err)
+ is.Equal(common.StatusDeployed, updatedRes.Info.Status)
+ is.Equal(mergeCustomLabels(rel.Labels, upAction.Labels), updatedRes.Labels)
+
+ // Now make sure it is suppressed release still contains original labels
+ initialResi, err := upAction.cfg.Releases.Get(res.Name, 1)
+ is.NoError(err)
+
+ if initialResi == nil {
+ is.Fail("Updated Release is nil")
+ return
+ }
+ initialRes, err := releaserToV1Release(initialResi)
+ is.NoError(err)
+ is.Equal(initialRes.Info.Status, common.StatusSuperseded)
+ is.Equal(initialRes.Labels, rel.Labels)
+}
+
+func TestUpgradeRelease_SystemLabels(t *testing.T) {
+ is := assert.New(t)
+ upAction := upgradeAction(t)
+
+ rel := releaseStub()
+ rel.Name = "labels"
+ // It's needed to check that suppressed release would keep original labels
+ rel.Labels = map[string]string{
+ "key1": "val1",
+ "key2": "val2.1",
+ }
+ rel.Info.Status = common.StatusDeployed
+
+ err := upAction.cfg.Releases.Create(rel)
+ is.NoError(err)
+
+ upAction.Labels = map[string]string{
+ "key1": "null",
+ "key2": "val2.2",
+ "owner": "val3",
+ }
+ // setting newValues and upgrading
+ _, err = upAction.Run(rel.Name, buildChart(), nil)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+
+ is.Equal(fmt.Errorf("user supplied labels contains system reserved label name. System labels: %+v", driver.GetSystemLabels()), err)
+}
+
+func TestUpgradeRelease_DryRun(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "previous-release"
+ rel.Info.Status = common.StatusDeployed
+ req.NoError(upAction.cfg.Releases.Create(rel))
+
+ upAction.DryRunStrategy = DryRunClient
+ vals := map[string]interface{}{}
+
+ ctx, done := context.WithCancel(t.Context())
+ resi, err := upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals)
+ done()
+ req.NoError(err)
+ res, err := releaserToV1Release(resi)
+ is.NoError(err)
+ is.Equal(common.StatusPendingUpgrade, res.Info.Status)
+ is.Contains(res.Manifest, "kind: Secret")
+
+ lastReleasei, err := upAction.cfg.Releases.Last(rel.Name)
+ req.NoError(err)
+ lastRelease, err := releaserToV1Release(lastReleasei)
+ req.NoError(err)
+ is.Equal(lastRelease.Info.Status, common.StatusDeployed)
+ is.Equal(1, lastRelease.Version)
+
+ // Test the case for hiding the secret to ensure it is not displayed
+ upAction.HideSecret = true
+ vals = map[string]interface{}{}
+
+ ctx, done = context.WithCancel(t.Context())
+ resi, err = upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals)
+ done()
+ req.NoError(err)
+ res, err = releaserToV1Release(resi)
+ is.NoError(err)
+ is.Equal(common.StatusPendingUpgrade, res.Info.Status)
+ is.NotContains(res.Manifest, "kind: Secret")
+
+ lastReleasei, err = upAction.cfg.Releases.Last(rel.Name)
+ req.NoError(err)
+ lastRelease, err = releaserToV1Release(lastReleasei)
+ req.NoError(err)
+ is.Equal(lastRelease.Info.Status, common.StatusDeployed)
+ is.Equal(1, lastRelease.Version)
+
+ // Ensure in a dry run mode when using HideSecret
+ upAction.DryRunStrategy = DryRunNone
+ vals = map[string]interface{}{}
+
+ ctx, done = context.WithCancel(t.Context())
+ _, err = upAction.RunWithContext(ctx, rel.Name, buildChart(withSampleSecret()), vals)
+ done()
+ req.Error(err)
+}
+
+func TestGetUpgradeServerSideValue(t *testing.T) {
+ tests := []struct {
+ name string
+ actionServerSideOption string
+ releaseApplyMethod string
+ expectedServerSideApply bool
+ }{
+ {
+ name: "action ssa auto / release csa",
+ actionServerSideOption: "auto",
+ releaseApplyMethod: "csa",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa auto / release ssa",
+ actionServerSideOption: "auto",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa auto / release empty",
+ actionServerSideOption: "auto",
+ releaseApplyMethod: "",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa true / release csa",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "csa",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa true / release ssa",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa true / release 'unknown'",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "foo",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa true / release empty",
+ actionServerSideOption: "true",
+ releaseApplyMethod: "",
+ expectedServerSideApply: true,
+ },
+ {
+ name: "action ssa false / release csa",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa false / release ssa",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa false / release 'unknown'",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "foo",
+ expectedServerSideApply: false,
+ },
+ {
+ name: "action ssa false / release empty",
+ actionServerSideOption: "false",
+ releaseApplyMethod: "ssa",
+ expectedServerSideApply: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ serverSideApply, err := getUpgradeServerSideValue(tt.actionServerSideOption, tt.releaseApplyMethod)
+ assert.Nil(t, err)
+ assert.Equal(t, tt.expectedServerSideApply, serverSideApply)
+ })
+ }
+
+ testsError := []struct {
+ name string
+ actionServerSideOption string
+ releaseApplyMethod string
+ expectedErrorMsg string
+ }{
+ {
+ name: "action invalid option",
+ actionServerSideOption: "invalid",
+ releaseApplyMethod: "ssa",
+ expectedErrorMsg: "invalid/unknown release server-side apply method: invalid",
+ },
+ }
+
+ for _, tt := range testsError {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := getUpgradeServerSideValue(tt.actionServerSideOption, tt.releaseApplyMethod)
+ assert.ErrorContains(t, err, tt.expectedErrorMsg)
+ })
+ }
+
+}
+
+func TestUpgradeRun_UnreachableKubeClient(t *testing.T) {
+ t.Helper()
+ config := actionConfigFixture(t)
+ failingKubeClient := kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, DummyResources: nil}
+ failingKubeClient.ConnectionError = errors.New("connection refused")
+ config.KubeClient = &failingKubeClient
+
+ client := NewUpgrade(config)
+ vals := map[string]interface{}{}
+ result, err := client.Run("", buildChart(), vals)
+
+ assert.Nil(t, result)
+ assert.ErrorContains(t, err, "connection refused")
+}
+
+func TestUpgradeSetRegistryClient(t *testing.T) {
+ config := actionConfigFixture(t)
+ client := NewUpgrade(config)
+
+ registryClient := ®istry.Client{}
+ client.SetRegistryClient(registryClient)
+ assert.Equal(t, registryClient, client.registryClient)
+}
+
+func TestObjectKey(t *testing.T) {
+ obj := &appsv1.Deployment{}
+ obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
+ info := resource.Info{Name: "name", Namespace: "namespace", Object: obj}
+
+ assert.Equal(t, "apps/v1/Deployment/namespace/name", objectKey(&info))
+}
+
+func TestUpgradeRelease_WaitOptionsPassedDownstream(t *testing.T) {
+ is := assert.New(t)
+ req := require.New(t)
+
+ upAction := upgradeAction(t)
+ rel := releaseStub()
+ rel.Name = "wait-options-test"
+ rel.Info.Status = common.StatusDeployed
+ req.NoError(upAction.cfg.Releases.Create(rel))
+
+ upAction.WaitStrategy = kube.StatusWatcherStrategy
+
+ // Use WithWaitContext as a marker WaitOption that we can track
+ ctx := context.Background()
+ upAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+ // Access the underlying FailingKubeClient to check recorded options
+ failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+
+ vals := map[string]interface{}{}
+ _, err := upAction.Run(rel.Name, buildChart(), vals)
+ req.NoError(err)
+
+ // Verify that WaitOptions were passed to GetWaiter
+ is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/helm/pkg/action/validate.go b/helm/pkg/action/validate.go
new file mode 100644
index 000000000..1bef5a742
--- /dev/null
+++ b/helm/pkg/action/validate.go
@@ -0,0 +1,203 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "maps"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v4/pkg/kube"
+)
+
+var accessor = meta.NewAccessor()
+
+const (
+ appManagedByLabel = "app.kubernetes.io/managed-by"
+ appManagedByHelm = "Helm"
+ helmReleaseNameAnnotation = "meta.helm.sh/release-name"
+ helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace"
+)
+
+// requireAdoption returns the subset of resources that already exist in the cluster.
+func requireAdoption(resources kube.ResourceList) (kube.ResourceList, error) {
+ var requireUpdate kube.ResourceList
+
+ err := resources.Visit(func(info *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ helper := resource.NewHelper(info.Client, info.Mapping)
+ _, err = helper.Get(info.Namespace, info.Name)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+ return fmt.Errorf("could not get information about the resource %s: %w", resourceString(info), err)
+ }
+
+ infoCopy := *info
+ requireUpdate.Append(&infoCopy)
+ return nil
+ })
+
+ return requireUpdate, err
+}
+
+func existingResourceConflict(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, error) {
+ var requireUpdate kube.ResourceList
+
+ err := resources.Visit(func(info *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ helper := resource.NewHelper(info.Client, info.Mapping)
+ existing, err := helper.Get(info.Namespace, info.Name)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+ return fmt.Errorf("could not get information about the resource %s: %w", resourceString(info), err)
+ }
+
+ // Allow adoption of the resource if it is managed by Helm and is annotated with correct release name and namespace.
+ if err := checkOwnership(existing, releaseName, releaseNamespace); err != nil {
+ return fmt.Errorf("%s exists and cannot be imported into the current release: %s", resourceString(info), err)
+ }
+
+ infoCopy := *info
+ requireUpdate.Append(&infoCopy)
+ return nil
+ })
+
+ return requireUpdate, err
+}
+
+func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) error {
+ lbls, err := accessor.Labels(obj)
+ if err != nil {
+ return err
+ }
+ annos, err := accessor.Annotations(obj)
+ if err != nil {
+ return err
+ }
+
+ var errs []error
+ if err := requireValue(lbls, appManagedByLabel, appManagedByHelm); err != nil {
+ errs = append(errs, fmt.Errorf("label validation error: %s", err))
+ }
+ if err := requireValue(annos, helmReleaseNameAnnotation, releaseName); err != nil {
+ errs = append(errs, fmt.Errorf("annotation validation error: %s", err))
+ }
+ if err := requireValue(annos, helmReleaseNamespaceAnnotation, releaseNamespace); err != nil {
+ errs = append(errs, fmt.Errorf("annotation validation error: %s", err))
+ }
+
+ if len(errs) > 0 {
+ return fmt.Errorf("invalid ownership metadata; %w", joinErrors(errs, "; "))
+ }
+
+ return nil
+}
+
+func requireValue(meta map[string]string, k, v string) error {
+ actual, ok := meta[k]
+ if !ok {
+ return fmt.Errorf("missing key %q: must be set to %q", k, v)
+ }
+ if actual != v {
+ return fmt.Errorf("key %q must equal %q: current value is %q", k, v, actual)
+ }
+ return nil
+}
+
+// setMetadataVisitor adds release tracking metadata to all resources. If forceOwnership is enabled, existing
+// ownership metadata will be overwritten. Otherwise an error will be returned if any resource has an
+// existing and conflicting value for the managed by label or Helm release/namespace annotations.
+func setMetadataVisitor(releaseName, releaseNamespace string, forceOwnership bool) resource.VisitorFunc {
+ return func(info *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !forceOwnership {
+ if err := checkOwnership(info.Object, releaseName, releaseNamespace); err != nil {
+ return fmt.Errorf("%s cannot be owned: %s", resourceString(info), err)
+ }
+ }
+
+ if err := mergeLabels(info.Object, map[string]string{
+ appManagedByLabel: appManagedByHelm,
+ }); err != nil {
+ return fmt.Errorf(
+ "%s labels could not be updated: %s",
+ resourceString(info), err,
+ )
+ }
+
+ if err := mergeAnnotations(info.Object, map[string]string{
+ helmReleaseNameAnnotation: releaseName,
+ helmReleaseNamespaceAnnotation: releaseNamespace,
+ }); err != nil {
+ return fmt.Errorf(
+ "%s annotations could not be updated: %s",
+ resourceString(info), err,
+ )
+ }
+
+ return nil
+ }
+}
+
+func resourceString(info *resource.Info) string {
+ _, k := info.Mapping.GroupVersionKind.ToAPIVersionAndKind()
+ return fmt.Sprintf(
+ "%s %q in namespace %q",
+ k, info.Name, info.Namespace,
+ )
+}
+
+func mergeLabels(obj runtime.Object, labels map[string]string) error {
+ current, err := accessor.Labels(obj)
+ if err != nil {
+ return err
+ }
+ return accessor.SetLabels(obj, mergeStrStrMaps(current, labels))
+}
+
+func mergeAnnotations(obj runtime.Object, annotations map[string]string) error {
+ current, err := accessor.Annotations(obj)
+ if err != nil {
+ return err
+ }
+ return accessor.SetAnnotations(obj, mergeStrStrMaps(current, annotations))
+}
+
+// merge two maps, always taking the value on the right
+func mergeStrStrMaps(current, desired map[string]string) map[string]string {
+ result := make(map[string]string)
+ maps.Copy(result, current)
+ maps.Copy(result, desired)
+ return result
+}
diff --git a/helm/pkg/action/validate_test.go b/helm/pkg/action/validate_test.go
new file mode 100644
index 000000000..879a5fa4f
--- /dev/null
+++ b/helm/pkg/action/validate_test.go
@@ -0,0 +1,242 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/kube"
+
+ "github.com/stretchr/testify/assert"
+
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest/fake"
+)
+
+func newDeploymentResource(name, namespace string) *resource.Info {
+ return &resource.Info{
+ Name: name,
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
+ GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+ },
+ Object: &appsv1.Deployment{
+ ObjectMeta: v1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ },
+ }
+}
+
+func newMissingDeployment(name, namespace string) *resource.Info {
+ info := &resource.Info{
+ Name: name,
+ Namespace: namespace,
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
+ GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+ Scope: meta.RESTScopeNamespace,
+ },
+ Object: &appsv1.Deployment{
+ ObjectMeta: v1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ },
+ Client: fakeClientWith(http.StatusNotFound, appsV1GV, ""),
+ }
+
+ return info
+}
+
+func newDeploymentWithOwner(name, namespace string, labels map[string]string, annotations map[string]string) *resource.Info {
+ obj := &appsv1.Deployment{
+ ObjectMeta: v1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: labels,
+ Annotations: annotations,
+ },
+ }
+ return &resource.Info{
+ Name: name,
+ Namespace: namespace,
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
+ GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
+ Scope: meta.RESTScopeNamespace,
+ },
+ Object: obj,
+ Client: fakeClientWith(http.StatusOK, appsV1GV, runtime.EncodeOrDie(appsv1Codec, obj)),
+ }
+}
+
+var (
+ appsV1GV = schema.GroupVersion{Group: "apps", Version: "v1"}
+ appsv1Codec = scheme.Codecs.CodecForVersions(scheme.Codecs.LegacyCodec(appsV1GV), scheme.Codecs.UniversalDecoder(appsV1GV), appsV1GV, appsV1GV)
+)
+
+func stringBody(body string) io.ReadCloser {
+ return io.NopCloser(bytes.NewReader([]byte(body)))
+}
+
+func fakeClientWith(code int, gv schema.GroupVersion, body string) *fake.RESTClient {
+ return &fake.RESTClient{
+ GroupVersion: gv,
+ NegotiatedSerializer: scheme.Codecs.WithoutConversion(),
+ Client: fake.CreateHTTPClient(func(_ *http.Request) (*http.Response, error) {
+ header := http.Header{}
+ header.Set("Content-Type", runtime.ContentTypeJSON)
+ return &http.Response{
+ StatusCode: code,
+ Header: header,
+ Body: stringBody(body),
+ }, nil
+ }),
+ }
+}
+
+func TestRequireAdoption(t *testing.T) {
+ var (
+ missing = newMissingDeployment("missing", "ns-a")
+ existing = newDeploymentWithOwner("existing", "ns-a", nil, nil)
+ resources = kube.ResourceList{missing, existing}
+ )
+
+ // Verify that a resource that lacks labels/annotations can be adopted
+ found, err := requireAdoption(resources)
+ assert.NoError(t, err)
+ assert.Len(t, found, 1)
+ assert.Equal(t, found[0], existing)
+ assert.NotSame(t, found[0], existing)
+}
+
+func TestExistingResourceConflict(t *testing.T) {
+ var (
+ releaseName = "rel-name"
+ releaseNamespace = "rel-namespace"
+ labels = map[string]string{
+ appManagedByLabel: appManagedByHelm,
+ }
+ annotations = map[string]string{
+ helmReleaseNameAnnotation: releaseName,
+ helmReleaseNamespaceAnnotation: releaseNamespace,
+ }
+ missing = newMissingDeployment("missing", "ns-a")
+ existing = newDeploymentWithOwner("existing", "ns-a", labels, annotations)
+ conflict = newDeploymentWithOwner("conflict", "ns-a", nil, nil)
+ resources = kube.ResourceList{missing, existing}
+ )
+
+ // Verify only existing resources are returned
+ found, err := existingResourceConflict(resources, releaseName, releaseNamespace)
+ assert.NoError(t, err)
+ assert.Len(t, found, 1)
+ assert.Equal(t, found[0], existing)
+ assert.NotSame(t, found[0], existing)
+
+ // Verify that an existing resource that lacks labels/annotations results in an error
+ resources = append(resources, conflict)
+ _, err = existingResourceConflict(resources, releaseName, releaseNamespace)
+ assert.Error(t, err)
+}
+
+func TestCheckOwnership(t *testing.T) {
+ deployFoo := newDeploymentResource("foo", "ns-a")
+
+ // Verify that a resource that lacks labels/annotations is not owned
+ err := checkOwnership(deployFoo.Object, "rel-a", "ns-a")
+ assert.EqualError(t, err, `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "rel-a"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "ns-a"`)
+
+ // Set managed by label and verify annotation error message
+ _ = accessor.SetLabels(deployFoo.Object, map[string]string{
+ appManagedByLabel: appManagedByHelm,
+ })
+ err = checkOwnership(deployFoo.Object, "rel-a", "ns-a")
+ assert.EqualError(t, err, `invalid ownership metadata; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "rel-a"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "ns-a"`)
+
+ // Set only the release name annotation and verify missing release namespace error message
+ _ = accessor.SetAnnotations(deployFoo.Object, map[string]string{
+ helmReleaseNameAnnotation: "rel-a",
+ })
+ err = checkOwnership(deployFoo.Object, "rel-a", "ns-a")
+ assert.EqualError(t, err, `invalid ownership metadata; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "ns-a"`)
+
+ // Set both release name and namespace annotations and verify no ownership errors
+ _ = accessor.SetAnnotations(deployFoo.Object, map[string]string{
+ helmReleaseNameAnnotation: "rel-a",
+ helmReleaseNamespaceAnnotation: "ns-a",
+ })
+ err = checkOwnership(deployFoo.Object, "rel-a", "ns-a")
+ assert.NoError(t, err)
+
+ // Verify ownership error for wrong release name
+ err = checkOwnership(deployFoo.Object, "rel-b", "ns-a")
+ assert.EqualError(t, err, `invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-name" must equal "rel-b": current value is "rel-a"`)
+
+ // Verify ownership error for wrong release namespace
+ err = checkOwnership(deployFoo.Object, "rel-a", "ns-b")
+ assert.EqualError(t, err, `invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-namespace" must equal "ns-b": current value is "ns-a"`)
+
+ // Verify ownership error for wrong manager label
+ _ = accessor.SetLabels(deployFoo.Object, map[string]string{
+ appManagedByLabel: "helm",
+ })
+ err = checkOwnership(deployFoo.Object, "rel-a", "ns-a")
+ assert.EqualError(t, err, `invalid ownership metadata; label validation error: key "app.kubernetes.io/managed-by" must equal "Helm": current value is "helm"`)
+}
+
+func TestSetMetadataVisitor(t *testing.T) {
+ var (
+ err error
+ deployFoo = newDeploymentResource("foo", "ns-a")
+ deployBar = newDeploymentResource("bar", "ns-a-system")
+ resources = kube.ResourceList{deployFoo, deployBar}
+ )
+
+ // Set release tracking metadata and verify no error
+ err = resources.Visit(setMetadataVisitor("rel-a", "ns-a", true))
+ assert.NoError(t, err)
+
+ // Verify that release "b" cannot take ownership of "a"
+ err = resources.Visit(setMetadataVisitor("rel-b", "ns-a", false))
+ assert.Error(t, err)
+
+ // Force release "b" to take ownership
+ err = resources.Visit(setMetadataVisitor("rel-b", "ns-a", true))
+ assert.NoError(t, err)
+
+ // Check that there is now no ownership error when setting metadata without force
+ err = resources.Visit(setMetadataVisitor("rel-b", "ns-a", false))
+ assert.NoError(t, err)
+
+ // Add a new resource that is missing ownership metadata and verify error
+ resources.Append(newDeploymentResource("baz", "default"))
+ err = resources.Visit(setMetadataVisitor("rel-b", "ns-a", false))
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), `Deployment "baz" in namespace "" cannot be owned`)
+}
diff --git a/helm/pkg/action/verify.go b/helm/pkg/action/verify.go
new file mode 100644
index 000000000..6e4562f61
--- /dev/null
+++ b/helm/pkg/action/verify.go
@@ -0,0 +1,53 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "fmt"
+ "strings"
+
+ "helm.sh/helm/v4/pkg/downloader"
+)
+
+// Verify is the action for building a given chart's Verify tree.
+//
+// It provides the implementation of 'helm verify'.
+type Verify struct {
+ Keyring string
+}
+
+// NewVerify creates a new Verify object with the given configuration.
+func NewVerify() *Verify {
+ return &Verify{}
+}
+
+// Run executes 'helm verify'.
+func (v *Verify) Run(chartfile string) (string, error) {
+ var out strings.Builder
+ p, err := downloader.VerifyChart(chartfile, chartfile+".prov", v.Keyring)
+ if err != nil {
+ return "", err
+ }
+
+ for name := range p.SignedBy.Identities {
+ _, _ = fmt.Fprintf(&out, "Signed by: %v\n", name)
+ }
+ _, _ = fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", p.SignedBy.PrimaryKey.Fingerprint)
+ _, _ = fmt.Fprintf(&out, "Chart Hash Verified: %s\n", p.FileHash)
+
+ return out.String(), err
+}
diff --git a/helm/pkg/action/verify_test.go b/helm/pkg/action/verify_test.go
new file mode 100644
index 000000000..343dacaef
--- /dev/null
+++ b/helm/pkg/action/verify_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewVerify(t *testing.T) {
+ client := NewVerify()
+
+ assert.NotNil(t, client)
+}
+
+func TestVerifyRun(t *testing.T) {
+ client := NewVerify()
+
+ client.Keyring = "../downloader/testdata/helm-test-key.pub"
+ output, err := client.Run("../downloader/testdata/signtest-0.1.0.tgz")
+ assert.Contains(t, output, "Signed by:")
+ assert.Contains(t, output, "Using Key With Fingerprint:")
+ assert.Contains(t, output, "Chart Hash Verified:")
+ require.NoError(t, err)
+}
+
+func TestVerifyRun_DownloadError(t *testing.T) {
+ client := NewVerify()
+ output, err := client.Run("invalid-chart-path")
+ require.Error(t, err)
+ assert.Empty(t, output)
+}
diff --git a/helm/pkg/chart/common.go b/helm/pkg/chart/common.go
new file mode 100644
index 000000000..cd87e91e7
--- /dev/null
+++ b/helm/pkg/chart/common.go
@@ -0,0 +1,243 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "reflect"
+ "strings"
+
+ v3chart "helm.sh/helm/v4/internal/chart/v3"
+ common "helm.sh/helm/v4/pkg/chart/common"
+ v2chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+var NewAccessor func(chrt Charter) (Accessor, error) = NewDefaultAccessor //nolint:revive
+
+func NewDefaultAccessor(chrt Charter) (Accessor, error) {
+ switch v := chrt.(type) {
+ case v2chart.Chart:
+ return &v2Accessor{&v}, nil
+ case *v2chart.Chart:
+ return &v2Accessor{v}, nil
+ case v3chart.Chart:
+ return &v3Accessor{&v}, nil
+ case *v3chart.Chart:
+ return &v3Accessor{v}, nil
+ default:
+ return nil, errors.New("unsupported chart type")
+ }
+}
+
+type v2Accessor struct {
+ chrt *v2chart.Chart
+}
+
+func (r *v2Accessor) Name() string {
+ return r.chrt.Metadata.Name
+}
+
+func (r *v2Accessor) IsRoot() bool {
+ return r.chrt.IsRoot()
+}
+
+func (r *v2Accessor) MetadataAsMap() map[string]interface{} {
+ var ret map[string]interface{}
+ if r.chrt.Metadata == nil {
+ return ret
+ }
+
+ ret, err := structToMap(r.chrt.Metadata)
+ if err != nil {
+ slog.Error("error converting metadata to map", "error", err)
+ }
+ return ret
+}
+
+func (r *v2Accessor) Files() []*common.File {
+ return r.chrt.Files
+}
+
+func (r *v2Accessor) Templates() []*common.File {
+ return r.chrt.Templates
+}
+
+func (r *v2Accessor) ChartFullPath() string {
+ return r.chrt.ChartFullPath()
+}
+
+func (r *v2Accessor) IsLibraryChart() bool {
+ return strings.EqualFold(r.chrt.Metadata.Type, "library")
+}
+
+func (r *v2Accessor) Dependencies() []Charter {
+ var deps = make([]Charter, len(r.chrt.Dependencies()))
+ for i, c := range r.chrt.Dependencies() {
+ deps[i] = c
+ }
+ return deps
+}
+
+func (r *v2Accessor) MetaDependencies() []Dependency {
+ var deps = make([]Dependency, len(r.chrt.Metadata.Dependencies))
+ for i, c := range r.chrt.Metadata.Dependencies {
+ deps[i] = c
+ }
+ return deps
+}
+
+func (r *v2Accessor) Values() map[string]interface{} {
+ return r.chrt.Values
+}
+
+func (r *v2Accessor) Schema() []byte {
+ return r.chrt.Schema
+}
+
+func (r *v2Accessor) Deprecated() bool {
+ return r.chrt.Metadata.Deprecated
+}
+
+type v3Accessor struct {
+ chrt *v3chart.Chart
+}
+
+func (r *v3Accessor) Name() string {
+ return r.chrt.Metadata.Name
+}
+
+func (r *v3Accessor) IsRoot() bool {
+ return r.chrt.IsRoot()
+}
+
+func (r *v3Accessor) MetadataAsMap() map[string]interface{} {
+ var ret map[string]interface{}
+ if r.chrt.Metadata == nil {
+ return ret
+ }
+
+ ret, err := structToMap(r.chrt.Metadata)
+ if err != nil {
+ slog.Error("error converting metadata to map", "error", err)
+ }
+ return ret
+}
+
+func (r *v3Accessor) Files() []*common.File {
+ return r.chrt.Files
+}
+
+func (r *v3Accessor) Templates() []*common.File {
+ return r.chrt.Templates
+}
+
+func (r *v3Accessor) ChartFullPath() string {
+ return r.chrt.ChartFullPath()
+}
+
+func (r *v3Accessor) IsLibraryChart() bool {
+ return strings.EqualFold(r.chrt.Metadata.Type, "library")
+}
+
+func (r *v3Accessor) Dependencies() []Charter {
+ var deps = make([]Charter, len(r.chrt.Dependencies()))
+ for i, c := range r.chrt.Dependencies() {
+ deps[i] = c
+ }
+ return deps
+}
+
+func (r *v3Accessor) MetaDependencies() []Dependency {
+ var deps = make([]Dependency, len(r.chrt.Dependencies()))
+ for i, c := range r.chrt.Metadata.Dependencies {
+ deps[i] = c
+ }
+ return deps
+}
+
+func (r *v3Accessor) Values() map[string]interface{} {
+ return r.chrt.Values
+}
+
+func (r *v3Accessor) Schema() []byte {
+ return r.chrt.Schema
+}
+
+func (r *v3Accessor) Deprecated() bool {
+ return r.chrt.Metadata.Deprecated
+}
+
+func structToMap(obj interface{}) (map[string]interface{}, error) {
+ objValue := reflect.ValueOf(obj)
+
+ // If the value is a pointer, dereference it
+ if objValue.Kind() == reflect.Pointer {
+ objValue = objValue.Elem()
+ }
+
+ // Check if the input is a struct
+ if objValue.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("input must be a struct or a pointer to a struct")
+ }
+
+ result := make(map[string]interface{})
+ objType := objValue.Type()
+
+ for i := 0; i < objValue.NumField(); i++ {
+ field := objType.Field(i)
+ value := objValue.Field(i)
+
+ switch value.Kind() {
+ case reflect.Struct:
+ nestedMap, err := structToMap(value.Interface())
+ if err != nil {
+ return nil, err
+ }
+ result[field.Name] = nestedMap
+ case reflect.Pointer:
+ // Recurse for pointers by dereferencing
+ if value.IsNil() {
+ result[field.Name] = nil
+ } else {
+ nestedMap, err := structToMap(value.Interface())
+ if err != nil {
+ return nil, err
+ }
+ result[field.Name] = nestedMap
+ }
+ case reflect.Slice:
+ sliceOfMaps := make([]interface{}, value.Len())
+ for j := 0; j < value.Len(); j++ {
+ sliceElement := value.Index(j)
+ if sliceElement.Kind() == reflect.Struct || sliceElement.Kind() == reflect.Pointer {
+ nestedMap, err := structToMap(sliceElement.Interface())
+ if err != nil {
+ return nil, err
+ }
+ sliceOfMaps[j] = nestedMap
+ } else {
+ sliceOfMaps[j] = sliceElement.Interface()
+ }
+ }
+ result[field.Name] = sliceOfMaps
+ default:
+ result[field.Name] = value.Interface()
+ }
+ }
+ return result, nil
+}
diff --git a/helm/pkg/chart/common/capabilities.go b/helm/pkg/chart/common/capabilities.go
new file mode 100644
index 000000000..18d00de90
--- /dev/null
+++ b/helm/pkg/chart/common/capabilities.go
@@ -0,0 +1,182 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+ "slices"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/Masterminds/semver/v3"
+ "k8s.io/client-go/kubernetes/scheme"
+
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ k8sversion "k8s.io/apimachinery/pkg/util/version"
+
+ helmversion "helm.sh/helm/v4/internal/version"
+)
+
+const (
+ kubeVersionMajorTesting = 1
+ kubeVersionMinorTesting = 20
+)
+
+var (
+ // DefaultVersionSet is the default version set, which includes only Core V1 ("v1").
+ DefaultVersionSet = allKnownVersions()
+
+ DefaultCapabilities = func() *Capabilities {
+ caps, err := makeDefaultCapabilities()
+ if err != nil {
+ panic(fmt.Sprintf("failed to create default capabilities: %v", err))
+ }
+ return caps
+
+ }()
+)
+
+// Capabilities describes the capabilities of the Kubernetes cluster.
+type Capabilities struct {
+ // KubeVersion is the Kubernetes version.
+ KubeVersion KubeVersion
+ // APIVersions are supported Kubernetes API versions.
+ APIVersions VersionSet
+ // HelmVersion is the build information for this helm version
+ HelmVersion helmversion.BuildInfo
+}
+
+func (capabilities *Capabilities) Copy() *Capabilities {
+ return &Capabilities{
+ KubeVersion: capabilities.KubeVersion,
+ APIVersions: capabilities.APIVersions,
+ HelmVersion: capabilities.HelmVersion,
+ }
+}
+
+// KubeVersion is the Kubernetes version.
+type KubeVersion struct {
+ Version string // Full version (e.g., v1.33.4-gke.1245000)
+ normalizedVersion string // Normalized for constraint checking (e.g., v1.33.4)
+ Major string // Kubernetes major version
+ Minor string // Kubernetes minor version
+}
+
+// String implements fmt.Stringer.
+// Returns the normalized version used for constraint checking.
+func (kv *KubeVersion) String() string {
+ if kv.normalizedVersion != "" {
+ return kv.normalizedVersion
+ }
+ return kv.Version
+}
+
+// GitVersion returns the full Kubernetes version string.
+//
+// Deprecated: use KubeVersion.Version.
+func (kv *KubeVersion) GitVersion() string { return kv.Version }
+
+// ParseKubeVersion parses kubernetes version from string
+func ParseKubeVersion(version string) (*KubeVersion, error) {
+ // Based on the original k8s version parser.
+ // https://github.com/kubernetes/kubernetes/blob/b266ac2c3e42c2c4843f81e20213d2b2f43e450a/staging/src/k8s.io/apimachinery/pkg/util/version/version.go#L137
+ sv, err := k8sversion.ParseGeneric(version)
+ if err != nil {
+ return nil, err
+ }
+
+ // Preserve original input (e.g., v1.33.4-gke.1245000)
+ gitVersion := version
+ if !strings.HasPrefix(version, "v") {
+ gitVersion = "v" + version
+ }
+
+ // Normalize for constraint checking (strips all suffixes)
+ normalizedVer := "v" + sv.String()
+
+ return &KubeVersion{
+ Version: gitVersion,
+ normalizedVersion: normalizedVer,
+ Major: strconv.FormatUint(uint64(sv.Major()), 10),
+ Minor: strconv.FormatUint(uint64(sv.Minor()), 10),
+ }, nil
+}
+
+// VersionSet is a set of Kubernetes API versions.
+type VersionSet []string
+
+// Has returns true if the version string is in the set.
+//
+// vs.Has("apps/v1")
+func (v VersionSet) Has(apiVersion string) bool {
+ return slices.Contains(v, apiVersion)
+}
+
+func allKnownVersions() VersionSet {
+ // We should register the built in extension APIs as well so CRDs are
+ // supported in the default version set. This has caused problems with `helm
+ // template` in the past, so let's be safe
+ apiextensionsv1beta1.AddToScheme(scheme.Scheme)
+ apiextensionsv1.AddToScheme(scheme.Scheme)
+
+ groups := scheme.Scheme.PrioritizedVersionsAllGroups()
+ vs := make(VersionSet, 0, len(groups))
+ for _, gv := range groups {
+ vs = append(vs, gv.String())
+ }
+ return vs
+}
+
+func makeDefaultCapabilities() (*Capabilities, error) {
+ // Test builds don't include debug info / module info
+ // (And even if they did, we probably want stable capabilities for tests anyway)
+ // Return a default value for test builds
+ if testing.Testing() {
+ return newCapabilities(kubeVersionMajorTesting, kubeVersionMinorTesting)
+ }
+
+ vstr, err := helmversion.K8sIOClientGoModVersion()
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve k8s.io/client-go version: %w", err)
+ }
+
+ v, err := semver.NewVersion(vstr)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse k8s.io/client-go version %q: %v", vstr, err)
+ }
+
+ kubeVersionMajor := v.Major() + 1
+ kubeVersionMinor := v.Minor()
+
+ return newCapabilities(kubeVersionMajor, kubeVersionMinor)
+}
+
+func newCapabilities(kubeVersionMajor, kubeVersionMinor uint64) (*Capabilities, error) {
+
+ version := fmt.Sprintf("v%d.%d.0", kubeVersionMajor, kubeVersionMinor)
+ return &Capabilities{
+ KubeVersion: KubeVersion{
+ Version: version,
+ normalizedVersion: version,
+ Major: fmt.Sprintf("%d", kubeVersionMajor),
+ Minor: fmt.Sprintf("%d", kubeVersionMinor),
+ },
+ APIVersions: DefaultVersionSet,
+ HelmVersion: helmversion.Get(),
+ }, nil
+}
diff --git a/helm/pkg/chart/common/capabilities_test.go b/helm/pkg/chart/common/capabilities_test.go
new file mode 100644
index 000000000..b96d7d29b
--- /dev/null
+++ b/helm/pkg/chart/common/capabilities_test.go
@@ -0,0 +1,121 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "testing"
+)
+
+func TestVersionSet(t *testing.T) {
+ vs := VersionSet{"v1", "apps/v1"}
+ if d := len(vs); d != 2 {
+ t.Errorf("Expected 2 versions, got %d", d)
+ }
+
+ if !vs.Has("apps/v1") {
+ t.Error("Expected to find apps/v1")
+ }
+
+ if vs.Has("Spanish/inquisition") {
+ t.Error("No one expects the Spanish/inquisition")
+ }
+}
+
+func TestDefaultVersionSet(t *testing.T) {
+ if !DefaultVersionSet.Has("v1") {
+ t.Error("Expected core v1 version set")
+ }
+}
+
+func TestDefaultCapabilities(t *testing.T) {
+ caps := DefaultCapabilities
+ kv := caps.KubeVersion
+ if kv.String() != "v1.20.0" {
+ t.Errorf("Expected default KubeVersion.String() to be v1.20.0, got %q", kv.String())
+ }
+ if kv.Version != "v1.20.0" {
+ t.Errorf("Expected default KubeVersion.Version to be v1.20.0, got %q", kv.Version)
+ }
+ if kv.GitVersion() != "v1.20.0" {
+ t.Errorf("Expected default KubeVersion.GitVersion() to be v1.20.0, got %q", kv.Version)
+ }
+ if kv.Major != "1" {
+ t.Errorf("Expected default KubeVersion.Major to be 1, got %q", kv.Major)
+ }
+ if kv.Minor != "20" {
+ t.Errorf("Expected default KubeVersion.Minor to be 20, got %q", kv.Minor)
+ }
+
+ hv := caps.HelmVersion
+ if hv.Version != "v4.1" {
+ t.Errorf("Expected default HelmVersion to be v4.1, got %q", hv.Version)
+ }
+}
+
+func TestParseKubeVersion(t *testing.T) {
+ kv, err := ParseKubeVersion("v1.16.0")
+ if err != nil {
+ t.Errorf("Expected v1.16.0 to parse successfully")
+ }
+ if kv.Version != "v1.16.0" {
+ t.Errorf("Expected parsed KubeVersion.Version to be v1.16.0, got %q", kv.String())
+ }
+ if kv.Major != "1" {
+ t.Errorf("Expected parsed KubeVersion.Major to be 1, got %q", kv.Major)
+ }
+ if kv.Minor != "16" {
+ t.Errorf("Expected parsed KubeVersion.Minor to be 16, got %q", kv.Minor)
+ }
+}
+
+func TestParseKubeVersionWithVendorSuffixes(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ wantVer string
+ wantString string
+ wantMajor string
+ wantMinor string
+ }{
+ {"GKE vendor suffix", "v1.33.4-gke.1245000", "v1.33.4-gke.1245000", "v1.33.4", "1", "33"},
+ {"GKE without v", "1.30.2-gke.1587003", "v1.30.2-gke.1587003", "v1.30.2", "1", "30"},
+ {"EKS trailing +", "v1.28+", "v1.28+", "v1.28", "1", "28"},
+ {"EKS + without v", "1.28+", "v1.28+", "v1.28", "1", "28"},
+ {"Standard version", "v1.31.0", "v1.31.0", "v1.31.0", "1", "31"},
+ {"Standard without v", "1.29.0", "v1.29.0", "v1.29.0", "1", "29"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ kv, err := ParseKubeVersion(tt.input)
+ if err != nil {
+ t.Fatalf("ParseKubeVersion() error = %v", err)
+ }
+ if kv.Version != tt.wantVer {
+ t.Errorf("Version = %q, want %q", kv.Version, tt.wantVer)
+ }
+ if kv.String() != tt.wantString {
+ t.Errorf("String() = %q, want %q", kv.String(), tt.wantString)
+ }
+ if kv.Major != tt.wantMajor {
+ t.Errorf("Major = %q, want %q", kv.Major, tt.wantMajor)
+ }
+ if kv.Minor != tt.wantMinor {
+ t.Errorf("Minor = %q, want %q", kv.Minor, tt.wantMinor)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/chart/common/errors.go b/helm/pkg/chart/common/errors.go
new file mode 100644
index 000000000..b0a2d650e
--- /dev/null
+++ b/helm/pkg/chart/common/errors.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+)
+
+// ErrNoTable indicates that a chart does not have a matching table.
+type ErrNoTable struct {
+ Key string
+}
+
+func (e ErrNoTable) Error() string { return fmt.Sprintf("%q is not a table", e.Key) }
+
+// ErrNoValue indicates that Values does not contain a key with a value
+type ErrNoValue struct {
+ Key string
+}
+
+func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) }
+
+type ErrInvalidChartName struct {
+ Name string
+}
+
+func (e ErrInvalidChartName) Error() string {
+ return fmt.Sprintf("%q is not a valid chart name", e.Name)
+}
diff --git a/helm/pkg/chart/common/errors_test.go b/helm/pkg/chart/common/errors_test.go
new file mode 100644
index 000000000..06b3b054c
--- /dev/null
+++ b/helm/pkg/chart/common/errors_test.go
@@ -0,0 +1,37 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "testing"
+)
+
+func TestErrorNoTableDoesNotPanic(t *testing.T) {
+ x := "empty"
+
+ y := ErrNoTable{x}
+
+ t.Logf("error is: %s", y)
+}
+
+func TestErrorNoValueDoesNotPanic(t *testing.T) {
+ x := "empty"
+
+ y := ErrNoValue{x}
+
+ t.Logf("error is: %s", y)
+}
diff --git a/helm/pkg/chart/common/file.go b/helm/pkg/chart/common/file.go
new file mode 100644
index 000000000..1068bf450
--- /dev/null
+++ b/helm/pkg/chart/common/file.go
@@ -0,0 +1,31 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import "time"
+
+// File represents a file as a name/value pair.
+//
+// By convention, name is a relative path within the scope of the chart's
+// base directory.
+type File struct {
+ // Name is the path-like name of the template.
+ Name string `json:"name"`
+ // Data is the template as byte data.
+ Data []byte `json:"data"`
+ // ModTime is the file's mod-time
+ ModTime time.Time `json:"modtime,omitzero"`
+}
diff --git a/helm/pkg/chart/common/testdata/coleridge.yaml b/helm/pkg/chart/common/testdata/coleridge.yaml
new file mode 100644
index 000000000..b6579628b
--- /dev/null
+++ b/helm/pkg/chart/common/testdata/coleridge.yaml
@@ -0,0 +1,12 @@
+poet: "Coleridge"
+title: "Rime of the Ancient Mariner"
+stanza: ["at", "length", "did", "cross", "an", "Albatross"]
+
+mariner:
+ with: "crossbow"
+ shot: "ALBATROSS"
+
+water:
+ water:
+ where: "everywhere"
+ nor: "any drop to drink"
diff --git a/helm/pkg/chart/common/util/coalesce.go b/helm/pkg/chart/common/util/coalesce.go
new file mode 100644
index 000000000..07794a04a
--- /dev/null
+++ b/helm/pkg/chart/common/util/coalesce.go
@@ -0,0 +1,335 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "log"
+ "maps"
+
+ "helm.sh/helm/v4/internal/copystructure"
+ chart "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func concatPrefix(a, b string) string {
+ if a == "" {
+ return b
+ }
+ return fmt.Sprintf("%s.%s", a, b)
+}
+
+// CoalesceValues coalesces all of the values in a chart (and its subcharts).
+//
+// Values are coalesced together using the following rules:
+//
+// - Values in a higher level chart always override values in a lower-level
+// dependency chart
+// - Scalar values and arrays are replaced, maps are merged
+// - A chart has access to all of the variables for it, as well as all of
+// the values destined for its dependencies.
+func CoalesceValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) {
+ valsCopy, err := copyValues(vals)
+ if err != nil {
+ return vals, err
+ }
+ return coalesce(log.Printf, chrt, valsCopy, "", false)
+}
+
+// MergeValues is used to merge the values in a chart and its subcharts. This
+// is different from Coalescing as nil/null values are preserved.
+//
+// Values are coalesced together using the following rules:
+//
+// - Values in a higher level chart always override values in a lower-level
+// dependency chart
+// - Scalar values and arrays are replaced, maps are merged
+// - A chart has access to all of the variables for it, as well as all of
+// the values destined for its dependencies.
+//
+// Retaining Nils is useful when processes early in a Helm action or business
+// logic need to retain them for when Coalescing will happen again later in the
+// business logic.
+func MergeValues(chrt chart.Charter, vals map[string]interface{}) (common.Values, error) {
+ valsCopy, err := copyValues(vals)
+ if err != nil {
+ return vals, err
+ }
+ return coalesce(log.Printf, chrt, valsCopy, "", true)
+}
+
+func copyValues(vals map[string]interface{}) (common.Values, error) {
+ v, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals, err
+ }
+
+ valsCopy := v.(map[string]interface{})
+ // if we have an empty map, make sure it is initialized
+ if valsCopy == nil {
+ valsCopy = make(map[string]interface{})
+ }
+
+ return valsCopy, nil
+}
+
+type printFn func(format string, v ...interface{})
+
+// coalesce coalesces the dest values and the chart values, giving priority to the dest values.
+//
+// This is a helper function for CoalesceValues and MergeValues.
+//
+// Note, the merge argument specifies whether this is being used by MergeValues
+// or CoalesceValues. Coalescing removes null values and their keys in some
+// situations while merging keeps the null values.
+func coalesce(printf printFn, ch chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
+ coalesceValues(printf, ch, dest, prefix, merge)
+ return coalesceDeps(printf, ch, dest, prefix, merge)
+}
+
+// coalesceDeps coalesces the dependencies of the given chart.
+func coalesceDeps(printf printFn, chrt chart.Charter, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
+ ch, err := chart.NewAccessor(chrt)
+ if err != nil {
+ return dest, err
+ }
+ for _, subchart := range ch.Dependencies() {
+ sub, err := chart.NewAccessor(subchart)
+ if err != nil {
+ return dest, err
+ }
+ if c, ok := dest[sub.Name()]; !ok {
+ // If dest doesn't already have the key, create it.
+ dest[sub.Name()] = make(map[string]interface{})
+ } else if !istable(c) {
+ return dest, fmt.Errorf("type mismatch on %s: %t", sub.Name(), c)
+ }
+ if dv, ok := dest[sub.Name()]; ok {
+ dvmap := dv.(map[string]interface{})
+ subPrefix := concatPrefix(prefix, ch.Name())
+ // Get globals out of dest and merge them into dvmap.
+ coalesceGlobals(printf, dvmap, dest, subPrefix, merge)
+ // Now coalesce the rest of the values.
+ var err error
+ dest[sub.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge)
+ if err != nil {
+ return dest, err
+ }
+ }
+ }
+ return dest, nil
+}
+
+// coalesceGlobals copies the globals out of src and merges them into dest.
+//
+// For convenience, returns dest.
+func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) {
+ var dg, sg map[string]interface{}
+
+ if destglob, ok := dest[common.GlobalKey]; !ok {
+ dg = make(map[string]interface{})
+ } else if dg, ok = destglob.(map[string]interface{}); !ok {
+ printf("warning: skipping globals because destination %s is not a table.", common.GlobalKey)
+ return
+ }
+
+ if srcglob, ok := src[common.GlobalKey]; !ok {
+ sg = make(map[string]interface{})
+ } else if sg, ok = srcglob.(map[string]interface{}); !ok {
+ printf("warning: skipping globals because source %s is not a table.", common.GlobalKey)
+ return
+ }
+
+ // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This
+ // reverses that decision. It may somehow be possible to introduce a loop
+ // here, but I haven't found a way. So for the time being, let's allow
+ // tables in globals.
+ for key, val := range sg {
+ if istable(val) {
+ vv := copyMap(val.(map[string]interface{}))
+ if destv, ok := dg[key]; !ok {
+ // Here there is no merge. We're just adding.
+ dg[key] = vv
+ } else {
+ if destvmap, ok := destv.(map[string]interface{}); !ok {
+ printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key)
+ } else {
+ // Basically, we reverse order of coalesce here to merge
+ // top-down.
+ subPrefix := concatPrefix(prefix, key)
+ // In this location coalesceTablesFullKey should always have
+ // merge set to true. The output of coalesceGlobals is run
+ // through coalesce where any nils will be removed.
+ coalesceTablesFullKey(printf, vv, destvmap, subPrefix, true)
+ dg[key] = vv
+ }
+ }
+ } else if dv, ok := dg[key]; ok && istable(dv) {
+ // It's not clear if this condition can actually ever trigger.
+ printf("key %s is table. Skipping", key)
+ } else {
+ // TODO: Do we need to do any additional checking on the value?
+ dg[key] = val
+ }
+ }
+ dest[common.GlobalKey] = dg
+}
+
+func copyMap(src map[string]interface{}) map[string]interface{} {
+ m := make(map[string]interface{}, len(src))
+ maps.Copy(m, src)
+ return m
+}
+
+// coalesceValues builds up a values map for a particular chart.
+//
+// Values in v will override the values in the chart.
+func coalesceValues(printf printFn, c chart.Charter, v map[string]interface{}, prefix string, merge bool) {
+ ch, err := chart.NewAccessor(c)
+ if err != nil {
+ return
+ }
+
+ subPrefix := concatPrefix(prefix, ch.Name())
+
+ // Using c.Values directly when coalescing a table can cause problems where
+ // the original c.Values is altered. Creating a deep copy stops the problem.
+ // This section is fault-tolerant as there is no ability to return an error.
+ valuesCopy, err := copystructure.Copy(ch.Values())
+ var vc map[string]interface{}
+ var ok bool
+ if err != nil {
+ // If there is an error something is wrong with copying c.Values it
+ // means there is a problem in the deep copying package or something
+ // wrong with c.Values. In this case we will use c.Values and report
+ // an error.
+ printf("warning: unable to copy values, err: %s", err)
+ vc = ch.Values()
+ } else {
+ vc, ok = valuesCopy.(map[string]interface{})
+ if !ok {
+ // c.Values has a map[string]interface{} structure. If the copy of
+ // it cannot be treated as map[string]interface{} there is something
+ // strangely wrong. Log it and use c.Values
+ printf("warning: unable to convert values copy to values type")
+ vc = ch.Values()
+ }
+ }
+
+ for key, val := range vc {
+ if value, ok := v[key]; ok {
+ if value == nil && !merge {
+ // When the YAML value is null and we are coalescing instead of
+ // merging, we remove the value's key.
+ // This allows Helm's various sources of values (value files or --set) to
+ // remove incompatible keys from any previous chart, file, or set values.
+ delete(v, key)
+ } else if dest, ok := value.(map[string]interface{}); ok {
+ // if v[key] is a table, merge nv's val table into v[key].
+ src, ok := val.(map[string]interface{})
+ if !ok {
+ // If the original value is nil, there is nothing to coalesce, so we don't print
+ // the warning
+ if val != nil {
+ printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key)
+ }
+ } else {
+ // If the key is a child chart, coalesce tables with Merge set to true
+ merge := childChartMergeTrue(c, key, merge)
+
+ // Because v has higher precedence than nv, dest values override src
+ // values.
+ coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge)
+ }
+ }
+ } else {
+ // If the key is not in v, copy it from nv.
+ v[key] = val
+ }
+ }
+}
+
+func childChartMergeTrue(chrt chart.Charter, key string, merge bool) bool {
+ ch, err := chart.NewAccessor(chrt)
+ if err != nil {
+ return merge
+ }
+ for _, subchart := range ch.Dependencies() {
+ sub, err := chart.NewAccessor(subchart)
+ if err != nil {
+ return merge
+ }
+ if sub.Name() == key {
+ return true
+ }
+ }
+ return merge
+}
+
+// CoalesceTables merges a source map into a destination map.
+//
+// dest is considered authoritative.
+func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} {
+ return coalesceTablesFullKey(log.Printf, dst, src, "", false)
+}
+
+func MergeTables(dst, src map[string]interface{}) map[string]interface{} {
+ return coalesceTablesFullKey(log.Printf, dst, src, "", true)
+}
+
+// coalesceTablesFullKey merges a source map into a destination map.
+//
+// dest is considered authoritative.
+func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string, merge bool) map[string]interface{} {
+ // When --reuse-values is set but there are no modifications yet, return new values
+ if src == nil {
+ return dst
+ }
+ if dst == nil {
+ return src
+ }
+ for key, val := range dst {
+ if val == nil {
+ src[key] = nil
+ }
+ }
+ // Because dest has higher precedence than src, dest values override src
+ // values.
+ for key, val := range src {
+ fullkey := concatPrefix(prefix, key)
+ if dv, ok := dst[key]; ok && !merge && dv == nil {
+ delete(dst, key)
+ } else if !ok {
+ dst[key] = val
+ } else if istable(val) {
+ if istable(dv) {
+ coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge)
+ } else {
+ printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val)
+ }
+ } else if istable(dv) && val != nil {
+ printf("warning: destination for %s is a table. Ignoring non-table value (%v)", fullkey, val)
+ }
+ }
+ return dst
+}
+
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
diff --git a/helm/pkg/chart/common/util/coalesce_test.go b/helm/pkg/chart/common/util/coalesce_test.go
new file mode 100644
index 000000000..871bfa8da
--- /dev/null
+++ b/helm/pkg/chart/common/util/coalesce_test.go
@@ -0,0 +1,733 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "testing"
+ "text/template"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// ref: http://www.yaml.org/spec/1.2/spec.html#id2803362
+var testCoalesceValuesYaml = []byte(`
+top: yup
+bottom: null
+right: Null
+left: NULL
+front: ~
+back: ""
+nested:
+ boat: null
+
+global:
+ name: Ishmael
+ subject: Queequeg
+ nested:
+ boat: true
+
+pequod:
+ boat: null
+ global:
+ name: Stinky
+ harpooner: Tashtego
+ nested:
+ boat: false
+ sail: true
+ foo2: null
+ ahab:
+ scope: whale
+ boat: null
+ nested:
+ foo: true
+ boat: null
+ object: null
+`)
+
+func withDeps(c *chart.Chart, deps ...*chart.Chart) *chart.Chart {
+ c.AddDependency(deps...)
+ return c
+}
+
+func TestCoalesceValues(t *testing.T) {
+ is := assert.New(t)
+
+ c := withDeps(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "moby"},
+ Values: map[string]interface{}{
+ "back": "exists",
+ "bottom": "exists",
+ "front": "exists",
+ "left": "exists",
+ "name": "moby",
+ "nested": map[string]interface{}{"boat": true},
+ "override": "bad",
+ "right": "exists",
+ "scope": "moby",
+ "top": "nope",
+ "global": map[string]interface{}{
+ "nested2": map[string]interface{}{"l0": "moby"},
+ },
+ "pequod": map[string]interface{}{
+ "boat": "maybe",
+ "ahab": map[string]interface{}{
+ "boat": "maybe",
+ "nested": map[string]interface{}{"boat": "maybe"},
+ },
+ },
+ },
+ },
+ withDeps(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "pequod"},
+ Values: map[string]interface{}{
+ "name": "pequod",
+ "scope": "pequod",
+ "global": map[string]interface{}{
+ "nested2": map[string]interface{}{"l1": "pequod"},
+ },
+ "boat": false,
+ "ahab": map[string]interface{}{
+ "boat": false,
+ "nested": map[string]interface{}{"boat": false},
+ },
+ },
+ },
+ &chart.Chart{
+ Metadata: &chart.Metadata{Name: "ahab"},
+ Values: map[string]interface{}{
+ "global": map[string]interface{}{
+ "nested": map[string]interface{}{"foo": "bar", "foo2": "bar2"},
+ "nested2": map[string]interface{}{"l2": "ahab"},
+ },
+ "scope": "ahab",
+ "name": "ahab",
+ "boat": true,
+ "nested": map[string]interface{}{"foo": false, "boat": true},
+ "object": map[string]interface{}{"foo": "bar"},
+ },
+ },
+ ),
+ &chart.Chart{
+ Metadata: &chart.Metadata{Name: "spouter"},
+ Values: map[string]interface{}{
+ "scope": "spouter",
+ "global": map[string]interface{}{
+ "nested2": map[string]interface{}{"l1": "spouter"},
+ },
+ },
+ },
+ )
+
+ vals, err := common.ReadValues(testCoalesceValuesYaml)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // taking a copy of the values before passing it
+ // to CoalesceValues as argument, so that we can
+ // use it for asserting later
+ valsCopy := make(common.Values, len(vals))
+ maps.Copy(valsCopy, vals)
+
+ v, err := CoalesceValues(c, vals)
+ if err != nil {
+ t.Fatal(err)
+ }
+ j, _ := json.MarshalIndent(v, "", " ")
+ t.Logf("Coalesced Values: %s", string(j))
+
+ tests := []struct {
+ tpl string
+ expect string
+ }{
+ {"{{.top}}", "yup"},
+ {"{{.back}}", ""},
+ {"{{.name}}", "moby"},
+ {"{{.global.name}}", "Ishmael"},
+ {"{{.global.subject}}", "Queequeg"},
+ {"{{.global.harpooner}}", ""},
+ {"{{.pequod.name}}", "pequod"},
+ {"{{.pequod.ahab.name}}", "ahab"},
+ {"{{.pequod.ahab.scope}}", "whale"},
+ {"{{.pequod.ahab.nested.foo}}", "true"},
+ {"{{.pequod.ahab.global.name}}", "Ishmael"},
+ {"{{.pequod.ahab.global.nested.foo}}", "bar"},
+ {"{{.pequod.ahab.global.nested.foo2}}", ""},
+ {"{{.pequod.ahab.global.subject}}", "Queequeg"},
+ {"{{.pequod.ahab.global.harpooner}}", "Tashtego"},
+ {"{{.pequod.global.name}}", "Ishmael"},
+ {"{{.pequod.global.nested.foo}}", ""},
+ {"{{.pequod.global.subject}}", "Queequeg"},
+ {"{{.spouter.global.name}}", "Ishmael"},
+ {"{{.spouter.global.harpooner}}", ""},
+
+ {"{{.global.nested.boat}}", "true"},
+ {"{{.pequod.global.nested.boat}}", "true"},
+ {"{{.spouter.global.nested.boat}}", "true"},
+ {"{{.pequod.global.nested.sail}}", "true"},
+ {"{{.spouter.global.nested.sail}}", ""},
+
+ {"{{.global.nested2.l0}}", "moby"},
+ {"{{.global.nested2.l1}}", ""},
+ {"{{.global.nested2.l2}}", ""},
+ {"{{.pequod.global.nested2.l0}}", "moby"},
+ {"{{.pequod.global.nested2.l1}}", "pequod"},
+ {"{{.pequod.global.nested2.l2}}", ""},
+ {"{{.pequod.ahab.global.nested2.l0}}", "moby"},
+ {"{{.pequod.ahab.global.nested2.l1}}", "pequod"},
+ {"{{.pequod.ahab.global.nested2.l2}}", "ahab"},
+ {"{{.spouter.global.nested2.l0}}", "moby"},
+ {"{{.spouter.global.nested2.l1}}", "spouter"},
+ {"{{.spouter.global.nested2.l2}}", ""},
+ }
+
+ for _, tt := range tests {
+ if o, err := ttpl(tt.tpl, v); err != nil || o != tt.expect {
+ t.Errorf("Expected %q to expand to %q, got %q", tt.tpl, tt.expect, o)
+ }
+ }
+
+ nullKeys := []string{"bottom", "right", "left", "front"}
+ for _, nullKey := range nullKeys {
+ if _, ok := v[nullKey]; ok {
+ t.Errorf("Expected key %q to be removed, still present", nullKey)
+ }
+ }
+
+ if _, ok := v["nested"].(map[string]interface{})["boat"]; ok {
+ t.Error("Expected nested boat key to be removed, still present")
+ }
+
+ subchart := v["pequod"].(map[string]interface{})
+ if _, ok := subchart["boat"]; ok {
+ t.Error("Expected subchart boat key to be removed, still present")
+ }
+
+ subsubchart := subchart["ahab"].(map[string]interface{})
+ if _, ok := subsubchart["boat"]; ok {
+ t.Error("Expected sub-subchart ahab boat key to be removed, still present")
+ }
+
+ if _, ok := subsubchart["nested"].(map[string]interface{})["boat"]; ok {
+ t.Error("Expected sub-subchart nested boat key to be removed, still present")
+ }
+
+ if _, ok := subsubchart["object"]; ok {
+ t.Error("Expected sub-subchart object map to be removed, still present")
+ }
+
+ // CoalesceValues should not mutate the passed arguments
+ is.Equal(valsCopy, vals)
+}
+
+func ttpl(tpl string, v map[string]interface{}) (string, error) {
+ var b bytes.Buffer
+ tt := template.Must(template.New("t").Parse(tpl))
+ err := tt.Execute(&b, v)
+ return b.String(), err
+}
+
+func TestMergeValues(t *testing.T) {
+ is := assert.New(t)
+
+ c := withDeps(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "moby"},
+ Values: map[string]interface{}{
+ "back": "exists",
+ "bottom": "exists",
+ "front": "exists",
+ "left": "exists",
+ "name": "moby",
+ "nested": map[string]interface{}{"boat": true},
+ "override": "bad",
+ "right": "exists",
+ "scope": "moby",
+ "top": "nope",
+ "global": map[string]interface{}{
+ "nested2": map[string]interface{}{"l0": "moby"},
+ },
+ },
+ },
+ withDeps(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "pequod"},
+ Values: map[string]interface{}{
+ "name": "pequod",
+ "scope": "pequod",
+ "global": map[string]interface{}{
+ "nested2": map[string]interface{}{"l1": "pequod"},
+ },
+ },
+ },
+ &chart.Chart{
+ Metadata: &chart.Metadata{Name: "ahab"},
+ Values: map[string]interface{}{
+ "global": map[string]interface{}{
+ "nested": map[string]interface{}{"foo": "bar"},
+ "nested2": map[string]interface{}{"l2": "ahab"},
+ },
+ "scope": "ahab",
+ "name": "ahab",
+ "boat": true,
+ "nested": map[string]interface{}{"foo": false, "bar": true},
+ },
+ },
+ ),
+ &chart.Chart{
+ Metadata: &chart.Metadata{Name: "spouter"},
+ Values: map[string]interface{}{
+ "scope": "spouter",
+ "global": map[string]interface{}{
+ "nested2": map[string]interface{}{"l1": "spouter"},
+ },
+ },
+ },
+ )
+
+ vals, err := common.ReadValues(testCoalesceValuesYaml)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // taking a copy of the values before passing it
+ // to MergeValues as argument, so that we can
+ // use it for asserting later
+ valsCopy := make(common.Values, len(vals))
+ maps.Copy(valsCopy, vals)
+
+ v, err := MergeValues(c, vals)
+ if err != nil {
+ t.Fatal(err)
+ }
+ j, _ := json.MarshalIndent(v, "", " ")
+ t.Logf("Coalesced Values: %s", string(j))
+
+ tests := []struct {
+ tpl string
+ expect string
+ }{
+ {"{{.top}}", "yup"},
+ {"{{.back}}", ""},
+ {"{{.name}}", "moby"},
+ {"{{.global.name}}", "Ishmael"},
+ {"{{.global.subject}}", "Queequeg"},
+ {"{{.global.harpooner}}", ""},
+ {"{{.pequod.name}}", "pequod"},
+ {"{{.pequod.ahab.name}}", "ahab"},
+ {"{{.pequod.ahab.scope}}", "whale"},
+ {"{{.pequod.ahab.nested.foo}}", "true"},
+ {"{{.pequod.ahab.global.name}}", "Ishmael"},
+ {"{{.pequod.ahab.global.nested.foo}}", "bar"},
+ {"{{.pequod.ahab.global.subject}}", "Queequeg"},
+ {"{{.pequod.ahab.global.harpooner}}", "Tashtego"},
+ {"{{.pequod.global.name}}", "Ishmael"},
+ {"{{.pequod.global.nested.foo}}", ""},
+ {"{{.pequod.global.subject}}", "Queequeg"},
+ {"{{.spouter.global.name}}", "Ishmael"},
+ {"{{.spouter.global.harpooner}}", ""},
+
+ {"{{.global.nested.boat}}", "true"},
+ {"{{.pequod.global.nested.boat}}", "true"},
+ {"{{.spouter.global.nested.boat}}", "true"},
+ {"{{.pequod.global.nested.sail}}", "true"},
+ {"{{.spouter.global.nested.sail}}", ""},
+
+ {"{{.global.nested2.l0}}", "moby"},
+ {"{{.global.nested2.l1}}", ""},
+ {"{{.global.nested2.l2}}", ""},
+ {"{{.pequod.global.nested2.l0}}", "moby"},
+ {"{{.pequod.global.nested2.l1}}", "pequod"},
+ {"{{.pequod.global.nested2.l2}}", ""},
+ {"{{.pequod.ahab.global.nested2.l0}}", "moby"},
+ {"{{.pequod.ahab.global.nested2.l1}}", "pequod"},
+ {"{{.pequod.ahab.global.nested2.l2}}", "ahab"},
+ {"{{.spouter.global.nested2.l0}}", "moby"},
+ {"{{.spouter.global.nested2.l1}}", "spouter"},
+ {"{{.spouter.global.nested2.l2}}", ""},
+ }
+
+ for _, tt := range tests {
+ if o, err := ttpl(tt.tpl, v); err != nil || o != tt.expect {
+ t.Errorf("Expected %q to expand to %q, got %q", tt.tpl, tt.expect, o)
+ }
+ }
+
+ // nullKeys is different from coalescing. Here the null/nil values are not
+ // removed.
+ nullKeys := []string{"bottom", "right", "left", "front"}
+ for _, nullKey := range nullKeys {
+ if vv, ok := v[nullKey]; !ok {
+ t.Errorf("Expected key %q to be present but it was removed", nullKey)
+ } else if vv != nil {
+ t.Errorf("Expected key %q to be null but it has a value of %v", nullKey, vv)
+ }
+ }
+
+ if _, ok := v["nested"].(map[string]interface{})["boat"]; !ok {
+ t.Error("Expected nested boat key to be present but it was removed")
+ }
+
+ subchart := v["pequod"].(map[string]interface{})["ahab"].(map[string]interface{})
+ if _, ok := subchart["boat"]; !ok {
+ t.Error("Expected subchart boat key to be present but it was removed")
+ }
+
+ if _, ok := subchart["nested"].(map[string]interface{})["bar"]; !ok {
+ t.Error("Expected subchart nested bar key to be present but it was removed")
+ }
+
+ // CoalesceValues should not mutate the passed arguments
+ is.Equal(valsCopy, vals)
+}
+
+func TestCoalesceTables(t *testing.T) {
+ dst := map[string]interface{}{
+ "name": "Ishmael",
+ "address": map[string]interface{}{
+ "street": "123 Spouter Inn Ct.",
+ "city": "Nantucket",
+ "country": nil,
+ },
+ "details": map[string]interface{}{
+ "friends": []string{"Tashtego"},
+ },
+ "boat": "pequod",
+ "hole": nil,
+ }
+ src := map[string]interface{}{
+ "occupation": "whaler",
+ "address": map[string]interface{}{
+ "state": "MA",
+ "street": "234 Spouter Inn Ct.",
+ "country": "US",
+ },
+ "details": "empty",
+ "boat": map[string]interface{}{
+ "mast": true,
+ },
+ "hole": "black",
+ }
+
+ // What we expect is that anything in dst overrides anything in src, but that
+ // otherwise the values are coalesced.
+ CoalesceTables(dst, src)
+
+ if dst["name"] != "Ishmael" {
+ t.Errorf("Unexpected name: %s", dst["name"])
+ }
+ if dst["occupation"] != "whaler" {
+ t.Errorf("Unexpected occupation: %s", dst["occupation"])
+ }
+
+ addr, ok := dst["address"].(map[string]interface{})
+ if !ok {
+ t.Fatal("Address went away.")
+ }
+
+ if addr["street"].(string) != "123 Spouter Inn Ct." {
+ t.Errorf("Unexpected address: %v", addr["street"])
+ }
+
+ if addr["city"].(string) != "Nantucket" {
+ t.Errorf("Unexpected city: %v", addr["city"])
+ }
+
+ if addr["state"].(string) != "MA" {
+ t.Errorf("Unexpected state: %v", addr["state"])
+ }
+
+ if _, ok = addr["country"]; ok {
+ t.Error("The country is not left out.")
+ }
+
+ if det, ok := dst["details"].(map[string]interface{}); !ok {
+ t.Fatalf("Details is the wrong type: %v", dst["details"])
+ } else if _, ok := det["friends"]; !ok {
+ t.Error("Could not find your friends. Maybe you don't have any. :-(")
+ }
+
+ if dst["boat"].(string) != "pequod" {
+ t.Errorf("Expected boat string, got %v", dst["boat"])
+ }
+
+ if _, ok = dst["hole"]; ok {
+ t.Error("The hole still exists.")
+ }
+
+ dst2 := map[string]interface{}{
+ "name": "Ishmael",
+ "address": map[string]interface{}{
+ "street": "123 Spouter Inn Ct.",
+ "city": "Nantucket",
+ "country": "US",
+ },
+ "details": map[string]interface{}{
+ "friends": []string{"Tashtego"},
+ },
+ "boat": "pequod",
+ "hole": "black",
+ }
+
+ // What we expect is that anything in dst should have all values set,
+ // this happens when the --reuse-values flag is set but the chart has no modifications yet
+ CoalesceTables(dst2, nil)
+
+ if dst2["name"] != "Ishmael" {
+ t.Errorf("Unexpected name: %s", dst2["name"])
+ }
+
+ addr2, ok := dst2["address"].(map[string]interface{})
+ if !ok {
+ t.Fatal("Address went away.")
+ }
+
+ if addr2["street"].(string) != "123 Spouter Inn Ct." {
+ t.Errorf("Unexpected address: %v", addr2["street"])
+ }
+
+ if addr2["city"].(string) != "Nantucket" {
+ t.Errorf("Unexpected city: %v", addr2["city"])
+ }
+
+ if addr2["country"].(string) != "US" {
+ t.Errorf("Unexpected Country: %v", addr2["country"])
+ }
+
+ if det2, ok := dst2["details"].(map[string]interface{}); !ok {
+ t.Fatalf("Details is the wrong type: %v", dst2["details"])
+ } else if _, ok := det2["friends"]; !ok {
+ t.Error("Could not find your friends. Maybe you don't have any. :-(")
+ }
+
+ if dst2["boat"].(string) != "pequod" {
+ t.Errorf("Expected boat string, got %v", dst2["boat"])
+ }
+
+ if dst2["hole"].(string) != "black" {
+ t.Errorf("Expected hole string, got %v", dst2["boat"])
+ }
+}
+
+func TestMergeTables(t *testing.T) {
+ dst := map[string]interface{}{
+ "name": "Ishmael",
+ "address": map[string]interface{}{
+ "street": "123 Spouter Inn Ct.",
+ "city": "Nantucket",
+ "country": nil,
+ },
+ "details": map[string]interface{}{
+ "friends": []string{"Tashtego"},
+ },
+ "boat": "pequod",
+ "hole": nil,
+ }
+ src := map[string]interface{}{
+ "occupation": "whaler",
+ "address": map[string]interface{}{
+ "state": "MA",
+ "street": "234 Spouter Inn Ct.",
+ "country": "US",
+ },
+ "details": "empty",
+ "boat": map[string]interface{}{
+ "mast": true,
+ },
+ "hole": "black",
+ }
+
+ // What we expect is that anything in dst overrides anything in src, but that
+ // otherwise the values are coalesced.
+ MergeTables(dst, src)
+
+ if dst["name"] != "Ishmael" {
+ t.Errorf("Unexpected name: %s", dst["name"])
+ }
+ if dst["occupation"] != "whaler" {
+ t.Errorf("Unexpected occupation: %s", dst["occupation"])
+ }
+
+ addr, ok := dst["address"].(map[string]interface{})
+ if !ok {
+ t.Fatal("Address went away.")
+ }
+
+ if addr["street"].(string) != "123 Spouter Inn Ct." {
+ t.Errorf("Unexpected address: %v", addr["street"])
+ }
+
+ if addr["city"].(string) != "Nantucket" {
+ t.Errorf("Unexpected city: %v", addr["city"])
+ }
+
+ if addr["state"].(string) != "MA" {
+ t.Errorf("Unexpected state: %v", addr["state"])
+ }
+
+ // This is one test that is different from CoalesceTables. Because country
+ // is a nil value and it's not removed it's still present.
+ if _, ok = addr["country"]; !ok {
+ t.Error("The country is left out.")
+ }
+
+ if det, ok := dst["details"].(map[string]interface{}); !ok {
+ t.Fatalf("Details is the wrong type: %v", dst["details"])
+ } else if _, ok := det["friends"]; !ok {
+ t.Error("Could not find your friends. Maybe you don't have any. :-(")
+ }
+
+ if dst["boat"].(string) != "pequod" {
+ t.Errorf("Expected boat string, got %v", dst["boat"])
+ }
+
+ // This is one test that is different from CoalesceTables. Because hole
+ // is a nil value and it's not removed it's still present.
+ if _, ok = dst["hole"]; !ok {
+ t.Error("The hole no longer exists.")
+ }
+
+ dst2 := map[string]interface{}{
+ "name": "Ishmael",
+ "address": map[string]interface{}{
+ "street": "123 Spouter Inn Ct.",
+ "city": "Nantucket",
+ "country": "US",
+ },
+ "details": map[string]interface{}{
+ "friends": []string{"Tashtego"},
+ },
+ "boat": "pequod",
+ "hole": "black",
+ "nilval": nil,
+ }
+
+ // What we expect is that anything in dst should have all values set,
+ // this happens when the --reuse-values flag is set but the chart has no modifications yet
+ MergeTables(dst2, nil)
+
+ if dst2["name"] != "Ishmael" {
+ t.Errorf("Unexpected name: %s", dst2["name"])
+ }
+
+ addr2, ok := dst2["address"].(map[string]interface{})
+ if !ok {
+ t.Fatal("Address went away.")
+ }
+
+ if addr2["street"].(string) != "123 Spouter Inn Ct." {
+ t.Errorf("Unexpected address: %v", addr2["street"])
+ }
+
+ if addr2["city"].(string) != "Nantucket" {
+ t.Errorf("Unexpected city: %v", addr2["city"])
+ }
+
+ if addr2["country"].(string) != "US" {
+ t.Errorf("Unexpected Country: %v", addr2["country"])
+ }
+
+ if det2, ok := dst2["details"].(map[string]interface{}); !ok {
+ t.Fatalf("Details is the wrong type: %v", dst2["details"])
+ } else if _, ok := det2["friends"]; !ok {
+ t.Error("Could not find your friends. Maybe you don't have any. :-(")
+ }
+
+ if dst2["boat"].(string) != "pequod" {
+ t.Errorf("Expected boat string, got %v", dst2["boat"])
+ }
+
+ if dst2["hole"].(string) != "black" {
+ t.Errorf("Expected hole string, got %v", dst2["boat"])
+ }
+
+ if dst2["nilval"] != nil {
+ t.Error("Expected nilvalue to have nil value but it does not")
+ }
+}
+
+func TestCoalesceValuesWarnings(t *testing.T) {
+
+ c := withDeps(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "level1"},
+ Values: map[string]interface{}{
+ "name": "moby",
+ },
+ },
+ withDeps(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "level2"},
+ Values: map[string]interface{}{
+ "name": "pequod",
+ },
+ },
+ &chart.Chart{
+ Metadata: &chart.Metadata{Name: "level3"},
+ Values: map[string]interface{}{
+ "name": "ahab",
+ "boat": true,
+ "spear": map[string]interface{}{
+ "tip": true,
+ "sail": map[string]interface{}{
+ "cotton": true,
+ },
+ },
+ },
+ },
+ ),
+ )
+
+ vals := map[string]interface{}{
+ "level2": map[string]interface{}{
+ "level3": map[string]interface{}{
+ "boat": map[string]interface{}{"mast": true},
+ "spear": map[string]interface{}{
+ "tip": map[string]interface{}{
+ "sharp": true,
+ },
+ "sail": true,
+ },
+ },
+ },
+ }
+
+ warnings := make([]string, 0)
+ printf := func(format string, v ...interface{}) {
+ t.Logf(format, v...)
+ warnings = append(warnings, fmt.Sprintf(format, v...))
+ }
+
+ _, err := coalesce(printf, c, vals, "", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Logf("vals: %v", vals)
+ assert.Contains(t, warnings, "warning: skipped value for level1.level2.level3.boat: Not a table.")
+ assert.Contains(t, warnings, "warning: destination for level1.level2.level3.spear.tip is a table. Ignoring non-table value (true)")
+ assert.Contains(t, warnings, "warning: cannot overwrite table with non table for level1.level2.level3.spear.sail (map[cotton:true])")
+
+}
+
+func TestConcatPrefix(t *testing.T) {
+ assert.Equal(t, "b", concatPrefix("", "b"))
+ assert.Equal(t, "a.b", concatPrefix("a", "b"))
+}
diff --git a/helm/pkg/chart/common/util/jsonschema.go b/helm/pkg/chart/common/util/jsonschema.go
new file mode 100644
index 000000000..6d7f32604
--- /dev/null
+++ b/helm/pkg/chart/common/util/jsonschema.go
@@ -0,0 +1,218 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/santhosh-tekuri/jsonschema/v6"
+
+ "helm.sh/helm/v4/internal/version"
+
+ chart "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// HTTPURLLoader implements a loader for HTTP/HTTPS URLs
+type HTTPURLLoader http.Client
+
+func (l *HTTPURLLoader) Load(urlStr string) (any, error) {
+ client := (*http.Client)(l)
+
+ req, err := http.NewRequest(http.MethodGet, urlStr, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create HTTP request for %s: %w", urlStr, err)
+ }
+ req.Header.Set("User-Agent", version.GetUserAgent())
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("HTTP request failed for %s: %w", urlStr, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("HTTP request to %s returned status %d (%s)", urlStr, resp.StatusCode, http.StatusText(resp.StatusCode))
+ }
+
+ return jsonschema.UnmarshalJSON(resp.Body)
+}
+
+// newHTTPURLLoader creates a HTTP URL loader with proxy support.
+func newHTTPURLLoader() *HTTPURLLoader {
+ httpLoader := HTTPURLLoader(http.Client{
+ Timeout: 15 * time.Second,
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{},
+ },
+ })
+ return &httpLoader
+}
+
+// ValidateAgainstSchema checks that values does not violate the structure laid out in schema
+func ValidateAgainstSchema(ch chart.Charter, values map[string]interface{}) error {
+ chrt, err := chart.NewAccessor(ch)
+ if err != nil {
+ return err
+ }
+ var sb strings.Builder
+ if chrt.Schema() != nil {
+ slog.Debug("chart name", "chart-name", chrt.Name())
+ err := ValidateAgainstSingleSchema(values, chrt.Schema())
+ if err != nil {
+ sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name()))
+ sb.WriteString(err.Error())
+ }
+ }
+ slog.Debug("number of dependencies in the chart", "chart", chrt.Name(), "dependencies", len(chrt.Dependencies()))
+ // For each dependency, recursively call this function with the coalesced values
+ for _, subchart := range chrt.Dependencies() {
+ sub, err := chart.NewAccessor(subchart)
+ if err != nil {
+ return err
+ }
+
+ raw, exists := values[sub.Name()]
+ if !exists || raw == nil {
+ // No values provided for this subchart; nothing to validate
+ continue
+ }
+
+ subchartValues, ok := raw.(map[string]any)
+ if !ok {
+ sb.WriteString(fmt.Sprintf(
+ "%s:\ninvalid type for values: expected object (map), got %T\n",
+ sub.Name(), raw,
+ ))
+ continue
+ }
+
+ if err := ValidateAgainstSchema(subchart, subchartValues); err != nil {
+ sb.WriteString(err.Error())
+ }
+ }
+
+ if sb.Len() > 0 {
+ return errors.New(sb.String())
+ }
+
+ return nil
+}
+
+// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema
+func ValidateAgainstSingleSchema(values common.Values, schemaJSON []byte) (reterr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ reterr = fmt.Errorf("unable to validate schema: %s", r)
+ }
+ }()
+
+ // This unmarshal function leverages UseNumber() for number precision. The parser
+ // used for values does this as well.
+ schema, err := jsonschema.UnmarshalJSON(bytes.NewReader(schemaJSON))
+ if err != nil {
+ return err
+ }
+ slog.Debug("unmarshalled JSON schema", "schema", schemaJSON)
+
+ // Configure compiler with loaders for different URL schemes
+ loader := jsonschema.SchemeURLLoader{
+ "file": jsonschema.FileLoader{},
+ "http": newHTTPURLLoader(),
+ "https": newHTTPURLLoader(),
+ "urn": urnLoader{},
+ }
+
+ compiler := jsonschema.NewCompiler()
+ compiler.UseLoader(loader)
+ err = compiler.AddResource("file:///values.schema.json", schema)
+ if err != nil {
+ return err
+ }
+
+ validator, err := compiler.Compile("file:///values.schema.json")
+ if err != nil {
+ return err
+ }
+
+ err = validator.Validate(values.AsMap())
+ if err != nil {
+ return JSONSchemaValidationError{err}
+ }
+
+ return nil
+}
+
+// URNResolverFunc allows SDK to plug a URN resolver. It must return a
+// schema document compatible with the validator (e.g., result of
+// jsonschema.UnmarshalJSON).
+type URNResolverFunc func(urn string) (any, error)
+
+// URNResolver is the default resolver used by the URN loader. By default it
+// returns a clear error.
+var URNResolver URNResolverFunc = func(urn string) (any, error) {
+ return nil, fmt.Errorf("URN not resolved: %s", urn)
+}
+
+// urnLoader implements resolution for the urn: scheme by delegating to
+// URNResolver. If unresolved, it logs a warning and returns a permissive
+// boolean-true schema to avoid hard failures (back-compat behavior).
+type urnLoader struct{}
+
+// warnedURNs ensures we log the unresolved-URN warning only once per URN.
+var warnedURNs sync.Map
+
+func (l urnLoader) Load(urlStr string) (any, error) {
+ if doc, err := URNResolver(urlStr); err == nil && doc != nil {
+ return doc, nil
+ }
+ if _, loaded := warnedURNs.LoadOrStore(urlStr, struct{}{}); !loaded {
+ slog.Warn("unresolved URN reference ignored; using permissive schema", "urn", urlStr)
+ }
+ return jsonschema.UnmarshalJSON(strings.NewReader("true"))
+}
+
+// Note, JSONSchemaValidationError is used to wrap the error from the underlying
+// validation package so that Helm has a clean interface and the validation package
+// could be replaced without changing the Helm SDK API.
+
+// JSONSchemaValidationError is the error returned when there is a schema validation
+// error.
+type JSONSchemaValidationError struct {
+ embeddedErr error
+}
+
+// Error prints the error message
+func (e JSONSchemaValidationError) Error() string {
+ errStr := e.embeddedErr.Error()
+
+ // This string prefixes all of our error details. Further up the stack of helm error message
+ // building more detail is provided to users. This is removed.
+ errStr = strings.TrimPrefix(errStr, "jsonschema validation failed with 'file:///values.schema.json#'\n")
+
+ // The extra new line is needed for when there are sub-charts.
+ return errStr + "\n"
+}
diff --git a/helm/pkg/chart/common/util/jsonschema_test.go b/helm/pkg/chart/common/util/jsonschema_test.go
new file mode 100644
index 000000000..834b1faf6
--- /dev/null
+++ b/helm/pkg/chart/common/util/jsonschema_test.go
@@ -0,0 +1,391 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+func TestValidateAgainstSingleSchema(t *testing.T) {
+ values, err := common.ReadValuesFile("./testdata/test-values.yaml")
+ if err != nil {
+ t.Fatalf("Error reading YAML file: %s", err)
+ }
+ schema, err := os.ReadFile("./testdata/test-values.schema.json")
+ if err != nil {
+ t.Fatalf("Error reading YAML file: %s", err)
+ }
+
+ if err := ValidateAgainstSingleSchema(values, schema); err != nil {
+ t.Errorf("Error validating Values against Schema: %s", err)
+ }
+}
+
+func TestValidateAgainstInvalidSingleSchema(t *testing.T) {
+ values, err := common.ReadValuesFile("./testdata/test-values.yaml")
+ if err != nil {
+ t.Fatalf("Error reading YAML file: %s", err)
+ }
+ schema, err := os.ReadFile("./testdata/test-values-invalid.schema.json")
+ if err != nil {
+ t.Fatalf("Error reading YAML file: %s", err)
+ }
+
+ var errString string
+ if err := ValidateAgainstSingleSchema(values, schema); err == nil {
+ t.Fatalf("Expected an error, but got nil")
+ } else {
+ errString = err.Error()
+ }
+
+ expectedErrString := `"file:///values.schema.json#" is not valid against metaschema: jsonschema validation failed with 'https://json-schema.org/draft/2020-12/schema#'
+- at '': got number, want boolean or object`
+ if errString != expectedErrString {
+ t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
+ }
+}
+
+func TestValidateAgainstSingleSchemaNegative(t *testing.T) {
+ values, err := common.ReadValuesFile("./testdata/test-values-negative.yaml")
+ if err != nil {
+ t.Fatalf("Error reading YAML file: %s", err)
+ }
+ schema, err := os.ReadFile("./testdata/test-values.schema.json")
+ if err != nil {
+ t.Fatalf("Error reading JSON file: %s", err)
+ }
+
+ var errString string
+ if err := ValidateAgainstSingleSchema(values, schema); err == nil {
+ t.Fatalf("Expected an error, but got nil")
+ } else {
+ errString = err.Error()
+ }
+
+ expectedErrString := `- at '': missing property 'employmentInfo'
+- at '/age': minimum: got -5, want 0
+`
+ if errString != expectedErrString {
+ t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
+ }
+}
+
+const subchartSchema = `{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Values",
+ "type": "object",
+ "properties": {
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "age"
+ ]
+}
+`
+
+const subchartSchema2020 = `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "title": "Values",
+ "type": "object",
+ "properties": {
+ "data": {
+ "type": "array",
+ "contains": { "type": "string" },
+ "unevaluatedItems": { "type": "number" }
+ }
+ },
+ "required": ["data"]
+}
+`
+
+func TestValidateAgainstSchema(t *testing.T) {
+ subchartJSON := []byte(subchartSchema)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "subchart",
+ },
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chrt",
+ },
+ }
+ chrt.AddDependency(subchart)
+
+ vals := map[string]interface{}{
+ "name": "John",
+ "subchart": map[string]interface{}{
+ "age": 25,
+ },
+ }
+
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ t.Errorf("Error validating Values against Schema: %s", err)
+ }
+}
+
+func TestValidateAgainstSchemaNegative(t *testing.T) {
+ subchartJSON := []byte(subchartSchema)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "subchart",
+ },
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chrt",
+ },
+ }
+ chrt.AddDependency(subchart)
+
+ vals := map[string]interface{}{
+ "name": "John",
+ "subchart": map[string]interface{}{},
+ }
+
+ var errString string
+ if err := ValidateAgainstSchema(chrt, vals); err == nil {
+ t.Fatalf("Expected an error, but got nil")
+ } else {
+ errString = err.Error()
+ }
+
+ expectedErrString := `subchart:
+- at '': missing property 'age'
+`
+ if errString != expectedErrString {
+ t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
+ }
+}
+
+func TestValidateAgainstSchema2020(t *testing.T) {
+ subchartJSON := []byte(subchartSchema2020)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "subchart",
+ },
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chrt",
+ },
+ }
+ chrt.AddDependency(subchart)
+
+ vals := map[string]interface{}{
+ "name": "John",
+ "subchart": map[string]interface{}{
+ "data": []any{"hello", 12},
+ },
+ }
+
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ t.Errorf("Error validating Values against Schema: %s", err)
+ }
+}
+
+func TestValidateAgainstSchema2020Negative(t *testing.T) {
+ subchartJSON := []byte(subchartSchema2020)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "subchart",
+ },
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chrt",
+ },
+ }
+ chrt.AddDependency(subchart)
+
+ vals := map[string]interface{}{
+ "name": "John",
+ "subchart": map[string]interface{}{
+ "data": []any{12},
+ },
+ }
+
+ var errString string
+ if err := ValidateAgainstSchema(chrt, vals); err == nil {
+ t.Fatalf("Expected an error, but got nil")
+ } else {
+ errString = err.Error()
+ }
+
+ expectedErrString := `subchart:
+- at '/data': no items match contains schema
+ - at '/data/0': got number, want string
+`
+ if errString != expectedErrString {
+ t.Errorf("Error string :\n`%s`\ndoes not match expected\n`%s`", errString, expectedErrString)
+ }
+}
+
+func TestHTTPURLLoader_Load(t *testing.T) {
+ // Test successful JSON schema loading
+ t.Run("successful load", func(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`{"type": "object", "properties": {"name": {"type": "string"}}}`))
+ }))
+ defer server.Close()
+
+ loader := newHTTPURLLoader()
+ result, err := loader.Load(server.URL)
+ if err != nil {
+ t.Fatalf("Expected no error, got: %v", err)
+ }
+ if result == nil {
+ t.Fatal("Expected result to be non-nil")
+ }
+ })
+
+ t.Run("HTTP error status", func(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer server.Close()
+
+ loader := newHTTPURLLoader()
+ _, err := loader.Load(server.URL)
+ if err == nil {
+ t.Fatal("Expected error for HTTP 404")
+ }
+ if !strings.Contains(err.Error(), "404") {
+ t.Errorf("Expected error message to contain '404', got: %v", err)
+ }
+ })
+}
+
+// Test that an unresolved URN $ref is soft-ignored and validation succeeds.
+// it mimics the behavior of Helm 3.18.4
+func TestValidateAgainstSingleSchema_UnresolvedURN_Ignored(t *testing.T) {
+ schema := []byte(`{
+ "$schema": "https://json-schema.org/draft-07/schema#",
+ "$ref": "urn:example:helm:schemas:v1:helm-schema-validation-conditions:v1/helmSchemaValidation-true"
+ }`)
+ vals := map[string]interface{}{"any": "value"}
+ if err := ValidateAgainstSingleSchema(vals, schema); err != nil {
+ t.Fatalf("expected no error when URN unresolved is ignored, got: %v", err)
+ }
+}
+
+// Non-regression tests for https://github.com/helm/helm/issues/31202
+// Ensure ValidateAgainstSchema does not panic when:
+// - subchart key is missing
+// - subchart value is nil
+// - subchart value has an invalid type
+
+func TestValidateAgainstSchema_MissingSubchartValues_NoPanic(t *testing.T) {
+ subchartJSON := []byte(subchartSchema)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "subchart"},
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "chrt"},
+ }
+ chrt.AddDependency(subchart)
+
+ // No "subchart" key present in values
+ vals := map[string]any{
+ "name": "John",
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatalf("ValidateAgainstSchema panicked (missing subchart values): %v", r)
+ }
+ }()
+
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ t.Fatalf("expected no error when subchart values are missing, got: %v", err)
+ }
+}
+
+func TestValidateAgainstSchema_SubchartNil_NoPanic(t *testing.T) {
+ subchartJSON := []byte(subchartSchema)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "subchart"},
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "chrt"},
+ }
+ chrt.AddDependency(subchart)
+
+ // "subchart" key present but nil
+ vals := map[string]any{
+ "name": "John",
+ "subchart": nil,
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatalf("ValidateAgainstSchema panicked (nil subchart values): %v", r)
+ }
+ }()
+
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ t.Fatalf("expected no error when subchart values are nil, got: %v", err)
+ }
+}
+
+func TestValidateAgainstSchema_InvalidSubchartValuesType_NoPanic(t *testing.T) {
+ subchartJSON := []byte(subchartSchema)
+ subchart := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "subchart"},
+ Schema: subchartJSON,
+ }
+ chrt := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "chrt"},
+ }
+ chrt.AddDependency(subchart)
+
+ // "subchart" is the wrong type (string instead of map)
+ vals := map[string]any{
+ "name": "John",
+ "subchart": "oops",
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatalf("ValidateAgainstSchema panicked (invalid subchart values type): %v", r)
+ }
+ }()
+
+ // We expect a non-nil error (invalid type), but crucially no panic.
+ if err := ValidateAgainstSchema(chrt, vals); err == nil {
+ t.Fatalf("expected an error when subchart values have invalid type, got nil")
+ }
+}
diff --git a/helm/pkg/chart/common/util/testdata/test-values-invalid.schema.json b/helm/pkg/chart/common/util/testdata/test-values-invalid.schema.json
new file mode 100644
index 000000000..35a16a2c4
--- /dev/null
+++ b/helm/pkg/chart/common/util/testdata/test-values-invalid.schema.json
@@ -0,0 +1 @@
+ 1E1111111
diff --git a/helm/pkg/chart/common/util/testdata/test-values-negative.yaml b/helm/pkg/chart/common/util/testdata/test-values-negative.yaml
new file mode 100644
index 000000000..5a1250bff
--- /dev/null
+++ b/helm/pkg/chart/common/util/testdata/test-values-negative.yaml
@@ -0,0 +1,14 @@
+firstname: John
+lastname: Doe
+age: -5
+likesCoffee: true
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/chart/common/util/testdata/test-values.schema.json b/helm/pkg/chart/common/util/testdata/test-values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/pkg/chart/common/util/testdata/test-values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/pkg/chart/common/util/testdata/test-values.yaml b/helm/pkg/chart/common/util/testdata/test-values.yaml
new file mode 100644
index 000000000..042dea664
--- /dev/null
+++ b/helm/pkg/chart/common/util/testdata/test-values.yaml
@@ -0,0 +1,17 @@
+firstname: John
+lastname: Doe
+age: 25
+likesCoffee: true
+employmentInfo:
+ title: Software Developer
+ salary: 100000
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/chart/common/util/values.go b/helm/pkg/chart/common/util/values.go
new file mode 100644
index 000000000..85cb29012
--- /dev/null
+++ b/helm/pkg/chart/common/util/values.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+
+ "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values.
+func ToRenderValues(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities) (common.Values, error) {
+ return ToRenderValuesWithSchemaValidation(chrt, chrtVals, options, caps, false)
+}
+
+// ToRenderValuesWithSchemaValidation composes the struct from the data coming from the Releases, Charts and Values files
+//
+// This takes both ReleaseOptions and Capabilities to merge into the render values.
+func ToRenderValuesWithSchemaValidation(chrt chart.Charter, chrtVals map[string]interface{}, options common.ReleaseOptions, caps *common.Capabilities, skipSchemaValidation bool) (common.Values, error) {
+ if caps == nil {
+ caps = common.DefaultCapabilities
+ }
+ accessor, err := chart.NewAccessor(chrt)
+ if err != nil {
+ return nil, err
+ }
+ top := map[string]interface{}{
+ "Chart": accessor.MetadataAsMap(),
+ "Capabilities": caps,
+ "Release": map[string]interface{}{
+ "Name": options.Name,
+ "Namespace": options.Namespace,
+ "IsUpgrade": options.IsUpgrade,
+ "IsInstall": options.IsInstall,
+ "Revision": options.Revision,
+ "Service": "Helm",
+ },
+ }
+
+ vals, err := CoalesceValues(chrt, chrtVals)
+ if err != nil {
+ return common.Values(top), err
+ }
+
+ if !skipSchemaValidation {
+ if err := ValidateAgainstSchema(chrt, vals); err != nil {
+ return top, fmt.Errorf("values don't meet the specifications of the schema(s) in the following chart(s):\n%w", err)
+ }
+ }
+
+ top["Values"] = vals
+ return top, nil
+}
diff --git a/helm/pkg/chart/common/util/values_test.go b/helm/pkg/chart/common/util/values_test.go
new file mode 100644
index 000000000..706d3cfda
--- /dev/null
+++ b/helm/pkg/chart/common/util/values_test.go
@@ -0,0 +1,112 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+func TestToRenderValues(t *testing.T) {
+
+ chartValues := map[string]interface{}{
+ "name": "al Rashid",
+ "where": map[string]interface{}{
+ "city": "Basrah",
+ "title": "caliph",
+ },
+ }
+
+ overrideValues := map[string]interface{}{
+ "name": "Haroun",
+ "where": map[string]interface{}{
+ "city": "Baghdad",
+ "date": "809 CE",
+ },
+ }
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "test"},
+ Templates: []*common.File{},
+ Values: chartValues,
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: time.Now(), Data: []byte("1,001 Nights")},
+ },
+ }
+ c.AddDependency(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "where"},
+ })
+
+ o := common.ReleaseOptions{
+ Name: "Seven Voyages",
+ Namespace: "default",
+ Revision: 1,
+ IsInstall: true,
+ }
+
+ res, err := ToRenderValuesWithSchemaValidation(c, overrideValues, o, nil, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Ensure that the top-level values are all set.
+ metamap := res["Chart"].(map[string]interface{})
+ if name := metamap["Name"]; name.(string) != "test" {
+ t.Errorf("Expected chart name 'test', got %q", name)
+ }
+ relmap := res["Release"].(map[string]interface{})
+ if name := relmap["Name"]; name.(string) != "Seven Voyages" {
+ t.Errorf("Expected release name 'Seven Voyages', got %q", name)
+ }
+ if namespace := relmap["Namespace"]; namespace.(string) != "default" {
+ t.Errorf("Expected namespace 'default', got %q", namespace)
+ }
+ if revision := relmap["Revision"]; revision.(int) != 1 {
+ t.Errorf("Expected revision '1', got %d", revision)
+ }
+ if relmap["IsUpgrade"].(bool) {
+ t.Error("Expected upgrade to be false.")
+ }
+ if !relmap["IsInstall"].(bool) {
+ t.Errorf("Expected install to be true.")
+ }
+ if !res["Capabilities"].(*common.Capabilities).APIVersions.Has("v1") {
+ t.Error("Expected Capabilities to have v1 as an API")
+ }
+ if res["Capabilities"].(*common.Capabilities).KubeVersion.Major != "1" {
+ t.Error("Expected Capabilities to have a Kube version")
+ }
+
+ vals := res["Values"].(common.Values)
+ if vals["name"] != "Haroun" {
+ t.Errorf("Expected 'Haroun', got %q (%v)", vals["name"], vals)
+ }
+ where := vals["where"].(map[string]interface{})
+ expects := map[string]string{
+ "city": "Baghdad",
+ "date": "809 CE",
+ "title": "caliph",
+ }
+ for field, expect := range expects {
+ if got := where[field]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, where)
+ }
+ }
+}
diff --git a/helm/pkg/chart/common/values.go b/helm/pkg/chart/common/values.go
new file mode 100644
index 000000000..94958a779
--- /dev/null
+++ b/helm/pkg/chart/common/values.go
@@ -0,0 +1,175 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "errors"
+ "io"
+ "os"
+ "strings"
+
+ "sigs.k8s.io/yaml"
+)
+
+// GlobalKey is the name of the Values key that is used for storing global vars.
+const GlobalKey = "global"
+
+// Values represents a collection of chart values.
+type Values map[string]interface{}
+
+// YAML encodes the Values into a YAML string.
+func (v Values) YAML() (string, error) {
+ b, err := yaml.Marshal(v)
+ return string(b), err
+}
+
+// Table gets a table (YAML subsection) from a Values object.
+//
+// The table is returned as a Values.
+//
+// Compound table names may be specified with dots:
+//
+// foo.bar
+//
+// The above will be evaluated as "The table bar inside the table
+// foo".
+//
+// An ErrNoTable is returned if the table does not exist.
+func (v Values) Table(name string) (Values, error) {
+ table := v
+ var err error
+
+ for _, n := range parsePath(name) {
+ if table, err = tableLookup(table, n); err != nil {
+ break
+ }
+ }
+ return table, err
+}
+
+// AsMap is a utility function for converting Values to a map[string]interface{}.
+//
+// It protects against nil map panics.
+func (v Values) AsMap() map[string]interface{} {
+ if len(v) == 0 {
+ return map[string]interface{}{}
+ }
+ return v
+}
+
+// Encode writes serialized Values information to the given io.Writer.
+func (v Values) Encode(w io.Writer) error {
+ out, err := yaml.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(out)
+ return err
+}
+
+func tableLookup(v Values, simple string) (Values, error) {
+ v2, ok := v[simple]
+ if !ok {
+ return v, ErrNoTable{simple}
+ }
+ if vv, ok := v2.(map[string]interface{}); ok {
+ return vv, nil
+ }
+
+ // This catches a case where a value is of type Values, but doesn't (for some
+ // reason) match the map[string]interface{}. This has been observed in the
+ // wild, and might be a result of a nil map of type Values.
+ if vv, ok := v2.(Values); ok {
+ return vv, nil
+ }
+
+ return Values{}, ErrNoTable{simple}
+}
+
+// ReadValues will parse YAML byte data into a Values.
+func ReadValues(data []byte) (vals Values, err error) {
+ err = yaml.Unmarshal(data, &vals)
+ if len(vals) == 0 {
+ vals = Values{}
+ }
+ return vals, err
+}
+
+// ReadValuesFile will parse a YAML file into a map of values.
+func ReadValuesFile(filename string) (Values, error) {
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return map[string]interface{}{}, err
+ }
+ return ReadValues(data)
+}
+
+// ReleaseOptions represents the additional release options needed
+// for the composition of the final values struct
+type ReleaseOptions struct {
+ Name string
+ Namespace string
+ Revision int
+ IsUpgrade bool
+ IsInstall bool
+}
+
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
+
+// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path.
+// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods.
+// Given the following YAML data the value at path "chapter.one.title" is "Loomings".
+//
+// chapter:
+// one:
+// title: "Loomings"
+func (v Values) PathValue(path string) (interface{}, error) {
+ if path == "" {
+ return nil, errors.New("YAML path cannot be empty")
+ }
+ return v.pathValue(parsePath(path))
+}
+
+func (v Values) pathValue(path []string) (interface{}, error) {
+ if len(path) == 1 {
+ // if exists must be root key not table
+ if _, ok := v[path[0]]; ok && !istable(v[path[0]]) {
+ return v[path[0]], nil
+ }
+ return nil, ErrNoValue{path[0]}
+ }
+
+ key, path := path[len(path)-1], path[:len(path)-1]
+ // get our table for table path
+ t, err := v.Table(joinPath(path...))
+ if err != nil {
+ return nil, ErrNoValue{key}
+ }
+ // check table for key and ensure value is not a table
+ if k, ok := t[key]; ok && !istable(k) {
+ return k, nil
+ }
+ return nil, ErrNoValue{key}
+}
+
+func parsePath(key string) []string { return strings.Split(key, ".") }
+
+func joinPath(path ...string) string { return strings.Join(path, ".") }
diff --git a/helm/pkg/chart/common/values_test.go b/helm/pkg/chart/common/values_test.go
new file mode 100644
index 000000000..3cceeb2b5
--- /dev/null
+++ b/helm/pkg/chart/common/values_test.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "text/template"
+)
+
+func TestReadValues(t *testing.T) {
+ doc := `# Test YAML parse
+poet: "Coleridge"
+title: "Rime of the Ancient Mariner"
+stanza:
+ - "at"
+ - "length"
+ - "did"
+ - cross
+ - an
+ - Albatross
+
+mariner:
+ with: "crossbow"
+ shot: "ALBATROSS"
+
+water:
+ water:
+ where: "everywhere"
+ nor: "any drop to drink"
+`
+
+ data, err := ReadValues([]byte(doc))
+ if err != nil {
+ t.Fatalf("Error parsing bytes: %s", err)
+ }
+ matchValues(t, data)
+
+ tests := []string{`poet: "Coleridge"`, "# Just a comment", ""}
+
+ for _, tt := range tests {
+ data, err = ReadValues([]byte(tt))
+ if err != nil {
+ t.Fatalf("Error parsing bytes (%s): %s", tt, err)
+ }
+ if data == nil {
+ t.Errorf(`YAML string "%s" gave a nil map`, tt)
+ }
+ }
+}
+
+func TestReadValuesFile(t *testing.T) {
+ data, err := ReadValuesFile("./testdata/coleridge.yaml")
+ if err != nil {
+ t.Fatalf("Error reading YAML file: %s", err)
+ }
+ matchValues(t, data)
+}
+
+func ExampleValues() {
+ doc := `
+title: "Moby Dick"
+chapter:
+ one:
+ title: "Loomings"
+ two:
+ title: "The Carpet-Bag"
+ three:
+ title: "The Spouter Inn"
+`
+ d, err := ReadValues([]byte(doc))
+ if err != nil {
+ panic(err)
+ }
+ ch1, err := d.Table("chapter.one")
+ if err != nil {
+ panic("could not find chapter one")
+ }
+ fmt.Print(ch1["title"])
+ // Output:
+ // Loomings
+}
+
+func TestTable(t *testing.T) {
+ doc := `
+title: "Moby Dick"
+chapter:
+ one:
+ title: "Loomings"
+ two:
+ title: "The Carpet-Bag"
+ three:
+ title: "The Spouter Inn"
+`
+ d, err := ReadValues([]byte(doc))
+ if err != nil {
+ t.Fatalf("Failed to parse the White Whale: %s", err)
+ }
+
+ if _, err := d.Table("title"); err == nil {
+ t.Fatalf("Title is not a table.")
+ }
+
+ if _, err := d.Table("chapter"); err != nil {
+ t.Fatalf("Failed to get the chapter table: %s\n%v", err, d)
+ }
+
+ if v, err := d.Table("chapter.one"); err != nil {
+ t.Errorf("Failed to get chapter.one: %s", err)
+ } else if v["title"] != "Loomings" {
+ t.Errorf("Unexpected title: %s", v["title"])
+ }
+
+ if _, err := d.Table("chapter.three"); err != nil {
+ t.Errorf("Chapter three is missing: %s\n%v", err, d)
+ }
+
+ if _, err := d.Table("chapter.OneHundredThirtySix"); err == nil {
+ t.Errorf("I think you mean 'Epilogue'")
+ }
+}
+
+func matchValues(t *testing.T, data map[string]interface{}) {
+ t.Helper()
+ if data["poet"] != "Coleridge" {
+ t.Errorf("Unexpected poet: %s", data["poet"])
+ }
+
+ if o, err := ttpl("{{len .stanza}}", data); err != nil {
+ t.Errorf("len stanza: %s", err)
+ } else if o != "6" {
+ t.Errorf("Expected 6, got %s", o)
+ }
+
+ if o, err := ttpl("{{.mariner.shot}}", data); err != nil {
+ t.Errorf(".mariner.shot: %s", err)
+ } else if o != "ALBATROSS" {
+ t.Errorf("Expected that mariner shot ALBATROSS")
+ }
+
+ if o, err := ttpl("{{.water.water.where}}", data); err != nil {
+ t.Errorf(".water.water.where: %s", err)
+ } else if o != "everywhere" {
+ t.Errorf("Expected water water everywhere")
+ }
+}
+
+func ttpl(tpl string, v map[string]interface{}) (string, error) {
+ var b bytes.Buffer
+ tt := template.Must(template.New("t").Parse(tpl))
+ err := tt.Execute(&b, v)
+ return b.String(), err
+}
+
+func TestPathValue(t *testing.T) {
+ doc := `
+title: "Moby Dick"
+chapter:
+ one:
+ title: "Loomings"
+ two:
+ title: "The Carpet-Bag"
+ three:
+ title: "The Spouter Inn"
+`
+ d, err := ReadValues([]byte(doc))
+ if err != nil {
+ t.Fatalf("Failed to parse the White Whale: %s", err)
+ }
+
+ if v, err := d.PathValue("chapter.one.title"); err != nil {
+ t.Errorf("Got error instead of title: %s\n%v", err, d)
+ } else if v != "Loomings" {
+ t.Errorf("No error but got wrong value for title: %s\n%v", err, d)
+ }
+ if _, err := d.PathValue("chapter.one.doesnotexist"); err == nil {
+ t.Errorf("Non-existent key should return error: %s\n%v", err, d)
+ }
+ if _, err := d.PathValue("chapter.doesnotexist.one"); err == nil {
+ t.Errorf("Non-existent key in middle of path should return error: %s\n%v", err, d)
+ }
+ if _, err := d.PathValue(""); err == nil {
+ t.Error("Asking for the value from an empty path should yield an error")
+ }
+ if v, err := d.PathValue("title"); err == nil {
+ if v != "Moby Dick" {
+ t.Errorf("Failed to return values for root key title")
+ }
+ }
+}
diff --git a/helm/pkg/chart/dependency.go b/helm/pkg/chart/dependency.go
new file mode 100644
index 000000000..864fe6d2c
--- /dev/null
+++ b/helm/pkg/chart/dependency.go
@@ -0,0 +1,64 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ "errors"
+
+ v3chart "helm.sh/helm/v4/internal/chart/v3"
+ v2chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+var NewDependencyAccessor func(dep Dependency) (DependencyAccessor, error) = NewDefaultDependencyAccessor //nolint:revive
+
+func NewDefaultDependencyAccessor(dep Dependency) (DependencyAccessor, error) {
+ switch v := dep.(type) {
+ case v2chart.Dependency:
+ return &v2DependencyAccessor{&v}, nil
+ case *v2chart.Dependency:
+ return &v2DependencyAccessor{v}, nil
+ case v3chart.Dependency:
+ return &v3DependencyAccessor{&v}, nil
+ case *v3chart.Dependency:
+ return &v3DependencyAccessor{v}, nil
+ default:
+ return nil, errors.New("unsupported chart dependency type")
+ }
+}
+
+type v2DependencyAccessor struct {
+ dep *v2chart.Dependency
+}
+
+func (r *v2DependencyAccessor) Name() string {
+ return r.dep.Name
+}
+
+func (r *v2DependencyAccessor) Alias() string {
+ return r.dep.Alias
+}
+
+type v3DependencyAccessor struct {
+ dep *v3chart.Dependency
+}
+
+func (r *v3DependencyAccessor) Name() string {
+ return r.dep.Name
+}
+
+func (r *v3DependencyAccessor) Alias() string {
+ return r.dep.Alias
+}
diff --git a/helm/pkg/chart/interfaces.go b/helm/pkg/chart/interfaces.go
new file mode 100644
index 000000000..4001bc548
--- /dev/null
+++ b/helm/pkg/chart/interfaces.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+ common "helm.sh/helm/v4/pkg/chart/common"
+)
+
+type Charter interface{}
+
+type Dependency interface{}
+
+type Accessor interface {
+ Name() string
+ IsRoot() bool
+ MetadataAsMap() map[string]interface{}
+ Files() []*common.File
+ Templates() []*common.File
+ ChartFullPath() string
+ IsLibraryChart() bool
+ Dependencies() []Charter
+ MetaDependencies() []Dependency
+ Values() map[string]interface{}
+ Schema() []byte
+ Deprecated() bool
+}
+
+type DependencyAccessor interface {
+ Name() string
+ Alias() string
+}
diff --git a/helm/pkg/chart/loader/archive/archive.go b/helm/pkg/chart/loader/archive/archive.go
new file mode 100644
index 000000000..e98f5c333
--- /dev/null
+++ b/helm/pkg/chart/loader/archive/archive.go
@@ -0,0 +1,197 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// archive provides utility functions for working with Helm chart archive files
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// MaxDecompressedChartSize is the maximum size of a chart archive that will be
+// decompressed. This is the decompressed size of all the files.
+// The default value is 100 MiB.
+var MaxDecompressedChartSize int64 = 100 * 1024 * 1024 // Default 100 MiB
+
+// MaxDecompressedFileSize is the size of the largest file that Helm will attempt to load.
+// The size of the file is the decompressed version of it when it is stored in an archive.
+var MaxDecompressedFileSize int64 = 5 * 1024 * 1024 // Default 5 MiB
+
+var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`)
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// BufferedFile represents an archive file buffered for later processing.
+type BufferedFile struct {
+ Name string
+ ModTime time.Time
+ Data []byte
+}
+
+// LoadArchiveFiles reads in files out of an archive into memory. This function
+// performs important path security checks and should always be used before
+// expanding a tarball
+func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
+ unzipped, err := gzip.NewReader(in)
+ if err != nil {
+ return nil, err
+ }
+ defer unzipped.Close()
+
+ files := []*BufferedFile{}
+ tr := tar.NewReader(unzipped)
+ remainingSize := MaxDecompressedChartSize
+ for {
+ b := bytes.NewBuffer(nil)
+ hd, err := tr.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if hd.FileInfo().IsDir() {
+ // Use this instead of hd.Typeflag because we don't have to do any
+ // inference chasing.
+ continue
+ }
+
+ switch hd.Typeflag {
+ // We don't want to process these extension header files.
+ case tar.TypeXGlobalHeader, tar.TypeXHeader:
+ continue
+ }
+
+ // Archive could contain \ if generated on Windows
+ delimiter := "/"
+ if strings.ContainsRune(hd.Name, '\\') {
+ delimiter = "\\"
+ }
+
+ parts := strings.Split(hd.Name, delimiter)
+ n := strings.Join(parts[1:], delimiter)
+
+ // Normalize the path to the / delimiter
+ n = strings.ReplaceAll(n, delimiter, "/")
+
+ if path.IsAbs(n) {
+ return nil, errors.New("chart illegally contains absolute paths")
+ }
+
+ n = path.Clean(n)
+ if n == "." {
+ // In this case, the original path was relative when it should have been absolute.
+ return nil, fmt.Errorf("chart illegally contains content outside the base directory: %q", hd.Name)
+ }
+ if strings.HasPrefix(n, "..") {
+ return nil, errors.New("chart illegally references parent directory")
+ }
+
+ // In some particularly arcane acts of path creativity, it is possible to intermix
+ // UNIX and Windows style paths in such a way that you produce a result of the form
+ // c:/foo even after all the built-in absolute path checks. So we explicitly check
+ // for this condition.
+ if drivePathPattern.MatchString(n) {
+ return nil, errors.New("chart contains illegally named files")
+ }
+
+ if parts[0] == "Chart.yaml" {
+ return nil, errors.New("chart yaml not in base directory")
+ }
+
+ if hd.Size > remainingSize {
+ return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
+ }
+
+ if hd.Size > MaxDecompressedFileSize {
+ return nil, fmt.Errorf("decompressed chart file %q is larger than the maximum file size %d", hd.Name, MaxDecompressedFileSize)
+ }
+
+ limitedReader := io.LimitReader(tr, remainingSize)
+
+ bytesWritten, err := io.Copy(b, limitedReader)
+ if err != nil {
+ return nil, err
+ }
+
+ remainingSize -= bytesWritten
+ // When the bytesWritten are less than the file size it means the limit reader ended
+ // copying early. Here we report that error. This is important if the last file extracted
+ // is the one that goes over the limit. It assumes the Size stored in the tar header
+ // is correct, something many applications do.
+ if bytesWritten < hd.Size || remainingSize <= 0 {
+ return nil, fmt.Errorf("decompressed chart is larger than the maximum size %d", MaxDecompressedChartSize)
+ }
+
+ data := bytes.TrimPrefix(b.Bytes(), utf8bom)
+
+ files = append(files, &BufferedFile{Name: n, ModTime: hd.ModTime, Data: data})
+ b.Reset()
+ }
+
+ if len(files) == 0 {
+ return nil, errors.New("no files in chart archive")
+ }
+ return files, nil
+}
+
+// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive.
+//
+// Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence
+// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error
+// if we didn't check for this.
+func EnsureArchive(name string, raw *os.File) error {
+ defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed.
+
+ // Check the file format to give us a chance to provide the user with more actionable feedback.
+ buffer := make([]byte, 512)
+ _, err := raw.Read(buffer)
+ if err != nil && err != io.EOF {
+ return fmt.Errorf("file '%s' cannot be read: %s", name, err)
+ }
+
+ // Helm may identify achieve of the application/x-gzip as application/vnd.ms-fontobject.
+ // Fix for: https://github.com/helm/helm/issues/12261
+ if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" && !isGZipApplication(buffer) {
+ // TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide
+ // variety of content (Makefile, .zshrc) as valid YAML without errors.
+
+ // Wrong content type. Let's check if it's yaml and give an extra hint?
+ if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") {
+ return fmt.Errorf("file '%s' seems to be a YAML file, but expected a gzipped archive", name)
+ }
+ return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType)
+ }
+ return nil
+}
+
+// isGZipApplication checks whether the archive is of the application/x-gzip type.
+func isGZipApplication(data []byte) bool {
+ sig := []byte("\x1F\x8B\x08")
+ return bytes.HasPrefix(data, sig)
+}
diff --git a/helm/pkg/chart/loader/archive/archive_test.go b/helm/pkg/chart/loader/archive/archive_test.go
new file mode 100644
index 000000000..2fe09e9b2
--- /dev/null
+++ b/helm/pkg/chart/loader/archive/archive_test.go
@@ -0,0 +1,92 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "testing"
+)
+
+func TestLoadArchiveFiles(t *testing.T) {
+ tcs := []struct {
+ name string
+ generate func(w *tar.Writer)
+ check func(t *testing.T, files []*BufferedFile, err error)
+ }{
+ {
+ name: "empty input should return no files",
+ generate: func(_ *tar.Writer) {},
+ check: func(t *testing.T, _ []*BufferedFile, err error) {
+ t.Helper()
+ if err.Error() != "no files in chart archive" {
+ t.Fatalf(`expected "no files in chart archive", got [%#v]`, err)
+ }
+ },
+ },
+ {
+ name: "should ignore files with XGlobalHeader type",
+ generate: func(w *tar.Writer) {
+ // simulate the presence of a `pax_global_header` file like you would get when
+ // processing a GitHub release archive.
+ err := w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeXGlobalHeader,
+ Name: "pax_global_header",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // we need to have at least one file, otherwise we'll get the "no files in chart archive" error
+ err = w.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: "dir/empty",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ },
+ check: func(t *testing.T, files []*BufferedFile, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatalf(`got unwanted error [%#v] for tar file with pax_global_header content`, err)
+ }
+
+ if len(files) != 1 {
+ t.Fatalf(`expected to get one file but got [%v]`, files)
+ }
+ },
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ buf := &bytes.Buffer{}
+ gzw := gzip.NewWriter(buf)
+ tw := tar.NewWriter(gzw)
+
+ tc.generate(tw)
+
+ _ = tw.Close()
+ _ = gzw.Close()
+
+ files, err := LoadArchiveFiles(buf)
+ tc.check(t, files, err)
+ })
+ }
+}
diff --git a/helm/pkg/chart/loader/load.go b/helm/pkg/chart/loader/load.go
new file mode 100644
index 000000000..3fd381825
--- /dev/null
+++ b/helm/pkg/chart/loader/load.go
@@ -0,0 +1,196 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+
+ c3 "helm.sh/helm/v4/internal/chart/v3"
+ c3load "helm.sh/helm/v4/internal/chart/v3/loader"
+ "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ c2 "helm.sh/helm/v4/pkg/chart/v2"
+ c2load "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+// ChartLoader loads a chart.
+type ChartLoader interface {
+ Load() (chart.Charter, error)
+}
+
+// Loader returns a new ChartLoader appropriate for the given chart name
+func Loader(name string) (ChartLoader, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return DirLoader(name), nil
+ }
+ return FileLoader(name), nil
+}
+
+// Load takes a string name, tries to resolve it to a file or directory, and then loads it.
+//
+// This is the preferred way to load a chart. It will discover the chart encoding
+// and hand off to the appropriate chart reader.
+//
+// If a .helmignore file is present, the directory loader will skip loading any files
+// matching it. But .helmignore is not evaluated when reading out of an archive.
+func Load(name string) (chart.Charter, error) {
+ l, err := Loader(name)
+ if err != nil {
+ return nil, err
+ }
+ return l.Load()
+}
+
+// DirLoader loads a chart from a directory
+type DirLoader string
+
+// Load loads the chart
+func (l DirLoader) Load() (chart.Charter, error) {
+ return LoadDir(string(l))
+}
+
+func LoadDir(dir string) (chart.Charter, error) {
+ topdir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ name := filepath.Join(topdir, "Chart.yaml")
+ data, err := os.ReadFile(name)
+ if err != nil {
+ return nil, fmt.Errorf("unable to detect chart at %s: %w", name, err)
+ }
+
+ c := new(chartBase)
+ err = yaml.Unmarshal(data, c)
+ if err != nil {
+ return nil, fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+
+ switch c.APIVersion {
+ case c2.APIVersionV1, c2.APIVersionV2, "":
+ return c2load.Load(dir)
+ case c3.APIVersionV3:
+ return c3load.Load(dir)
+ default:
+ return nil, errors.New("unsupported chart version")
+ }
+
+}
+
+// FileLoader loads a chart from a file
+type FileLoader string
+
+// Load loads a chart
+func (l FileLoader) Load() (chart.Charter, error) {
+ return LoadFile(string(l))
+}
+
+func LoadFile(name string) (chart.Charter, error) {
+ if fi, err := os.Stat(name); err != nil {
+ return nil, err
+ } else if fi.IsDir() {
+ return nil, errors.New("cannot load a directory")
+ }
+
+ raw, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ err = archive.EnsureArchive(name, raw)
+ if err != nil {
+ return nil, err
+ }
+
+ files, err := archive.LoadArchiveFiles(raw)
+ if err != nil {
+ if errors.Is(err, gzip.ErrHeader) {
+ return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %w)", name, err)
+ }
+ return nil, errors.New("unable to load chart archive")
+ }
+
+ for _, f := range files {
+ if f.Name == "Chart.yaml" {
+ c := new(chartBase)
+ if err := yaml.Unmarshal(f.Data, c); err != nil {
+ return c, fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ switch c.APIVersion {
+ case c2.APIVersionV1, c2.APIVersionV2, "":
+ return c2load.Load(name)
+ case c3.APIVersionV3:
+ return c3load.Load(name)
+ default:
+ return nil, errors.New("unsupported chart version")
+ }
+ }
+ }
+
+ return nil, errors.New("unable to detect chart version, no Chart.yaml found")
+}
+
+// LoadArchive loads from a reader containing a compressed tar archive.
+func LoadArchive(in io.Reader) (chart.Charter, error) {
+ // Note: This function is for use by SDK users such as Flux.
+
+ files, err := archive.LoadArchiveFiles(in)
+ if err != nil {
+ if errors.Is(err, gzip.ErrHeader) {
+ return nil, fmt.Errorf("stream does not appear to be a valid chart file (details: %w)", err)
+ }
+ return nil, fmt.Errorf("unable to load chart archive: %w", err)
+ }
+
+ for _, f := range files {
+ if f.Name == "Chart.yaml" {
+ c := new(chartBase)
+ if err := yaml.Unmarshal(f.Data, c); err != nil {
+ return c, fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ switch c.APIVersion {
+ case c2.APIVersionV1, c2.APIVersionV2, "":
+ return c2load.LoadFiles(files)
+ case c3.APIVersionV3:
+ return c3load.LoadFiles(files)
+ default:
+ return nil, errors.New("unsupported chart version")
+ }
+ }
+ }
+
+ return nil, errors.New("unable to detect chart version, no Chart.yaml found")
+}
+
+// chartBase is used to detect the API Version for the chart to run it through the
+// loader for that type.
+type chartBase struct {
+ APIVersion string `json:"apiVersion,omitempty"`
+}
diff --git a/helm/pkg/chart/loader/load_test.go b/helm/pkg/chart/loader/load_test.go
new file mode 100644
index 000000000..40f46c09b
--- /dev/null
+++ b/helm/pkg/chart/loader/load_test.go
@@ -0,0 +1,186 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "maps"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ c3 "helm.sh/helm/v4/internal/chart/v3"
+ "helm.sh/helm/v4/pkg/chart"
+ c2 "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// createChartArchive is a helper function to create a gzipped tar archive in memory
+func createChartArchive(t *testing.T, chartName, apiVersion string, extraFiles map[string][]byte, createChartYaml bool) io.Reader {
+ t.Helper()
+ var buf bytes.Buffer
+ gw := gzip.NewWriter(&buf)
+ tw := tar.NewWriter(gw)
+
+ files := make(map[string][]byte)
+ maps.Copy(files, extraFiles)
+
+ if createChartYaml {
+ chartYAMLContent := fmt.Sprintf(`apiVersion: %s
+name: %s
+version: 0.1.0
+description: A test chart
+`, apiVersion, chartName)
+ files["Chart.yaml"] = []byte(chartYAMLContent)
+ }
+
+ for name, data := range files {
+ header := &tar.Header{
+ Name: filepath.Join(chartName, name),
+ Mode: 0644,
+ Size: int64(len(data)),
+ ModTime: time.Now(),
+ }
+ if err := tw.WriteHeader(header); err != nil {
+ t.Fatalf("Failed to write tar header for %s: %v", name, err)
+ }
+ if _, err := tw.Write(data); err != nil {
+ t.Fatalf("Failed to write tar data for %s: %v", name, err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ t.Fatalf("Failed to close tar writer: %v", err)
+ }
+ if err := gw.Close(); err != nil {
+ t.Fatalf("Failed to close gzip writer: %v", err)
+ }
+ return &buf
+}
+
+func TestLoadArchive(t *testing.T) {
+ testCases := []struct {
+ name string
+ chartName string
+ apiVersion string
+ extraFiles map[string][]byte
+ inputReader io.Reader
+ expectedChart chart.Charter
+ expectedError string
+ createChartYaml bool
+ }{
+ {
+ name: "valid v2 chart archive",
+ chartName: "mychart-v2",
+ apiVersion: c2.APIVersionV2,
+ extraFiles: map[string][]byte{"templates/config.yaml": []byte("key: value")},
+ expectedChart: &c2.Chart{
+ Metadata: &c2.Metadata{APIVersion: c2.APIVersionV2, Name: "mychart-v2", Version: "0.1.0", Description: "A test chart"},
+ },
+ createChartYaml: true,
+ },
+ {
+ name: "valid v3 chart archive",
+ chartName: "mychart-v3",
+ apiVersion: c3.APIVersionV3,
+ extraFiles: map[string][]byte{"templates/config.yaml": []byte("key: value")},
+ expectedChart: &c3.Chart{
+ Metadata: &c3.Metadata{APIVersion: c3.APIVersionV3, Name: "mychart-v3", Version: "0.1.0", Description: "A test chart"},
+ },
+ createChartYaml: true,
+ },
+ {
+ name: "invalid gzip header",
+ inputReader: bytes.NewBufferString("not a gzip file"),
+ expectedError: "stream does not appear to be a valid chart file (details: gzip: invalid header)",
+ },
+ {
+ name: "archive without Chart.yaml",
+ chartName: "no-chart-yaml",
+ apiVersion: c2.APIVersionV2, // This will be ignored as Chart.yaml is missing
+ extraFiles: map[string][]byte{"values.yaml": []byte("foo: bar")},
+ expectedError: "unable to detect chart version, no Chart.yaml found",
+ createChartYaml: false,
+ },
+ {
+ name: "archive with malformed Chart.yaml",
+ chartName: "malformed-chart-yaml",
+ apiVersion: c2.APIVersionV2,
+ extraFiles: map[string][]byte{"Chart.yaml": []byte("apiVersion: v2\nname: mychart\nversion: 0.1.0\ndescription: A test chart\ninvalid: :")},
+ expectedError: "cannot load Chart.yaml: error converting YAML to JSON: yaml: line 5: mapping values are not allowed in this context",
+ createChartYaml: false,
+ },
+ {
+ name: "unsupported API version",
+ chartName: "unsupported-api",
+ apiVersion: "v99",
+ expectedError: "unsupported chart version",
+ createChartYaml: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var reader io.Reader
+ if tc.inputReader != nil {
+ reader = tc.inputReader
+ } else {
+ reader = createChartArchive(t, tc.chartName, tc.apiVersion, tc.extraFiles, tc.createChartYaml)
+ }
+
+ loadedChart, err := LoadArchive(reader)
+
+ if tc.expectedError != "" {
+ if err == nil || !strings.Contains(err.Error(), tc.expectedError) {
+ t.Errorf("Expected error containing %q, but got %v", tc.expectedError, err)
+ }
+ return
+ }
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ lac, err := chart.NewAccessor(loadedChart)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ eac, err := chart.NewAccessor(tc.expectedChart)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ if lac.Name() != eac.Name() {
+ t.Errorf("Expected chart name %q, got %q", eac.Name(), lac.Name())
+ }
+
+ var loadedAPIVersion string
+ switch lc := loadedChart.(type) {
+ case *c2.Chart:
+ loadedAPIVersion = lc.Metadata.APIVersion
+ case *c3.Chart:
+ loadedAPIVersion = lc.Metadata.APIVersion
+ }
+ if loadedAPIVersion != tc.apiVersion {
+ t.Errorf("Expected API version %q, got %q", tc.apiVersion, loadedAPIVersion)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/chart/v2/chart.go b/helm/pkg/chart/v2/chart.go
new file mode 100644
index 000000000..d77a53ddc
--- /dev/null
+++ b/helm/pkg/chart/v2/chart.go
@@ -0,0 +1,182 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+import (
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// APIVersionV1 is the API version number for version 1.
+const APIVersionV1 = "v1"
+
+// APIVersionV2 is the API version number for version 2.
+const APIVersionV2 = "v2"
+
+// aliasNameFormat defines the characters that are legal in an alias name.
+var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+// Chart is a helm package that contains metadata, a default config, zero or more
+// optionally parameterizable templates, and zero or more charts (dependencies).
+type Chart struct {
+ // Raw contains the raw contents of the files originally contained in the chart archive.
+ //
+ // This should not be used except in special cases like `helm show values`,
+ // where we want to display the raw values, comments and all.
+ Raw []*common.File `json:"-"`
+ // Metadata is the contents of the Chartfile.
+ Metadata *Metadata `json:"metadata"`
+ // Lock is the contents of Chart.lock.
+ Lock *Lock `json:"lock"`
+ // Templates for this chart.
+ Templates []*common.File `json:"templates"`
+ // Values are default config for this chart.
+ Values map[string]interface{} `json:"values"`
+ // Schema is an optional JSON schema for imposing structure on Values
+ Schema []byte `json:"schema"`
+ // SchemaModTime the schema was last modified
+ SchemaModTime time.Time `json:"schemamodtime,omitempty"`
+ // Files are miscellaneous files in a chart archive,
+ // e.g. README, LICENSE, etc.
+ Files []*common.File `json:"files"`
+ // ModTime the chart metadata was last modified
+ ModTime time.Time `json:"modtime,omitzero"`
+
+ parent *Chart
+ dependencies []*Chart
+}
+
+type CRD struct {
+ // Name is the File.Name for the crd file
+ Name string
+ // Filename is the File obj Name including (sub-)chart.ChartFullPath
+ Filename string
+ // File is the File obj for the crd
+ File *common.File
+}
+
+// SetDependencies replaces the chart dependencies.
+func (ch *Chart) SetDependencies(charts ...*Chart) {
+ ch.dependencies = nil
+ ch.AddDependency(charts...)
+}
+
+// Name returns the name of the chart.
+func (ch *Chart) Name() string {
+ if ch.Metadata == nil {
+ return ""
+ }
+ return ch.Metadata.Name
+}
+
+// AddDependency determines if the chart is a subchart.
+func (ch *Chart) AddDependency(charts ...*Chart) {
+ for i, x := range charts {
+ charts[i].parent = ch
+ ch.dependencies = append(ch.dependencies, x)
+ }
+}
+
+// Root finds the root chart.
+func (ch *Chart) Root() *Chart {
+ if ch.IsRoot() {
+ return ch
+ }
+ return ch.Parent().Root()
+}
+
+// Dependencies are the charts that this chart depends on.
+func (ch *Chart) Dependencies() []*Chart { return ch.dependencies }
+
+// IsRoot determines if the chart is the root chart.
+func (ch *Chart) IsRoot() bool { return ch.parent == nil }
+
+// Parent returns a subchart's parent chart.
+func (ch *Chart) Parent() *Chart { return ch.parent }
+
+// ChartPath returns the full path to this chart in dot notation.
+func (ch *Chart) ChartPath() string {
+ if !ch.IsRoot() {
+ return ch.Parent().ChartPath() + "." + ch.Name()
+ }
+ return ch.Name()
+}
+
+// ChartFullPath returns the full path to this chart.
+// Note that the path may not correspond to the path where the file can be found on the file system if the path
+// points to an aliased subchart.
+func (ch *Chart) ChartFullPath() string {
+ if !ch.IsRoot() {
+ return ch.Parent().ChartFullPath() + "/charts/" + ch.Name()
+ }
+ return ch.Name()
+}
+
+// Validate validates the metadata.
+func (ch *Chart) Validate() error {
+ return ch.Metadata.Validate()
+}
+
+// AppVersion returns the appversion of the chart.
+func (ch *Chart) AppVersion() string {
+ if ch.Metadata == nil {
+ return ""
+ }
+ return ch.Metadata.AppVersion
+}
+
+// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart.
+// Deprecated: use CRDObjects()
+func (ch *Chart) CRDs() []*common.File {
+ files := []*common.File{}
+ // Find all resources in the crds/ directory
+ for _, f := range ch.Files {
+ if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
+ files = append(files, f)
+ }
+ }
+ // Get CRDs from dependencies, too.
+ for _, dep := range ch.Dependencies() {
+ files = append(files, dep.CRDs()...)
+ }
+ return files
+}
+
+// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts
+func (ch *Chart) CRDObjects() []CRD {
+ crds := []CRD{}
+ // Find all resources in the crds/ directory
+ for _, f := range ch.Files {
+ if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) {
+ mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f}
+ crds = append(crds, mycrd)
+ }
+ }
+ // Get CRDs from dependencies, too.
+ for _, dep := range ch.Dependencies() {
+ crds = append(crds, dep.CRDObjects()...)
+ }
+ return crds
+}
+
+func hasManifestExtension(fname string) bool {
+ ext := filepath.Ext(fname)
+ return strings.EqualFold(ext, ".yaml") || strings.EqualFold(ext, ".yml") || strings.EqualFold(ext, ".json")
+}
diff --git a/helm/pkg/chart/v2/chart_test.go b/helm/pkg/chart/v2/chart_test.go
new file mode 100644
index 000000000..d0837eb16
--- /dev/null
+++ b/helm/pkg/chart/v2/chart_test.go
@@ -0,0 +1,229 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v2
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+func TestCRDs(t *testing.T) {
+ modTime := time.Now()
+ chrt := Chart{
+ Files: []*common.File{
+ {
+ Name: "crds/foo.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "bar.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crdsfoo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/README.md",
+ ModTime: modTime,
+ Data: []byte("# hello"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ crds := chrt.CRDs()
+ is.Equal(2, len(crds))
+ is.Equal("crds/foo.yaml", crds[0].Name)
+ is.Equal("crds/foo/bar/baz.yaml", crds[1].Name)
+}
+
+func TestSaveChartNoRawData(t *testing.T) {
+ chrt := Chart{
+ Raw: []*common.File{
+ {
+ Name: "fhqwhgads.yaml",
+ ModTime: time.Now(),
+ Data: []byte("Everybody to the Limit"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ data, err := json.Marshal(chrt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res := &Chart{}
+ if err := json.Unmarshal(data, res); err != nil {
+ t.Fatal(err)
+ }
+
+ is.Equal([]*common.File(nil), res.Raw)
+}
+
+func TestMetadata(t *testing.T) {
+ chrt := Chart{
+ Metadata: &Metadata{
+ Name: "foo.yaml",
+ AppVersion: "1.0.0",
+ APIVersion: "v2",
+ Version: "1.0.0",
+ Type: "application",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo.yaml", chrt.Name())
+ is.Equal("1.0.0", chrt.AppVersion())
+ is.Equal(nil, chrt.Validate())
+}
+
+func TestIsRoot(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal(false, chrt1.IsRoot())
+ is.Equal(true, chrt2.IsRoot())
+}
+
+func TestChartPath(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo.", chrt1.ChartPath())
+ is.Equal("foo", chrt2.ChartPath())
+}
+
+func TestChartFullPath(t *testing.T) {
+ chrt1 := Chart{
+ parent: &Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ },
+ }
+
+ chrt2 := Chart{
+ Metadata: &Metadata{
+ Name: "foo",
+ },
+ }
+
+ is := assert.New(t)
+
+ is.Equal("foo/charts/", chrt1.ChartFullPath())
+ is.Equal("foo", chrt2.ChartFullPath())
+}
+
+func TestCRDObjects(t *testing.T) {
+ modTime := time.Now()
+ chrt := Chart{
+ Files: []*common.File{
+ {
+ Name: "crds/foo.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "bar.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crdsfoo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ {
+ Name: "crds/README.md",
+ ModTime: modTime,
+ Data: []byte("# hello"),
+ },
+ },
+ }
+
+ expected := []CRD{
+ {
+ Name: "crds/foo.yaml",
+ Filename: "crds/foo.yaml",
+ File: &common.File{
+ Name: "crds/foo.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ },
+ {
+ Name: "crds/foo/bar/baz.yaml",
+ Filename: "crds/foo/bar/baz.yaml",
+ File: &common.File{
+ Name: "crds/foo/bar/baz.yaml",
+ ModTime: modTime,
+ Data: []byte("hello"),
+ },
+ },
+ }
+
+ is := assert.New(t)
+ crds := chrt.CRDObjects()
+ is.Equal(expected, crds)
+}
diff --git a/helm/pkg/chart/v2/dependency.go b/helm/pkg/chart/v2/dependency.go
new file mode 100644
index 000000000..8a590a036
--- /dev/null
+++ b/helm/pkg/chart/v2/dependency.go
@@ -0,0 +1,82 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+import "time"
+
+// Dependency describes a chart upon which another chart depends.
+//
+// Dependencies can be used to express developer intent, or to capture the state
+// of a chart.
+type Dependency struct {
+ // Name is the name of the dependency.
+ //
+ // This must mach the name in the dependency's Chart.yaml.
+ Name string `json:"name" yaml:"name"`
+ // Version is the version (range) of this chart.
+ //
+ // A lock file will always produce a single version, while a dependency
+ // may contain a semantic version range.
+ Version string `json:"version,omitempty" yaml:"version,omitempty"`
+ // The URL to the repository.
+ //
+ // Appending `index.yaml` to this string should result in a URL that can be
+ // used to fetch the repository index.
+ Repository string `json:"repository" yaml:"repository"`
+ // A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
+ Condition string `json:"condition,omitempty" yaml:"condition,omitempty"`
+ // Tags can be used to group charts for enabling/disabling together
+ Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
+ // Enabled bool determines if chart should be loaded
+ Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
+ // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
+ // string or pair of child/parent sublist items.
+ ImportValues []interface{} `json:"import-values,omitempty" yaml:"import-values,omitempty"`
+ // Alias usable alias to be used for the chart
+ Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
+}
+
+// Validate checks for common problems with the dependency datastructure in
+// the chart. This check must be done at load time before the dependency's charts are
+// loaded.
+func (d *Dependency) Validate() error {
+ if d == nil {
+ return ValidationError("dependencies must not contain empty or null nodes")
+ }
+ d.Name = sanitizeString(d.Name)
+ d.Version = sanitizeString(d.Version)
+ d.Repository = sanitizeString(d.Repository)
+ d.Condition = sanitizeString(d.Condition)
+ for i := range d.Tags {
+ d.Tags[i] = sanitizeString(d.Tags[i])
+ }
+ if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) {
+ return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name)
+ }
+ return nil
+}
+
+// Lock is a lock file for dependencies.
+//
+// It represents the state that the dependencies should be in.
+type Lock struct {
+ // Generated is the date the lock file was last generated.
+ Generated time.Time `json:"generated"`
+ // Digest is a hash of the dependencies in Chart.yaml.
+ Digest string `json:"digest"`
+ // Dependencies is the list of dependencies that this lock file has locked.
+ Dependencies []*Dependency `json:"dependencies"`
+}
diff --git a/helm/pkg/chart/v2/dependency_test.go b/helm/pkg/chart/v2/dependency_test.go
new file mode 100644
index 000000000..35919bd7a
--- /dev/null
+++ b/helm/pkg/chart/v2/dependency_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v2
+
+import (
+ "testing"
+)
+
+func TestValidateDependency(t *testing.T) {
+ dep := &Dependency{
+ Name: "example",
+ }
+ for value, shouldFail := range map[string]bool{
+ "abcdefghijklmenopQRSTUVWXYZ-0123456780_": false,
+ "-okay": false,
+ "_okay": false,
+ "- bad": true,
+ " bad": true,
+ "bad\nvalue": true,
+ "bad ": true,
+ "bad$": true,
+ } {
+ dep.Alias = value
+ res := dep.Validate()
+ if res != nil && !shouldFail {
+ t.Errorf("Failed on case %q", dep.Alias)
+ } else if res == nil && shouldFail {
+ t.Errorf("Expected failure for %q", dep.Alias)
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/doc.go b/helm/pkg/chart/v2/doc.go
new file mode 100644
index 000000000..d36ca3ec4
--- /dev/null
+++ b/helm/pkg/chart/v2/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package v2 provides chart handling for apiVersion v1 and v2 charts
+
+This package and its sub-packages provide handling for apiVersion v1 and v2 charts.
+The changes from v1 to v2 charts are minor and were able to be handled with minor
+switches based on characteristics.
+*/
+package v2
diff --git a/helm/pkg/chart/v2/errors.go b/helm/pkg/chart/v2/errors.go
new file mode 100644
index 000000000..eeef75315
--- /dev/null
+++ b/helm/pkg/chart/v2/errors.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+import "fmt"
+
+// ValidationError represents a data validation error.
+type ValidationError string
+
+func (v ValidationError) Error() string {
+ return "validation: " + string(v)
+}
+
+// ValidationErrorf takes a message and formatting options and creates a ValidationError
+func ValidationErrorf(msg string, args ...interface{}) ValidationError {
+ return ValidationError(fmt.Sprintf(msg, args...))
+}
diff --git a/helm/pkg/chart/v2/fuzz_test.go b/helm/pkg/chart/v2/fuzz_test.go
new file mode 100644
index 000000000..a897ef7b9
--- /dev/null
+++ b/helm/pkg/chart/v2/fuzz_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+import (
+ "testing"
+
+ fuzz "github.com/AdaLogics/go-fuzz-headers"
+)
+
+func FuzzMetadataValidate(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ fdp := fuzz.NewConsumer(data)
+ // Add random values to the metadata
+ md := &Metadata{}
+ err := fdp.GenerateStruct(md)
+ if err != nil {
+ t.Skip()
+ }
+ md.Validate()
+ })
+}
+
+func FuzzDependencyValidate(f *testing.F) {
+ f.Fuzz(func(t *testing.T, data []byte) {
+ f := fuzz.NewConsumer(data)
+ // Add random values to the dependenci
+ d := &Dependency{}
+ err := f.GenerateStruct(d)
+ if err != nil {
+ t.Skip()
+ }
+ d.Validate()
+ })
+}
diff --git a/helm/pkg/chart/v2/lint/lint.go b/helm/pkg/chart/v2/lint/lint.go
new file mode 100644
index 000000000..1c871d936
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/lint.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint // import "helm.sh/helm/v4/pkg/chart/v2/lint"
+
+import (
+ "path/filepath"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+)
+
+type linterOptions struct {
+ KubeVersion *common.KubeVersion
+ SkipSchemaValidation bool
+}
+
+type LinterOption func(lo *linterOptions)
+
+func WithKubeVersion(kubeVersion *common.KubeVersion) LinterOption {
+ return func(lo *linterOptions) {
+ lo.KubeVersion = kubeVersion
+ }
+}
+
+func WithSkipSchemaValidation(skipSchemaValidation bool) LinterOption {
+ return func(lo *linterOptions) {
+ lo.SkipSchemaValidation = skipSchemaValidation
+ }
+}
+
+func RunAll(baseDir string, values map[string]interface{}, namespace string, options ...LinterOption) support.Linter {
+
+ chartDir, _ := filepath.Abs(baseDir)
+
+ lo := linterOptions{}
+ for _, option := range options {
+ option(&lo)
+ }
+
+ result := support.Linter{
+ ChartDir: chartDir,
+ }
+
+ rules.Chartfile(&result)
+ rules.ValuesWithOverrides(&result, values, lo.SkipSchemaValidation)
+ rules.Templates(
+ &result,
+ namespace,
+ values,
+ rules.TemplateLinterKubeVersion(lo.KubeVersion),
+ rules.TemplateLinterSkipSchemaValidation(lo.SkipSchemaValidation))
+ rules.Dependencies(&result)
+ rules.Crds(&result)
+
+ return result
+}
diff --git a/helm/pkg/chart/v2/lint/lint_test.go b/helm/pkg/chart/v2/lint/lint_test.go
new file mode 100644
index 000000000..80dcef932
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/lint_test.go
@@ -0,0 +1,247 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lint
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+const namespace = "testNamespace"
+
+const badChartDir = "rules/testdata/badchartfile"
+const badValuesFileDir = "rules/testdata/badvaluesfile"
+const badYamlFileDir = "rules/testdata/albatross"
+const badCrdFileDir = "rules/testdata/badcrdfile"
+const goodChartDir = "rules/testdata/goodone"
+const subChartValuesDir = "rules/testdata/withsubchart"
+const malformedTemplate = "rules/testdata/malformed-template"
+const invalidChartFileDir = "rules/testdata/invalidchartfile"
+
+func TestBadChart(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badChartDir, values, namespace).Messages
+ if len(m) != 9 {
+ t.Errorf("Number of errors %v", len(m))
+ t.Errorf("All didn't fail with expected errors, got %#v", m)
+ }
+ // There should be one INFO, 2 WARNING and 2 ERROR messages, check for them
+ var i, w, w2, e, e2, e3, e4, e5, e6 bool
+ for _, msg := range m {
+ if msg.Severity == support.InfoSev {
+ if strings.Contains(msg.Err.Error(), "icon is recommended") {
+ i = true
+ }
+ }
+ if msg.Severity == support.WarningSev {
+ if strings.Contains(msg.Err.Error(), "does not exist") {
+ w = true
+ }
+ }
+ if msg.Severity == support.ErrorSev {
+ if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVer") {
+ e = true
+ }
+ if strings.Contains(msg.Err.Error(), "name is required") {
+ e2 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "apiVersion is required. The value must be either \"v1\" or \"v2\"") {
+ e3 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "chart type is not valid in apiVersion") {
+ e4 = true
+ }
+
+ if strings.Contains(msg.Err.Error(), "dependencies are not valid in the Chart file with apiVersion") {
+ e5 = true
+ }
+ // This comes from the dependency check, which loads dependency info from the Chart.yaml
+ if strings.Contains(msg.Err.Error(), "unable to load chart") {
+ e6 = true
+ }
+ }
+ if msg.Severity == support.WarningSev {
+ if strings.Contains(msg.Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") {
+ w2 = true
+ }
+ }
+ }
+ if !e || !e2 || !e3 || !e4 || !e5 || !i || !e6 || !w || !w2 {
+ t.Errorf("Didn't find all the expected errors, got %#v", m)
+ }
+}
+
+func TestInvalidYaml(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badYamlFileDir, values, namespace).Messages
+ if len(m) != 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "deliberateSyntaxError") {
+ t.Errorf("All didn't have the error for deliberateSyntaxError")
+ }
+}
+
+func TestInvalidChartYaml(t *testing.T) {
+ var values map[string]any
+ m := RunAll(invalidChartFileDir, values, namespace).Messages
+ if len(m) != 2 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "failed to strictly parse chart metadata file") {
+ t.Errorf("All didn't have the error for duplicate YAML keys")
+ }
+}
+
+func TestBadValues(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badValuesFileDir, values, namespace).Messages
+ if len(m) < 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "unable to parse YAML") {
+ t.Errorf("All didn't have the error for invalid key format: %s", m[0].Err)
+ }
+}
+
+func TestBadCrdFile(t *testing.T) {
+ var values map[string]any
+ m := RunAll(badCrdFileDir, values, namespace).Messages
+ assert.Lenf(t, m, 2, "All didn't fail with expected errors, got %#v", m)
+ assert.ErrorContains(t, m[0].Err, "apiVersion is not in 'apiextensions.k8s.io'")
+ assert.ErrorContains(t, m[1].Err, "object kind is not 'CustomResourceDefinition'")
+}
+
+func TestGoodChart(t *testing.T) {
+ var values map[string]any
+ m := RunAll(goodChartDir, values, namespace).Messages
+ if len(m) != 0 {
+ t.Error("All returned linter messages when it shouldn't have")
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ }
+}
+
+// TestHelmCreateChart tests that a `helm create` always passes a `helm lint` test.
+//
+// See https://github.com/helm/helm/issues/7923
+func TestHelmCreateChart(t *testing.T) {
+ var values map[string]any
+ dir := t.TempDir()
+
+ createdChart, err := chartutil.Create("testhelmcreatepasseslint", dir)
+ if err != nil {
+ t.Error(err)
+ // Fatal is bad because of the defer.
+ return
+ }
+
+ // Note: we test with strict=true here, even though others have
+ // strict = false.
+ m := RunAll(createdChart, values, namespace, WithSkipSchemaValidation(true)).Messages
+ if ll := len(m); ll != 1 {
+ t.Errorf("All should have had exactly 1 error. Got %d", ll)
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg.Error())
+ }
+ } else if msg := m[0].Err.Error(); !strings.Contains(msg, "icon is recommended") {
+ t.Errorf("Unexpected lint error: %s", msg)
+ }
+}
+
+// TestHelmCreateChart_CheckDeprecatedWarnings checks if any default template created by `helm create` throws
+// deprecated warnings in the linter check against the current Kubernetes version (provided using ldflags).
+//
+// See https://github.com/helm/helm/issues/11495
+//
+// Resources like hpa and ingress, which are disabled by default in values.yaml are enabled here using the equivalent
+// of the `--set` flag.
+func TestHelmCreateChart_CheckDeprecatedWarnings(t *testing.T) {
+ createdChart, err := chartutil.Create("checkdeprecatedwarnings", t.TempDir())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Add values to enable hpa, and ingress which are disabled by default.
+ // This is the equivalent of:
+ // helm lint checkdeprecatedwarnings --set 'autoscaling.enabled=true,ingress.enabled=true'
+ updatedValues := map[string]any{
+ "autoscaling": map[string]any{
+ "enabled": true,
+ },
+ "ingress": map[string]any{
+ "enabled": true,
+ },
+ }
+
+ linterRunDetails := RunAll(createdChart, updatedValues, namespace, WithSkipSchemaValidation(true))
+ for _, msg := range linterRunDetails.Messages {
+ if strings.HasPrefix(msg.Error(), "[WARNING]") &&
+ strings.Contains(msg.Error(), "deprecated") {
+ // When there is a deprecation warning for an object created
+ // by `helm create` for the current Kubernetes version, fail.
+ t.Errorf("Unexpected deprecation warning for %q: %s", msg.Path, msg.Error())
+ }
+ }
+}
+
+// lint ignores import-values
+// See https://github.com/helm/helm/issues/9658
+func TestSubChartValuesChart(t *testing.T) {
+ var values map[string]any
+ m := RunAll(subChartValuesDir, values, namespace).Messages
+ if len(m) != 0 {
+ t.Error("All returned linter messages when it shouldn't have")
+ for i, msg := range m {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ }
+}
+
+// lint stuck with malformed template object
+// See https://github.com/helm/helm/issues/11391
+func TestMalformedTemplate(t *testing.T) {
+ var values map[string]any
+ c := time.After(3 * time.Second)
+ ch := make(chan int, 1)
+ var m []support.Message
+ go func() {
+ m = RunAll(malformedTemplate, values, namespace).Messages
+ ch <- 1
+ }()
+ select {
+ case <-c:
+ t.Fatalf("lint malformed template timeout")
+ case <-ch:
+ if len(m) != 1 {
+ t.Fatalf("All didn't fail with expected errors, got %#v", m)
+ }
+ if !strings.Contains(m[0].Err.Error(), "invalid character '{'") {
+ t.Errorf("All didn't have the error for invalid character '{'")
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/lint/rules/chartfile.go b/helm/pkg/chart/v2/lint/rules/chartfile.go
new file mode 100644
index 000000000..806363477
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/chartfile.go
@@ -0,0 +1,236 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/asaskevich/govalidator"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+// Chartfile runs a set of linter rules related to Chart.yaml file
+func Chartfile(linter *support.Linter) {
+ chartFileName := "Chart.yaml"
+ chartPath := filepath.Join(linter.ChartDir, chartFileName)
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath))
+
+ chartFile, err := chartutil.LoadChartfile(chartPath)
+ validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err))
+
+ // Guard clause. Following linter rules require a parsable ChartFile
+ if !validChartFile {
+ return
+ }
+
+ _, err = chartutil.StrictLoadChartfile(chartPath)
+ linter.RunLinterRule(support.WarningSev, chartFileName, validateChartYamlStrictFormat(err))
+
+ // type check for Chart.yaml . ignoring error as any parse
+ // errors would already be caught in the above load function
+ chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath)
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile))
+
+ // Chart metadata
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile))
+
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile))
+ linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile))
+ linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile))
+ linter.RunLinterRule(support.WarningSev, chartFileName, validateChartVersionStrictSemVerV2(chartFile))
+}
+
+func validateChartVersionType(data map[string]interface{}) error {
+ return isStringValue(data, "version")
+}
+
+func validateChartAppVersionType(data map[string]interface{}) error {
+ return isStringValue(data, "appVersion")
+}
+
+func isStringValue(data map[string]interface{}, key string) error {
+ value, ok := data[key]
+ if !ok {
+ return nil
+ }
+ valueType := fmt.Sprintf("%T", value)
+ if valueType != "string" {
+ return fmt.Errorf("%s should be of type string but it's of type %s", key, valueType)
+ }
+ return nil
+}
+
+func validateChartYamlNotDirectory(chartPath string) error {
+ fi, err := os.Stat(chartPath)
+
+ if err == nil && fi.IsDir() {
+ return errors.New("should be a file, not a directory")
+ }
+ return nil
+}
+
+func validateChartYamlFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("unable to parse YAML\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartYamlStrictFormat(chartFileError error) error {
+ if chartFileError != nil {
+ return fmt.Errorf("failed to strictly parse chart metadata file\n\t%w", chartFileError)
+ }
+ return nil
+}
+
+func validateChartName(cf *chart.Metadata) error {
+ if cf.Name == "" {
+ return errors.New("name is required")
+ }
+ name := filepath.Base(cf.Name)
+ if name != cf.Name {
+ return fmt.Errorf("chart name %q is invalid", cf.Name)
+ }
+ return nil
+}
+
+func validateChartAPIVersion(cf *chart.Metadata) error {
+ if cf.APIVersion == "" {
+ return errors.New("apiVersion is required. The value must be either \"v1\" or \"v2\"")
+ }
+
+ if cf.APIVersion != chart.APIVersionV1 && cf.APIVersion != chart.APIVersionV2 {
+ return fmt.Errorf("apiVersion '%s' is not valid. The value must be either \"v1\" or \"v2\"", cf.APIVersion)
+ }
+
+ return nil
+}
+
+func validateChartVersion(cf *chart.Metadata) error {
+ if cf.Version == "" {
+ return errors.New("version is required")
+ }
+
+ version, err := semver.NewVersion(cf.Version)
+ if err != nil {
+ return fmt.Errorf("version '%s' is not a valid SemVer", cf.Version)
+ }
+
+ c, err := semver.NewConstraint(">0.0.0-0")
+ if err != nil {
+ return err
+ }
+ valid, msg := c.Validate(version)
+
+ if !valid && len(msg) > 0 {
+ return fmt.Errorf("version %v", msg[0])
+ }
+
+ return nil
+}
+
+func validateChartVersionStrictSemVerV2(cf *chart.Metadata) error {
+ _, err := semver.StrictNewVersion(cf.Version)
+
+ if err != nil {
+ return fmt.Errorf("version '%s' is not a valid SemVerV2", cf.Version)
+ }
+
+ return nil
+}
+
+func validateChartMaintainer(cf *chart.Metadata) error {
+ for _, maintainer := range cf.Maintainers {
+ if maintainer == nil {
+ return errors.New("a maintainer entry is empty")
+ }
+ if maintainer.Name == "" {
+ return errors.New("each maintainer requires a name")
+ } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) {
+ return fmt.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name)
+ } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) {
+ return fmt.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name)
+ }
+ }
+ return nil
+}
+
+func validateChartSources(cf *chart.Metadata) error {
+ for _, source := range cf.Sources {
+ if source == "" || !govalidator.IsRequestURL(source) {
+ return fmt.Errorf("invalid source URL '%s'", source)
+ }
+ }
+ return nil
+}
+
+func validateChartIconPresence(cf *chart.Metadata) error {
+ if cf.Icon == "" {
+ return errors.New("icon is recommended")
+ }
+ return nil
+}
+
+func validateChartIconURL(cf *chart.Metadata) error {
+ if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) {
+ return fmt.Errorf("invalid icon URL '%s'", cf.Icon)
+ }
+ return nil
+}
+
+func validateChartDependencies(cf *chart.Metadata) error {
+ if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV2 {
+ return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2)
+ }
+ return nil
+}
+
+func validateChartType(cf *chart.Metadata) error {
+ if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV2 {
+ return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2)
+ }
+ return nil
+}
+
+// loadChartFileForTypeCheck loads the Chart.yaml
+// in a generic form of a map[string]interface{}, so that the type
+// of the values can be checked
+func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := make(map[string]interface{})
+ err = yaml.Unmarshal(b, &y)
+ return y, err
+}
diff --git a/helm/pkg/chart/v2/lint/rules/chartfile_test.go b/helm/pkg/chart/v2/lint/rules/chartfile_test.go
new file mode 100644
index 000000000..692358426
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/chartfile_test.go
@@ -0,0 +1,319 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+const (
+ badChartNameDir = "testdata/badchartname"
+ badChartDir = "testdata/badchartfile"
+ anotherBadChartDir = "testdata/anotherbadchartfile"
+)
+
+var (
+ badChartNamePath = filepath.Join(badChartNameDir, "Chart.yaml")
+ badChartFilePath = filepath.Join(badChartDir, "Chart.yaml")
+ nonExistingChartFilePath = filepath.Join(os.TempDir(), "Chart.yaml")
+)
+
+var badChart, _ = chartutil.LoadChartfile(badChartFilePath)
+var badChartName, _ = chartutil.LoadChartfile(badChartNamePath)
+
+// Validation functions Test
+func TestValidateChartYamlNotDirectory(t *testing.T) {
+ _ = os.Mkdir(nonExistingChartFilePath, os.ModePerm)
+ defer os.Remove(nonExistingChartFilePath)
+
+ err := validateChartYamlNotDirectory(nonExistingChartFilePath)
+ if err == nil {
+ t.Errorf("validateChartYamlNotDirectory to return a linter error, got no error")
+ }
+}
+
+func TestValidateChartYamlFormat(t *testing.T) {
+ err := validateChartYamlFormat(errors.New("Read error"))
+ if err == nil {
+ t.Errorf("validateChartYamlFormat to return a linter error, got no error")
+ }
+
+ err = validateChartYamlFormat(nil)
+ if err != nil {
+ t.Errorf("validateChartYamlFormat to return no error, got a linter error")
+ }
+}
+
+func TestValidateChartName(t *testing.T) {
+ err := validateChartName(badChart)
+ if err == nil {
+ t.Errorf("validateChartName to return a linter error, got no error")
+ }
+
+ err = validateChartName(badChartName)
+ if err == nil {
+ t.Error("expected validateChartName to return a linter error for an invalid name, got no error")
+ }
+}
+
+func TestValidateChartVersion(t *testing.T) {
+ var failTest = []struct {
+ Version string
+ ErrorMsg string
+ }{
+ {"", "version is required"},
+ {"1.2.3.4", "version '1.2.3.4' is not a valid SemVer"},
+ {"waps", "'waps' is not a valid SemVer"},
+ {"-3", "'-3' is not a valid SemVer"},
+ }
+
+ var successTest = []string{"0.0.1", "0.0.1+build", "0.0.1-beta"}
+
+ for _, test := range failTest {
+ badChart.Version = test.Version
+ err := validateChartVersion(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartVersion(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
+ }
+ }
+
+ for _, version := range successTest {
+ badChart.Version = version
+ err := validateChartVersion(badChart)
+ if err != nil {
+ t.Errorf("validateChartVersion(%s) to return no error, got a linter error", version)
+ }
+ }
+}
+
+func TestValidateChartVersionStrictSemVerV2(t *testing.T) {
+ var failTest = []struct {
+ Version string
+ ErrorMsg string
+ }{
+ {"", "version '' is not a valid SemVerV2"},
+ {"1", "version '1' is not a valid SemVerV2"},
+ {"1.1", "version '1.1' is not a valid SemVerV2"},
+ }
+
+ var successTest = []string{"1.1.1", "0.0.1+build", "0.0.1-beta"}
+
+ for _, test := range failTest {
+ badChart.Version = test.Version
+ err := validateChartVersionStrictSemVerV2(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartVersionStrictSemVerV2(%s) to return \"%s\", got no error", test.Version, test.ErrorMsg)
+ }
+ }
+
+ for _, version := range successTest {
+ badChart.Version = version
+ err := validateChartVersionStrictSemVerV2(badChart)
+ if err != nil {
+ t.Errorf("validateChartVersionStrictSemVerV2(%s) to return no error, got a linter error", version)
+ }
+ }
+}
+
+func TestValidateChartMaintainer(t *testing.T) {
+ var failTest = []struct {
+ Name string
+ Email string
+ ErrorMsg string
+ }{
+ {"", "", "each maintainer requires a name"},
+ {"", "test@test.com", "each maintainer requires a name"},
+ {"John Snow", "wrongFormatEmail.com", "invalid email"},
+ }
+
+ var successTest = []struct {
+ Name string
+ Email string
+ }{
+ {"John Snow", ""},
+ {"John Snow", "john@winterfell.com"},
+ }
+
+ for _, test := range failTest {
+ badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}}
+ err := validateChartMaintainer(badChart)
+ if err == nil || !strings.Contains(err.Error(), test.ErrorMsg) {
+ t.Errorf("validateChartMaintainer(%s, %s) to return \"%s\", got no error", test.Name, test.Email, test.ErrorMsg)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Maintainers = []*chart.Maintainer{{Name: test.Name, Email: test.Email}}
+ err := validateChartMaintainer(badChart)
+ if err != nil {
+ t.Errorf("validateChartMaintainer(%s, %s) to return no error, got %s", test.Name, test.Email, err.Error())
+ }
+ }
+
+ // Testing for an empty maintainer
+ badChart.Maintainers = []*chart.Maintainer{nil}
+ err := validateChartMaintainer(badChart)
+ if err == nil {
+ t.Errorf("validateChartMaintainer did not return error for nil maintainer as expected")
+ }
+ if err.Error() != "a maintainer entry is empty" {
+ t.Errorf("validateChartMaintainer returned unexpected error for nil maintainer: %s", err.Error())
+ }
+}
+
+func TestValidateChartSources(t *testing.T) {
+ var failTest = []string{"", "RiverRun", "john@winterfell", "riverrun.io"}
+ var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish"}
+ for _, test := range failTest {
+ badChart.Sources = []string{test}
+ err := validateChartSources(badChart)
+ if err == nil || !strings.Contains(err.Error(), "invalid source URL") {
+ t.Errorf("validateChartSources(%s) to return \"invalid source URL\", got no error", test)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Sources = []string{test}
+ err := validateChartSources(badChart)
+ if err != nil {
+ t.Errorf("validateChartSources(%s) to return no error, got %s", test, err.Error())
+ }
+ }
+}
+
+func TestValidateChartIconPresence(t *testing.T) {
+ t.Run("Icon absent", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err == nil {
+ t.Errorf("validateChartIconPresence to return a linter error, got no error")
+ } else if !strings.Contains(err.Error(), "icon is recommended") {
+ t.Errorf("expected %q, got %q", "icon is recommended", err.Error())
+ }
+ })
+ t.Run("Icon present", func(t *testing.T) {
+ testChart := &chart.Metadata{
+ Icon: "http://example.org/icon.png",
+ }
+
+ err := validateChartIconPresence(testChart)
+
+ if err != nil {
+ t.Errorf("Unexpected error: %q", err.Error())
+ }
+ })
+}
+
+func TestValidateChartIconURL(t *testing.T) {
+ var failTest = []string{"RiverRun", "john@winterfell", "riverrun.io"}
+ var successTest = []string{"http://riverrun.io", "https://riverrun.io", "https://riverrun.io/blackfish.png"}
+ for _, test := range failTest {
+ badChart.Icon = test
+ err := validateChartIconURL(badChart)
+ if err == nil || !strings.Contains(err.Error(), "invalid icon URL") {
+ t.Errorf("validateChartIconURL(%s) to return \"invalid icon URL\", got no error", test)
+ }
+ }
+
+ for _, test := range successTest {
+ badChart.Icon = test
+ err := validateChartSources(badChart)
+ if err != nil {
+ t.Errorf("validateChartIconURL(%s) to return no error, got %s", test, err.Error())
+ }
+ }
+}
+
+func TestChartfile(t *testing.T) {
+ t.Run("Chart.yaml basic validity issues", func(t *testing.T) {
+ linter := support.Linter{ChartDir: badChartDir}
+ Chartfile(&linter)
+ msgs := linter.Messages
+ expectedNumberOfErrorMessages := 7
+
+ if len(msgs) != expectedNumberOfErrorMessages {
+ t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs))
+ return
+ }
+
+ if !strings.Contains(msgs[0].Err.Error(), "name is required") {
+ t.Errorf("Unexpected message 0: %s", msgs[0].Err)
+ }
+
+ if !strings.Contains(msgs[1].Err.Error(), "apiVersion is required. The value must be either \"v1\" or \"v2\"") {
+ t.Errorf("Unexpected message 1: %s", msgs[1].Err)
+ }
+
+ if !strings.Contains(msgs[2].Err.Error(), "version '0.0.0.0' is not a valid SemVer") {
+ t.Errorf("Unexpected message 2: %s", msgs[2].Err)
+ }
+
+ if !strings.Contains(msgs[3].Err.Error(), "icon is recommended") {
+ t.Errorf("Unexpected message 3: %s", msgs[3].Err)
+ }
+
+ if !strings.Contains(msgs[4].Err.Error(), "chart type is not valid in apiVersion") {
+ t.Errorf("Unexpected message 4: %s", msgs[4].Err)
+ }
+
+ if !strings.Contains(msgs[5].Err.Error(), "dependencies are not valid in the Chart file with apiVersion") {
+ t.Errorf("Unexpected message 5: %s", msgs[5].Err)
+ }
+ if !strings.Contains(msgs[6].Err.Error(), "version '0.0.0.0' is not a valid SemVerV2") {
+ t.Errorf("Unexpected message 6: %s", msgs[6].Err)
+ }
+ })
+
+ t.Run("Chart.yaml validity issues due to type mismatch", func(t *testing.T) {
+ linter := support.Linter{ChartDir: anotherBadChartDir}
+ Chartfile(&linter)
+ msgs := linter.Messages
+ expectedNumberOfErrorMessages := 4
+
+ if len(msgs) != expectedNumberOfErrorMessages {
+ t.Errorf("Expected %d errors, got %d", expectedNumberOfErrorMessages, len(msgs))
+ return
+ }
+
+ if !strings.Contains(msgs[0].Err.Error(), "version should be of type string") {
+ t.Errorf("Unexpected message 0: %s", msgs[0].Err)
+ }
+
+ if !strings.Contains(msgs[1].Err.Error(), "version '7.2445e+06' is not a valid SemVer") {
+ t.Errorf("Unexpected message 1: %s", msgs[1].Err)
+ }
+
+ if !strings.Contains(msgs[2].Err.Error(), "appVersion should be of type string") {
+ t.Errorf("Unexpected message 2: %s", msgs[2].Err)
+ }
+ if !strings.Contains(msgs[3].Err.Error(), "version '7.2445e+06' is not a valid SemVerV2") {
+ t.Errorf("Unexpected message 3: %s", msgs[3].Err)
+ }
+ })
+}
diff --git a/helm/pkg/chart/v2/lint/rules/crds.go b/helm/pkg/chart/v2/lint/rules/crds.go
new file mode 100644
index 000000000..4bb4d370b
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/crds.go
@@ -0,0 +1,115 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+// Crds lints the CRDs in the Linter.
+func Crds(linter *support.Linter) {
+ fpath := "crds/"
+ crdsPath := filepath.Join(linter.ChartDir, fpath)
+
+ // crds directory is optional
+ if _, err := os.Stat(crdsPath); errors.Is(err, fs.ErrNotExist) {
+ return
+ }
+
+ crdsDirValid := linter.RunLinterRule(support.ErrorSev, fpath, validateCrdsDir(crdsPath))
+ if !crdsDirValid {
+ return
+ }
+
+ // Load chart and parse CRDs
+ chart, err := loader.Load(linter.ChartDir)
+
+ chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ /* Iterate over all the CRDs to check:
+ 1. It is a YAML file and not a template
+ 2. The API version is apiextensions.k8s.io
+ 3. The kind is CustomResourceDefinition
+ */
+ for _, crd := range chart.CRDObjects() {
+ fileName := crd.Name
+ fpath = fileName
+
+ decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(crd.File.Data), 4096)
+ for {
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ // If YAML parsing fails here, it will always fail in the next block as well, so we should return here.
+ // This also confirms the YAML is not a template, since templates can't be decoded into a K8sYamlStruct.
+ if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) {
+ return
+ }
+
+ if yamlStruct != nil {
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdAPIVersion(yamlStruct))
+ linter.RunLinterRule(support.ErrorSev, fpath, validateCrdKind(yamlStruct))
+ }
+ }
+ }
+}
+
+// Validation functions
+func validateCrdsDir(crdsPath string) error {
+ fi, err := os.Stat(crdsPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateCrdAPIVersion(obj *k8sYamlStruct) error {
+ if !strings.HasPrefix(obj.APIVersion, "apiextensions.k8s.io") {
+ return fmt.Errorf("apiVersion is not in 'apiextensions.k8s.io'")
+ }
+ return nil
+}
+
+func validateCrdKind(obj *k8sYamlStruct) error {
+ if obj.Kind != "CustomResourceDefinition" {
+ return fmt.Errorf("object kind is not 'CustomResourceDefinition'")
+ }
+ return nil
+}
diff --git a/helm/pkg/chart/v2/lint/rules/crds_test.go b/helm/pkg/chart/v2/lint/rules/crds_test.go
new file mode 100644
index 000000000..228f40a66
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/crds_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+)
+
+const invalidCrdsDir = "./testdata/invalidcrdsdir"
+
+func TestInvalidCrdsDir(t *testing.T) {
+ linter := support.Linter{ChartDir: invalidCrdsDir}
+ Crds(&linter)
+ res := linter.Messages
+
+ assert.Len(t, res, 1)
+ assert.ErrorContains(t, res[0].Err, "not a directory")
+}
+
+// multi-document YAML with empty documents would panic
+func TestCrdWithEmptyDocument(t *testing.T) {
+ chartDir := t.TempDir()
+
+ os.WriteFile(filepath.Join(chartDir, "Chart.yaml"), []byte(
+ `apiVersion: v1
+name: test
+version: 0.1.0
+`), 0644)
+
+ // CRD with comments before --- (creates empty document)
+ crdsDir := filepath.Join(chartDir, "crds")
+ os.Mkdir(crdsDir, 0755)
+ os.WriteFile(filepath.Join(crdsDir, "test.yaml"), []byte(
+ `# Comments create empty document
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: test.example.io
+`), 0644)
+
+ linter := support.Linter{ChartDir: chartDir}
+ Crds(&linter)
+
+ assert.Len(t, linter.Messages, 0)
+}
diff --git a/helm/pkg/chart/v2/lint/rules/dependencies.go b/helm/pkg/chart/v2/lint/rules/dependencies.go
new file mode 100644
index 000000000..d944a016d
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/dependencies.go
@@ -0,0 +1,101 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+
+import (
+ "fmt"
+ "strings"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+// Dependencies runs lints against a chart's dependencies
+//
+// See https://github.com/helm/helm/issues/7910
+func Dependencies(linter *support.Linter) {
+ c, err := loader.LoadDir(linter.ChartDir)
+ if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c))
+ linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c))
+ linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c))
+}
+
+func validateChartFormat(chartError error) error {
+ if chartError != nil {
+ return fmt.Errorf("unable to load chart\n\t%w", chartError)
+ }
+ return nil
+}
+
+func validateDependencyInChartsDir(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Dependencies() {
+ dependencies[dep.Metadata.Name] = struct{}{}
+ }
+ for _, dep := range c.Metadata.Dependencies {
+ if _, ok := dependencies[dep.Name]; !ok {
+ missing = append(missing, dep.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependencyInMetadata(c *chart.Chart) (err error) {
+ dependencies := map[string]struct{}{}
+ missing := []string{}
+ for _, dep := range c.Metadata.Dependencies {
+ dependencies[dep.Name] = struct{}{}
+ }
+ for _, dep := range c.Dependencies() {
+ if _, ok := dependencies[dep.Metadata.Name]; !ok {
+ missing = append(missing, dep.Metadata.Name)
+ }
+ }
+ if len(missing) > 0 {
+ err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ","))
+ }
+ return err
+}
+
+func validateDependenciesUnique(c *chart.Chart) (err error) {
+ dependencies := map[string]*chart.Dependency{}
+ shadowing := []string{}
+
+ for _, dep := range c.Metadata.Dependencies {
+ key := dep.Name
+ if dep.Alias != "" {
+ key = dep.Alias
+ }
+ if dependencies[key] != nil {
+ shadowing = append(shadowing, key)
+ }
+ dependencies[key] = dep
+ }
+ if len(shadowing) > 0 {
+ err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ","))
+ }
+ return err
+}
diff --git a/helm/pkg/chart/v2/lint/rules/dependencies_test.go b/helm/pkg/chart/v2/lint/rules/dependencies_test.go
new file mode 100644
index 000000000..08a6646cd
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/dependencies_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package rules
+
+import (
+ "path/filepath"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+func chartWithBadDependencies() chart.Chart {
+ badChartDeps := chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "sub2",
+ },
+ {
+ Name: "sub3",
+ },
+ },
+ },
+ }
+
+ badChartDeps.SetDependencies(
+ &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "sub1",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ },
+ },
+ &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "sub2",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ },
+ },
+ )
+ return badChartDeps
+}
+
+func TestValidateDependencyInChartsDir(t *testing.T) {
+ c := chartWithBadDependencies()
+
+ if err := validateDependencyInChartsDir(&c); err == nil {
+ t.Error("chart should have been flagged for missing deps in chart directory")
+ }
+}
+
+func TestValidateDependencyInMetadata(t *testing.T) {
+ c := chartWithBadDependencies()
+
+ if err := validateDependencyInMetadata(&c); err == nil {
+ t.Errorf("chart should have been flagged for missing deps in chart metadata")
+ }
+}
+
+func TestValidateDependenciesUnique(t *testing.T) {
+ tests := []struct {
+ chart chart.Chart
+ }{
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ },
+ {
+ Name: "foo",
+ },
+ },
+ },
+ }},
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ Alias: "bar",
+ },
+ {
+ Name: "bar",
+ },
+ },
+ },
+ }},
+ {chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "badchart",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "foo",
+ Alias: "baz",
+ },
+ {
+ Name: "bar",
+ Alias: "baz",
+ },
+ },
+ },
+ }},
+ }
+
+ for _, tt := range tests {
+ if err := validateDependenciesUnique(&tt.chart); err == nil {
+ t.Errorf("chart should have been flagged for dependency shadowing")
+ }
+ }
+}
+
+func TestDependencies(t *testing.T) {
+ tmp := t.TempDir()
+
+ c := chartWithBadDependencies()
+ err := chartutil.SaveDir(&c, tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ linter := support.Linter{ChartDir: filepath.Join(tmp, c.Metadata.Name)}
+
+ Dependencies(&linter)
+ if l := len(linter.Messages); l != 2 {
+ t.Errorf("expected 2 linter errors for bad chart dependencies. Got %d.", l)
+ for i, msg := range linter.Messages {
+ t.Logf("Message: %d, Error: %#v", i, msg)
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/lint/rules/deprecations.go b/helm/pkg/chart/v2/lint/rules/deprecations.go
new file mode 100644
index 000000000..7d5245869
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/deprecations.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+
+import (
+ "fmt"
+ "strconv"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apiserver/pkg/endpoints/deprecation"
+ kscheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// deprecatedAPIError indicates than an API is deprecated in Kubernetes
+type deprecatedAPIError struct {
+ Deprecated string
+ Message string
+}
+
+func (e deprecatedAPIError) Error() string {
+ msg := e.Message
+ return msg
+}
+
+func validateNoDeprecations(resource *k8sYamlStruct, kubeVersion *common.KubeVersion) error {
+ // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation
+ if resource.APIVersion == "" {
+ return nil
+ }
+ if resource.Kind == "" {
+ return nil
+ }
+
+ if kubeVersion == nil {
+ kubeVersion = &common.DefaultCapabilities.KubeVersion
+ }
+
+ runtimeObject, err := resourceToRuntimeObject(resource)
+ if err != nil {
+ // do not error for non-kubernetes resources
+ if runtime.IsNotRegisteredError(err) {
+ return nil
+ }
+ return err
+ }
+
+ kubeVersionMajor, err := strconv.Atoi(kubeVersion.Major)
+ if err != nil {
+ return err
+ }
+ kubeVersionMinor, err := strconv.Atoi(kubeVersion.Minor)
+ if err != nil {
+ return err
+ }
+
+ if !deprecation.IsDeprecated(runtimeObject, kubeVersionMajor, kubeVersionMinor) {
+ return nil
+ }
+ gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind)
+ return deprecatedAPIError{
+ Deprecated: gvk,
+ Message: deprecation.WarningMessage(runtimeObject),
+ }
+}
+
+func resourceToRuntimeObject(resource *k8sYamlStruct) (runtime.Object, error) {
+ scheme := runtime.NewScheme()
+ kscheme.AddToScheme(scheme)
+
+ gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind)
+ out, err := scheme.New(gvk)
+ if err != nil {
+ return nil, err
+ }
+ out.GetObjectKind().SetGroupVersionKind(gvk)
+ return out, nil
+}
diff --git a/helm/pkg/chart/v2/lint/rules/deprecations_test.go b/helm/pkg/chart/v2/lint/rules/deprecations_test.go
new file mode 100644
index 000000000..e153f67e6
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/deprecations_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules // import "helm.sh/helm/v4/pkg/chart/v2/lint/rules"
+
+import "testing"
+
+func TestValidateNoDeprecations(t *testing.T) {
+ deprecated := &k8sYamlStruct{
+ APIVersion: "extensions/v1beta1",
+ Kind: "Deployment",
+ }
+ err := validateNoDeprecations(deprecated, nil)
+ if err == nil {
+ t.Fatal("Expected deprecated extension to be flagged")
+ }
+ depErr := err.(deprecatedAPIError)
+ if depErr.Message == "" {
+ t.Fatalf("Expected error message to be non-blank: %v", err)
+ }
+
+ if err := validateNoDeprecations(&k8sYamlStruct{
+ APIVersion: "v1",
+ Kind: "Pod",
+ }, nil); err != nil {
+ t.Errorf("Expected a v1 Pod to not be deprecated")
+ }
+}
diff --git a/helm/pkg/chart/v2/lint/rules/template.go b/helm/pkg/chart/v2/lint/rules/template.go
new file mode 100644
index 000000000..43665aa3a
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/template.go
@@ -0,0 +1,384 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/validation"
+ apipath "k8s.io/apimachinery/pkg/api/validation/path"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apimachinery/pkg/util/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/engine"
+)
+
+// Templates lints the templates in the Linter.
+func Templates(linter *support.Linter, namespace string, values map[string]any, options ...TemplateLinterOption) {
+ templateLinter := newTemplateLinter(linter, namespace, values, options...)
+ templateLinter.Lint()
+}
+
+type TemplateLinterOption func(*templateLinter)
+
+func TemplateLinterKubeVersion(kubeVersion *common.KubeVersion) TemplateLinterOption {
+ return func(tl *templateLinter) {
+ tl.kubeVersion = kubeVersion
+ }
+}
+
+func TemplateLinterSkipSchemaValidation(skipSchemaValidation bool) TemplateLinterOption {
+ return func(tl *templateLinter) {
+ tl.skipSchemaValidation = skipSchemaValidation
+ }
+}
+
+func newTemplateLinter(linter *support.Linter, namespace string, values map[string]any, options ...TemplateLinterOption) templateLinter {
+
+ result := templateLinter{
+ linter: linter,
+ values: values,
+ namespace: namespace,
+ }
+
+ for _, o := range options {
+ o(&result)
+ }
+
+ return result
+}
+
+type templateLinter struct {
+ linter *support.Linter
+ values map[string]any
+ namespace string
+ kubeVersion *common.KubeVersion
+ skipSchemaValidation bool
+}
+
+func (t *templateLinter) Lint() {
+ templatesDir := "templates/"
+ templatesPath := filepath.Join(t.linter.ChartDir, templatesDir)
+
+ templatesDirExists := t.linter.RunLinterRule(support.WarningSev, templatesDir, templatesDirExists(templatesPath))
+ if !templatesDirExists {
+ return
+ }
+
+ validTemplatesDir := t.linter.RunLinterRule(support.ErrorSev, templatesDir, validateTemplatesDir(templatesPath))
+ if !validTemplatesDir {
+ return
+ }
+
+ // Load chart and parse templates
+ chart, err := loader.Load(t.linter.ChartDir)
+
+ chartLoaded := t.linter.RunLinterRule(support.ErrorSev, templatesDir, err)
+
+ if !chartLoaded {
+ return
+ }
+
+ options := common.ReleaseOptions{
+ Name: "test-release",
+ Namespace: t.namespace,
+ }
+
+ caps := common.DefaultCapabilities.Copy()
+ if t.kubeVersion != nil {
+ caps.KubeVersion = *t.kubeVersion
+ }
+
+ // lint ignores import-values
+ // See https://github.com/helm/helm/issues/9658
+ if err := chartutil.ProcessDependencies(chart, t.values); err != nil {
+ return
+ }
+
+ cvals, err := util.CoalesceValues(chart, t.values)
+ if err != nil {
+ return
+ }
+
+ valuesToRender, err := util.ToRenderValuesWithSchemaValidation(chart, cvals, options, caps, t.skipSchemaValidation)
+ if err != nil {
+ t.linter.RunLinterRule(support.ErrorSev, templatesDir, err)
+ return
+ }
+ var e engine.Engine
+ e.LintMode = true
+ renderedContentMap, err := e.Render(chart, valuesToRender)
+
+ renderOk := t.linter.RunLinterRule(support.ErrorSev, templatesDir, err)
+
+ if !renderOk {
+ return
+ }
+
+ /* Iterate over all the templates to check:
+ - It is a .yaml file
+ - All the values in the template file is defined
+ - {{}} include | quote
+ - Generated content is a valid Yaml file
+ - Metadata.Namespace is not set
+ */
+ for _, template := range chart.Templates {
+ fileName := template.Name
+
+ t.linter.RunLinterRule(support.ErrorSev, fileName, validateAllowedExtension(fileName))
+
+ // We only apply the following lint rules to yaml files
+ if !isYamlFileExtension(fileName) {
+ continue
+ }
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463
+ // Check that all the templates have a matching value
+ // linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate))
+
+ // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037
+ // linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate)))
+
+ renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)]
+ if strings.TrimSpace(renderedContent) != "" {
+ t.linter.RunLinterRule(support.WarningSev, fileName, validateTopIndentLevel(renderedContent))
+
+ decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096)
+
+ // Lint all resources if the file contains multiple documents separated by ---
+ for {
+ // Even though k8sYamlStruct only defines a few fields, an error in any other
+ // key will be raised as well
+ var yamlStruct *k8sYamlStruct
+
+ err := decoder.Decode(&yamlStruct)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ // If YAML linting fails here, it will always fail in the next block as well, so we should return here.
+ // fix https://github.com/helm/helm/issues/11391
+ if !t.linter.RunLinterRule(support.ErrorSev, fileName, validateYamlContent(err)) {
+ return
+ }
+ if yamlStruct != nil {
+ // NOTE: set to warnings to allow users to support out-of-date kubernetes
+ // Refs https://github.com/helm/helm/issues/8596
+ t.linter.RunLinterRule(support.WarningSev, fileName, validateMetadataName(yamlStruct))
+ t.linter.RunLinterRule(support.WarningSev, fileName, validateNoDeprecations(yamlStruct, t.kubeVersion))
+
+ t.linter.RunLinterRule(support.ErrorSev, fileName, validateMatchSelector(yamlStruct, renderedContent))
+ t.linter.RunLinterRule(support.ErrorSev, fileName, validateListAnnotations(yamlStruct, renderedContent))
+ }
+ }
+ }
+ }
+}
+
+// validateTopIndentLevel checks that the content does not start with an indent level > 0.
+//
+// This error can occur when a template accidentally inserts space. It can cause
+// unpredictable errors depending on whether the text is normalized before being passed
+// into the YAML parser. So we trap it here.
+//
+// See https://github.com/helm/helm/issues/8467
+func validateTopIndentLevel(content string) error {
+ // Read lines until we get to a non-empty one
+ scanner := bufio.NewScanner(bytes.NewBufferString(content))
+ for scanner.Scan() {
+ line := scanner.Text()
+ // If line is empty, skip
+ if strings.TrimSpace(line) == "" {
+ continue
+ }
+ // If it starts with one or more spaces, this is an error
+ if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+ return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line)
+ }
+ // Any other condition passes.
+ return nil
+ }
+ return scanner.Err()
+}
+
+// Validation functions
+func templatesDirExists(templatesPath string) error {
+ _, err := os.Stat(templatesPath)
+ if errors.Is(err, os.ErrNotExist) {
+ return errors.New("directory does not exist")
+ }
+ return nil
+}
+
+func validateTemplatesDir(templatesPath string) error {
+ fi, err := os.Stat(templatesPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return errors.New("not a directory")
+ }
+ return nil
+}
+
+func validateAllowedExtension(fileName string) error {
+ ext := filepath.Ext(fileName)
+ validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"}
+
+ if slices.Contains(validExtensions, ext) {
+ return nil
+ }
+
+ return fmt.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext)
+}
+
+func validateYamlContent(err error) error {
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+
+ return nil
+}
+
+// validateMetadataName uses the correct validation function for the object
+// Kind, or if not set, defaults to the standard definition of a subdomain in
+// DNS (RFC 1123), used by most resources.
+func validateMetadataName(obj *k8sYamlStruct) error {
+ fn := validateMetadataNameFunc(obj)
+ allErrs := field.ErrorList{}
+ for _, msg := range fn(obj.Metadata.Name, false) {
+ allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg))
+ }
+ if len(allErrs) > 0 {
+ return fmt.Errorf("object name does not conform to Kubernetes naming requirements: %q: %w", obj.Metadata.Name, allErrs.ToAggregate())
+ }
+ return nil
+}
+
+// validateMetadataNameFunc will return a name validation function for the
+// object kind, if defined below.
+//
+// Rules should match those set in the various api validations:
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274
+// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39
+// ...
+//
+// Implementing here to avoid importing k/k.
+//
+// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object
+// kinds that don't have special requirements, so is the most likely to work if
+// new kinds are added.
+func validateMetadataNameFunc(obj *k8sYamlStruct) validation.ValidateNameFunc {
+ switch strings.ToLower(obj.Kind) {
+ case "pod", "node", "secret", "endpoints", "resourcequota", // core
+ "controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
+ "autoscaler", // autoscaler
+ "cronjob", "job", // batch
+ "lease", // coordination
+ "endpointslice", // discovery
+ "networkpolicy", "ingress", // networking
+ "podsecuritypolicy", // policy
+ "priorityclass", // scheduling
+ "podpreset", // settings
+ "storageclass", "volumeattachment", "csinode": // storage
+ return validation.NameIsDNSSubdomain
+ case "service":
+ return validation.NameIsDNS1035Label
+ case "namespace":
+ return validation.ValidateNamespaceName
+ case "serviceaccount":
+ return validation.ValidateServiceAccountName
+ case "certificatesigningrequest":
+ // No validation.
+ // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140
+ return func(_ string, _ bool) []string { return nil }
+ case "role", "clusterrole", "rolebinding", "clusterrolebinding":
+ // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34
+ return func(name string, _ bool) []string {
+ return apipath.IsValidPathSegmentName(name)
+ }
+ default:
+ return validation.NameIsDNSSubdomain
+ }
+}
+
+// validateMatchSelector ensures that template specs have a selector declared.
+// See https://github.com/helm/helm/issues/1990
+func validateMatchSelector(yamlStruct *k8sYamlStruct, manifest string) error {
+ switch yamlStruct.Kind {
+ case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
+ // verify that matchLabels or matchExpressions is present
+ if !strings.Contains(manifest, "matchLabels") && !strings.Contains(manifest, "matchExpressions") {
+ return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
+ }
+ }
+ return nil
+}
+
+func validateListAnnotations(yamlStruct *k8sYamlStruct, manifest string) error {
+ if yamlStruct.Kind == "List" {
+ m := struct {
+ Items []struct {
+ Metadata struct {
+ Annotations map[string]string
+ }
+ }
+ }{}
+
+ if err := yaml.Unmarshal([]byte(manifest), &m); err != nil {
+ return validateYamlContent(err)
+ }
+
+ for _, i := range m.Items {
+ if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok {
+ return errors.New("annotation 'helm.sh/resource-policy' within List objects are ignored")
+ }
+ }
+ }
+ return nil
+}
+
+func isYamlFileExtension(fileName string) bool {
+ ext := strings.ToLower(filepath.Ext(fileName))
+ return ext == ".yaml" || ext == ".yml"
+}
+
+// k8sYamlStruct stubs a Kubernetes YAML file.
+type k8sYamlStruct struct {
+ APIVersion string `json:"apiVersion"`
+ Kind string
+ Metadata k8sYamlMetadata
+}
+
+type k8sYamlMetadata struct {
+ Namespace string
+ Name string
+}
diff --git a/helm/pkg/chart/v2/lint/rules/template_test.go b/helm/pkg/chart/v2/lint/rules/template_test.go
new file mode 100644
index 000000000..c08ba6cc3
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/template_test.go
@@ -0,0 +1,490 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+)
+
+const templateTestBasedir = "./testdata/albatross"
+
+func TestValidateAllowedExtension(t *testing.T) {
+ var failTest = []string{"/foo", "/test.toml"}
+ for _, test := range failTest {
+ err := validateAllowedExtension(test)
+ if err == nil || !strings.Contains(err.Error(), "Valid extensions are .yaml, .yml, .tpl, or .txt") {
+ t.Errorf("validateAllowedExtension('%s') to return \"Valid extensions are .yaml, .yml, .tpl, or .txt\", got no error", test)
+ }
+ }
+ var successTest = []string{"/foo.yaml", "foo.yaml", "foo.tpl", "/foo/bar/baz.yaml", "NOTES.txt"}
+ for _, test := range successTest {
+ err := validateAllowedExtension(test)
+ if err != nil {
+ t.Errorf("validateAllowedExtension('%s') to return no error but got \"%s\"", test, err.Error())
+ }
+ }
+}
+
+var values = map[string]interface{}{"nameOverride": "", "httpPort": 80}
+
+const namespace = "testNamespace"
+
+func TestTemplateParsing(t *testing.T) {
+ linter := support.Linter{ChartDir: templateTestBasedir}
+ Templates(
+ &linter,
+ namespace,
+ values,
+ TemplateLinterSkipSchemaValidation(false))
+ res := linter.Messages
+
+ if len(res) != 1 {
+ t.Fatalf("Expected one error, got %d, %v", len(res), res)
+ }
+
+ if !strings.Contains(res[0].Err.Error(), "deliberateSyntaxError") {
+ t.Errorf("Unexpected error: %s", res[0])
+ }
+}
+
+var wrongTemplatePath = filepath.Join(templateTestBasedir, "templates", "fail.yaml")
+var ignoredTemplatePath = filepath.Join(templateTestBasedir, "fail.yaml.ignored")
+
+// Test a template with all the existing features:
+// namespaces, partial templates
+func TestTemplateIntegrationHappyPath(t *testing.T) {
+ // Rename file so it gets ignored by the linter
+ os.Rename(wrongTemplatePath, ignoredTemplatePath)
+ defer os.Rename(ignoredTemplatePath, wrongTemplatePath)
+
+ linter := support.Linter{ChartDir: templateTestBasedir}
+ Templates(
+ &linter,
+ namespace,
+ values,
+ TemplateLinterSkipSchemaValidation(false))
+ res := linter.Messages
+
+ if len(res) != 0 {
+ t.Fatalf("Expected no error, got %d, %v", len(res), res)
+ }
+}
+
+func TestMultiTemplateFail(t *testing.T) {
+ linter := support.Linter{ChartDir: "./testdata/multi-template-fail"}
+ Templates(
+ &linter,
+ namespace,
+ values,
+ TemplateLinterSkipSchemaValidation(false))
+ res := linter.Messages
+
+ if len(res) != 1 {
+ t.Fatalf("Expected 1 error, got %d, %v", len(res), res)
+ }
+
+ if !strings.Contains(res[0].Err.Error(), "object name does not conform to Kubernetes naming requirements") {
+ t.Errorf("Unexpected error: %s", res[0].Err)
+ }
+}
+
+func TestValidateMetadataName(t *testing.T) {
+ tests := []struct {
+ obj *k8sYamlStruct
+ wantErr bool
+ }{
+ // Most kinds use IsDNS1123Subdomain.
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "Pod", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "ServiceAccount", Metadata: k8sYamlMetadata{Name: "operator:sa"}}, true},
+
+ // Service uses IsDNS1035Label.
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "123baz"}}, true},
+ {&k8sYamlStruct{Kind: "Service", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+
+ // Namespace uses IsDNS1123Label.
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, true},
+ {&k8sYamlStruct{Kind: "Namespace", Metadata: k8sYamlMetadata{Name: "foo-bar"}}, false},
+
+ // CertificateSigningRequest has no validation.
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: ""}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "CertificateSigningRequest", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, false},
+
+ // RBAC uses path validation.
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "Role", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "foo.bar"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator/role"}}, true},
+ {&k8sYamlStruct{Kind: "ClusterRole", Metadata: k8sYamlMetadata{Name: "operator%role"}}, true},
+ {&k8sYamlStruct{Kind: "RoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+ {&k8sYamlStruct{Kind: "ClusterRoleBinding", Metadata: k8sYamlMetadata{Name: "operator:role"}}, false},
+
+ // Unknown Kind
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: ""}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.bar1234baz.seventyone"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "FOO"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "123baz"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "foo.BAR.baz"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one-two"}}, false},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "-two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "one_two"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "a..b"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "%^$%*@^*@^"}}, true},
+ {&k8sYamlStruct{Kind: "FutureKind", Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+
+ // No kind
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "foo"}}, false},
+ {&k8sYamlStruct{Metadata: k8sYamlMetadata{Name: "operator:pod"}}, true},
+ }
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%s/%s", tt.obj.Kind, tt.obj.Metadata.Name), func(t *testing.T) {
+ if err := validateMetadataName(tt.obj); (err != nil) != tt.wantErr {
+ t.Errorf("validateMetadataName() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestDeprecatedAPIFails(t *testing.T) {
+ modTime := time.Now()
+ mychart := chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "failapi",
+ Version: "0.1.0",
+ Icon: "satisfy-the-linting-gods.gif",
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/baddeployment.yaml",
+ ModTime: modTime,
+ Data: []byte("apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: baddep\nspec: {selector: {matchLabels: {foo: bar}}}"),
+ },
+ {
+ Name: "templates/goodsecret.yaml",
+ ModTime: modTime,
+ Data: []byte("apiVersion: v1\nkind: Secret\nmetadata:\n name: goodsecret"),
+ },
+ },
+ }
+ tmpdir := t.TempDir()
+
+ if err := chartutil.SaveDir(&mychart, tmpdir); err != nil {
+ t.Fatal(err)
+ }
+
+ linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())}
+ Templates(
+ &linter,
+ namespace,
+ values,
+ TemplateLinterSkipSchemaValidation(false))
+ if l := len(linter.Messages); l != 1 {
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ t.Fatalf("Expected 1 lint error, got %d", l)
+ }
+
+ err := linter.Messages[0].Err.(deprecatedAPIError)
+ if err.Deprecated != "apps/v1beta1 Deployment" {
+ t.Errorf("Surprised to learn that %q is deprecated", err.Deprecated)
+ }
+}
+
+const manifest = `apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: foo
+data:
+ myval1: {{default "val" .Values.mymap.key1 }}
+ myval2: {{default "val" .Values.mymap.key2 }}
+`
+
+// TestStrictTemplateParsingMapError is a regression test.
+//
+// The template engine should not produce an error when a map in values.yaml does
+// not contain all possible keys.
+//
+// See https://github.com/helm/helm/issues/7483
+func TestStrictTemplateParsingMapError(t *testing.T) {
+
+ ch := chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "regression7483",
+ APIVersion: "v2",
+ Version: "0.1.0",
+ },
+ Values: map[string]interface{}{
+ "mymap": map[string]string{
+ "key1": "val1",
+ },
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/configmap.yaml",
+ ModTime: time.Now(),
+ Data: []byte(manifest),
+ },
+ },
+ }
+ dir := t.TempDir()
+ if err := chartutil.SaveDir(&ch, dir); err != nil {
+ t.Fatal(err)
+ }
+ linter := &support.Linter{
+ ChartDir: filepath.Join(dir, ch.Metadata.Name),
+ }
+ Templates(
+ linter,
+ namespace,
+ ch.Values,
+ TemplateLinterSkipSchemaValidation(false))
+ if len(linter.Messages) != 0 {
+ t.Errorf("expected zero messages, got %d", len(linter.Messages))
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %q", i, msg)
+ }
+ }
+}
+
+func TestValidateMatchSelector(t *testing.T) {
+ md := &k8sYamlStruct{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ Metadata: k8sYamlMetadata{
+ Name: "mydeployment",
+ },
+ }
+ manifest := `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err != nil {
+ t.Error(err)
+ }
+ manifest = `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchExpressions:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err != nil {
+ t.Error(err)
+ }
+ manifest = `
+ apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ `
+ if err := validateMatchSelector(md, manifest); err == nil {
+ t.Error("expected Deployment with no selector to fail")
+ }
+}
+
+func TestValidateTopIndentLevel(t *testing.T) {
+ for doc, shouldFail := range map[string]bool{
+ // Should not fail
+ "\n\n\n\t\n \t\n": false,
+ "apiVersion:foo\n bar:baz": false,
+ "\n\n\napiVersion:foo\n\n\n": false,
+ // Should fail
+ " apiVersion:foo": true,
+ "\n\n apiVersion:foo\n\n": true,
+ } {
+ if err := validateTopIndentLevel(doc); (err == nil) == shouldFail {
+ t.Errorf("Expected %t for %q", shouldFail, doc)
+ }
+ }
+
+}
+
+// TestEmptyWithCommentsManifests checks the lint is not failing against empty manifests that contains only comments
+// See https://github.com/helm/helm/issues/8621
+func TestEmptyWithCommentsManifests(t *testing.T) {
+ mychart := chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "emptymanifests",
+ Version: "0.1.0",
+ Icon: "satisfy-the-linting-gods.gif",
+ },
+ Templates: []*common.File{
+ {
+ Name: "templates/empty-with-comments.yaml",
+ ModTime: time.Now(),
+ Data: []byte("#@formatter:off\n"),
+ },
+ },
+ }
+ tmpdir := t.TempDir()
+
+ if err := chartutil.SaveDir(&mychart, tmpdir); err != nil {
+ t.Fatal(err)
+ }
+
+ linter := support.Linter{ChartDir: filepath.Join(tmpdir, mychart.Name())}
+ Templates(
+ &linter,
+ namespace,
+ values,
+ TemplateLinterSkipSchemaValidation(false))
+ if l := len(linter.Messages); l > 0 {
+ for i, msg := range linter.Messages {
+ t.Logf("Message %d: %s", i, msg)
+ }
+ t.Fatalf("Expected 0 lint errors, got %d", l)
+ }
+}
+func TestValidateListAnnotations(t *testing.T) {
+ md := &k8sYamlStruct{
+ APIVersion: "v1",
+ Kind: "List",
+ Metadata: k8sYamlMetadata{
+ Name: "list",
+ },
+ }
+ manifest := `
+apiVersion: v1
+kind: List
+items:
+ - apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ annotations:
+ helm.sh/resource-policy: keep
+`
+
+ if err := validateListAnnotations(md, manifest); err == nil {
+ t.Fatal("expected list with nested keep annotations to fail")
+ }
+
+ manifest = `
+apiVersion: v1
+kind: List
+metadata:
+ annotations:
+ helm.sh/resource-policy: keep
+items:
+ - apiVersion: v1
+ kind: ConfigMap
+`
+
+ if err := validateListAnnotations(md, manifest); err != nil {
+ t.Fatalf("List objects keep annotations should pass. got: %s", err)
+ }
+}
+
+func TestIsYamlFileExtension(t *testing.T) {
+ tests := []struct {
+ filename string
+ expected bool
+ }{
+ {"test.yaml", true},
+ {"test.yml", true},
+ {"test.txt", false},
+ {"test", false},
+ }
+
+ for _, test := range tests {
+ result := isYamlFileExtension(test.filename)
+ if result != test.expected {
+ t.Errorf("isYamlFileExtension(%s) = %v; want %v", test.filename, result, test.expected)
+ }
+ }
+
+}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml
new file mode 100644
index 000000000..21124acfc
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/albatross/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: albatross
+description: testing chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl b/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl
new file mode 100644
index 000000000..24f76db73
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{define "name"}}{{default "nginx" .Values.nameOverride | trunc 63 | trimSuffix "-" }}{{end}}
+
+{{/*
+Create a default fully qualified app name.
+
+We truncate at 63 chars because some Kubernetes name fields are limited to this
+(by the DNS naming spec).
+*/}}
+{{define "fullname"}}
+{{- $name := default "nginx" .Values.nameOverride -}}
+{{printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{end}}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml b/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml
new file mode 100644
index 000000000..a11e0e90e
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/fail.yaml
@@ -0,0 +1 @@
+{{ deliberateSyntaxError }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml b/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml
new file mode 100644
index 000000000..16bb27d55
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/albatross/templates/svc.yaml
@@ -0,0 +1,19 @@
+# This is a service gateway to the replica set created by the deployment.
+# Take a look at the deployment.yaml for general notes about this chart.
+apiVersion: v1
+kind: Service
+metadata:
+ name: "{{ .Values.name }}"
+ labels:
+ app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+ kubeVersion: {{ .Capabilities.KubeVersion.Major }}
+spec:
+ ports:
+ - port: {{default 80 .Values.httpPort | quote}}
+ targetPort: 80
+ protocol: TCP
+ name: http
+ selector:
+ app.kubernetes.io/name: {{template "fullname" .}}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml
new file mode 100644
index 000000000..74cc6a0dc
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/albatross/values.yaml
@@ -0,0 +1 @@
+name: "mariner"
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml
new file mode 100644
index 000000000..e6bac7693
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/anotherbadchartfile/Chart.yaml
@@ -0,0 +1,15 @@
+name: "some-chart"
+apiVersion: v2
+description: A Helm chart for Kubernetes
+version: 72445e2
+home: ""
+type: application
+appVersion: 72225e2
+icon: "https://some-url.com/icon.jpeg"
+dependencies:
+ - name: mariadb
+ version: 5.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - database
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml
new file mode 100644
index 000000000..3564ede3e
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badchartfile/Chart.yaml
@@ -0,0 +1,11 @@
+description: A Helm chart for Kubernetes
+version: 0.0.0.0
+home: ""
+type: application
+dependencies:
+- name: mariadb
+ version: 5.x.x
+ repository: https://charts.helm.sh/stable/
+ condition: mariadb.enabled
+ tags:
+ - database
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml
new file mode 100644
index 000000000..9f367033b
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badchartfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badchartfile.
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml
new file mode 100644
index 000000000..64f8fb8bf
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badchartname/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: "../badchartname"
+type: application
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml
new file mode 100644
index 000000000..9f367033b
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badchartname/values.yaml
@@ -0,0 +1 @@
+# Default values for badchartfile.
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml
new file mode 100644
index 000000000..08c4b61ac
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: badcrdfile
+type: application
+icon: http://riverrun.io
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
new file mode 100644
index 000000000..468916053
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-apiversion.yaml
@@ -0,0 +1,2 @@
+apiVersion: bad.k8s.io/v1beta1
+kind: CustomResourceDefinition
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
new file mode 100644
index 000000000..523b97f85
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/crds/bad-crd.yaml
@@ -0,0 +1,2 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: NotACustomResourceDefinition
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/templates/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml
new file mode 100644
index 000000000..2fffc7715
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badcrdfile/values.yaml
@@ -0,0 +1 @@
+# Default values for badcrdfile.
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml
new file mode 100644
index 000000000..632919d03
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+name: badvaluesfile
+description: A Helm chart for Kubernetes
+version: 0.0.1
+home: ""
+icon: http://riverrun.io
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
new file mode 100644
index 000000000..6c2ceb8db
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/templates/badvaluesfile.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{.name | default "foo" | title}}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml
new file mode 100644
index 000000000..b5a10271c
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/badvaluesfile/values.yaml
@@ -0,0 +1,2 @@
+# Invalid value for badvaluesfile for testing lint fails with invalid yaml format
+name= "value"
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml
new file mode 100644
index 000000000..cb7a4bf20
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/goodone/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: goodone
+description: good testing chart
+version: 199.44.12345-Alpha.1+cafe009
+icon: http://riverrun.io
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml b/helm/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml
new file mode 100644
index 000000000..1d7350f1d
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/goodone/crds/test-crd.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: tests.test.io
+spec:
+ group: test.io
+ names:
+ kind: Test
+ listKind: TestList
+ plural: tests
+ singular: test
+ scope: Namespaced
+ versions:
+ - name : v1alpha2
+ served: true
+ storage: true
+ - name : v1alpha1
+ served: true
+ storage: false
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml b/helm/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml
new file mode 100644
index 000000000..cd46f62c7
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/goodone/templates/goodone.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.name | default "foo" | lower }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml
new file mode 100644
index 000000000..92c3d9bb9
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/goodone/values.yaml
@@ -0,0 +1 @@
+name: "goodone-here"
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml
new file mode 100644
index 000000000..0fd58d1d4
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/invalidchartfile/Chart.yaml
@@ -0,0 +1,6 @@
+name: some-chart
+apiVersion: v2
+apiVersion: v1
+description: A Helm chart for Kubernetes
+version: 1.3.0
+icon: http://example.com
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/invalidchartfile/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml
new file mode 100644
index 000000000..18e30f70f
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart for Kubernetes
+version: 0.1.0
+name: invalidcrdsdir
+type: application
+icon: http://riverrun.io
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds b/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/crds
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml
new file mode 100644
index 000000000..6b1611a64
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/invalidcrdsdir/values.yaml
@@ -0,0 +1 @@
+# Default values for invalidcrdsdir.
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml
new file mode 100644
index 000000000..11b2c71c2
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/Chart.yaml
@@ -0,0 +1,25 @@
+apiVersion: v2
+name: test
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+icon: https://riverrun.io
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml
new file mode 100644
index 000000000..213198fda
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/templates/bad.yaml
@@ -0,0 +1 @@
+{ {- $relname := .Release.Name -}}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml
new file mode 100644
index 000000000..1cc3182ea
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/malformed-template/values.yaml
@@ -0,0 +1,82 @@
+# Default values for test.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml
new file mode 100644
index 000000000..b57427de9
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/multi-template-fail/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: multi-template-fail
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml b/helm/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
new file mode 100644
index 000000000..835be07be
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/multi-template-fail/templates/multi-fail.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: game-config
+data:
+ game.properties: cheat
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: -this:name-is-not_valid$
+data:
+ game.properties: empty
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml
new file mode 100644
index 000000000..7097e17d8
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: v3-fail
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl
new file mode 100644
index 000000000..0b89e723b
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "v3-fail.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "v3-fail.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "v3-fail.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "v3-fail.labels" -}}
+helm.sh/chart: {{ include "v3-fail.chart" . }}
+{{ include "v3-fail.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "v3-fail.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "v3-fail.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "v3-fail.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "v3-fail.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml
new file mode 100644
index 000000000..6d651ab8e
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "v3-fail.fullname" . }}
+ labels:
+ nope: {{ .Release.Time }}
+ {{- include "v3-fail.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "v3-fail.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "v3-fail.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "v3-fail.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml
new file mode 100644
index 000000000..4790650d0
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/ingress.yaml
@@ -0,0 +1,62 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "v3-fail.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+ {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+ {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+ {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include "v3-fail.labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ "helm.sh/hook": crd-install
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+ ingressClassName: {{ .Values.ingress.className }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+ pathType: {{ .pathType }}
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: {{ $fullName }}
+ port:
+ number: {{ $svcPort }}
+ {{- else }}
+ serviceName: {{ $fullName }}
+ servicePort: {{ $svcPort }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml
new file mode 100644
index 000000000..79a0f40b0
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "v3-fail.fullname" . }}
+ annotations:
+ helm.sh/hook: crd-install
+ labels:
+ {{- include "v3-fail.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "v3-fail.selectorLabels" . | nindent 4 }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml
new file mode 100644
index 000000000..01d99b4e6
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/v3-fail/values.yaml
@@ -0,0 +1,66 @@
+# Default values for v3-fail.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ pullPolicy: IfNotPresent
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml
new file mode 100644
index 000000000..6648daf56
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/Chart.yaml
@@ -0,0 +1,16 @@
+apiVersion: v2
+name: withsubchart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
+icon: http://riverrun.io
+
+dependencies:
+ - name: subchart
+ version: 0.1.16
+ repository: "file://../subchart"
+ import-values:
+ - child: subchart
+ parent: subchart
+
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
new file mode 100644
index 000000000..8610a4f25
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: subchart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
new file mode 100644
index 000000000..6cb6cc2af
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/templates/subchart.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.subchart.name | lower }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
new file mode 100644
index 000000000..422a359d5
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/charts/subchart/values.yaml
@@ -0,0 +1,2 @@
+subchart:
+ name: subchart
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml
new file mode 100644
index 000000000..6cb6cc2af
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/templates/mainchart.yaml
@@ -0,0 +1,2 @@
+metadata:
+ name: {{ .Values.subchart.name | lower }}
diff --git a/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml b/helm/pkg/chart/v2/lint/rules/testdata/withsubchart/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/lint/rules/values.go b/helm/pkg/chart/v2/lint/rules/values.go
new file mode 100644
index 000000000..994a6a463
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/values.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+)
+
+// ValuesWithOverrides tests the values.yaml file.
+//
+// If a schema is present in the chart, values are tested against that. Otherwise,
+// they are only tested for well-formedness.
+//
+// If additional values are supplied, they are coalesced into the values in values.yaml.
+func ValuesWithOverrides(linter *support.Linter, valueOverrides map[string]interface{}, skipSchemaValidation bool) {
+ file := "values.yaml"
+ vf := filepath.Join(linter.ChartDir, file)
+ fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf))
+
+ if !fileExists {
+ return
+ }
+
+ linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, valueOverrides, skipSchemaValidation))
+}
+
+func validateValuesFileExistence(valuesPath string) error {
+ _, err := os.Stat(valuesPath)
+ if err != nil {
+ return fmt.Errorf("file does not exist")
+ }
+ return nil
+}
+
+func validateValuesFile(valuesPath string, overrides map[string]interface{}, skipSchemaValidation bool) error {
+ values, err := common.ReadValuesFile(valuesPath)
+ if err != nil {
+ return fmt.Errorf("unable to parse YAML: %w", err)
+ }
+
+ // Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top
+ // level values against the top-level expectations. Subchart values are not linted.
+ // We could change that. For now, though, we retain that strategy, and thus can
+ // coalesce tables (like reuse-values does) instead of doing the full chart
+ // CoalesceValues
+ coalescedValues := util.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides)
+ coalescedValues = util.CoalesceTables(coalescedValues, values)
+
+ ext := filepath.Ext(valuesPath)
+ schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json"
+ schema, err := os.ReadFile(schemaPath)
+ if len(schema) == 0 {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if !skipSchemaValidation {
+ return util.ValidateAgainstSingleSchema(coalescedValues, schema)
+ }
+
+ return nil
+}
diff --git a/helm/pkg/chart/v2/lint/rules/values_test.go b/helm/pkg/chart/v2/lint/rules/values_test.go
new file mode 100644
index 000000000..288b77436
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/rules/values_test.go
@@ -0,0 +1,183 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+var nonExistingValuesFilePath = filepath.Join("/fake/dir", "values.yaml")
+
+const testSchema = `
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "helm values test schema",
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "username",
+ "password"
+ ],
+ "properties": {
+ "username": {
+ "description": "Your username",
+ "type": "string"
+ },
+ "password": {
+ "description": "Your password",
+ "type": "string"
+ }
+ }
+}
+`
+
+func TestValidateValuesYamlNotDirectory(t *testing.T) {
+ _ = os.Mkdir(nonExistingValuesFilePath, os.ModePerm)
+ defer os.Remove(nonExistingValuesFilePath)
+
+ err := validateValuesFileExistence(nonExistingValuesFilePath)
+ if err == nil {
+ t.Errorf("validateValuesFileExistence to return a linter error, got no error")
+ }
+}
+
+func TestValidateValuesFileWellFormed(t *testing.T) {
+ badYaml := `
+ not:well[]{}formed
+ `
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(badYaml))
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err == nil {
+ t.Fatal("expected values file to fail parsing")
+ }
+}
+
+func TestValidateValuesFileSchema(t *testing.T) {
+ yaml := "username: admin\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, map[string]interface{}{}, false); err != nil {
+ t.Fatalf("Failed validation with %s", err)
+ }
+}
+
+func TestValidateValuesFileSchemaFailure(t *testing.T) {
+ // 1234 is an int, not a string. This should fail.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, false)
+ if err == nil {
+ t.Fatal("expected values file to fail parsing")
+ }
+
+ assert.Contains(t, err.Error(), "- at '/username': got number, want string")
+}
+
+func TestValidateValuesFileSchemaFailureButWithSkipSchemaValidation(t *testing.T) {
+ // 1234 is an int, not a string. This should fail normally but pass with skipSchemaValidation.
+ yaml := "username: 1234\npassword: swordfish"
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, map[string]interface{}{}, true)
+ if err != nil {
+ t.Fatal("expected values file to pass parsing because of skipSchemaValidation")
+ }
+}
+
+func TestValidateValuesFileSchemaOverrides(t *testing.T) {
+ yaml := "username: admin"
+ overrides := map[string]interface{}{
+ "password": "swordfish",
+ }
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+ if err := validateValuesFile(valfile, overrides, false); err != nil {
+ t.Fatalf("Failed validation with %s", err)
+ }
+}
+
+func TestValidateValuesFile(t *testing.T) {
+ tests := []struct {
+ name string
+ yaml string
+ overrides map[string]interface{}
+ errorMessage string
+ }{
+ {
+ name: "value added",
+ yaml: "username: admin",
+ overrides: map[string]interface{}{"password": "swordfish"},
+ },
+ {
+ name: "value not overridden",
+ yaml: "username: admin\npassword:",
+ overrides: map[string]interface{}{"username": "anotherUser"},
+ errorMessage: "- at '/password': got null, want string",
+ },
+ {
+ name: "value overridden",
+ yaml: "username: admin\npassword:",
+ overrides: map[string]interface{}{"username": "anotherUser", "password": "swordfish"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tmpdir := ensure.TempFile(t, "values.yaml", []byte(tt.yaml))
+ createTestingSchema(t, tmpdir)
+
+ valfile := filepath.Join(tmpdir, "values.yaml")
+
+ err := validateValuesFile(valfile, tt.overrides, false)
+
+ switch {
+ case err != nil && tt.errorMessage == "":
+ t.Errorf("Failed validation with %s", err)
+ case err == nil && tt.errorMessage != "":
+ t.Error("expected values file to fail parsing")
+ case err != nil && tt.errorMessage != "":
+ assert.Contains(t, err.Error(), tt.errorMessage, "Failed with unexpected error")
+ }
+ })
+ }
+}
+
+func createTestingSchema(t *testing.T, dir string) string {
+ t.Helper()
+ schemafile := filepath.Join(dir, "values.schema.json")
+ if err := os.WriteFile(schemafile, []byte(testSchema), 0700); err != nil {
+ t.Fatalf("Failed to write schema to tmpdir: %s", err)
+ }
+ return schemafile
+}
diff --git a/helm/pkg/chart/v2/lint/support/doc.go b/helm/pkg/chart/v2/lint/support/doc.go
new file mode 100644
index 000000000..7e050b8c2
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/support/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package support contains tools for linting charts.
+
+Linting is the process of testing charts for errors or warnings regarding
+formatting, compilation, or standards compliance.
+*/
+package support // import "helm.sh/helm/v4/pkg/chart/v2/lint/support"
diff --git a/helm/pkg/chart/v2/lint/support/message.go b/helm/pkg/chart/v2/lint/support/message.go
new file mode 100644
index 000000000..5efbc7a61
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/support/message.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import "fmt"
+
+// Severity indicates the severity of a Message.
+const (
+ // UnknownSev indicates that the severity of the error is unknown, and should not stop processing.
+ UnknownSev = iota
+ // InfoSev indicates information, for example missing values.yaml file
+ InfoSev
+ // WarningSev indicates that something does not meet code standards, but will likely function.
+ WarningSev
+ // ErrorSev indicates that something will not likely function.
+ ErrorSev
+)
+
+// sev matches the *Sev states.
+var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"}
+
+// Linter encapsulates a linting run of a particular chart.
+type Linter struct {
+ Messages []Message
+ // The highest severity of all the failing lint rules
+ HighestSeverity int
+ ChartDir string
+}
+
+// Message describes an error encountered while linting.
+type Message struct {
+ // Severity is one of the *Sev constants
+ Severity int
+ Path string
+ Err error
+}
+
+func (m Message) Error() string {
+ return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error())
+}
+
+// NewMessage creates a new Message struct
+func NewMessage(severity int, path string, err error) Message {
+ return Message{Severity: severity, Path: path, Err: err}
+}
+
+// RunLinterRule returns true if the validation passed
+func (l *Linter) RunLinterRule(severity int, path string, err error) bool {
+ // severity is out of bound
+ if severity < 0 || severity >= len(sev) {
+ return false
+ }
+
+ if err != nil {
+ l.Messages = append(l.Messages, NewMessage(severity, path, err))
+
+ if severity > l.HighestSeverity {
+ l.HighestSeverity = severity
+ }
+ }
+ return err == nil
+}
diff --git a/helm/pkg/chart/v2/lint/support/message_test.go b/helm/pkg/chart/v2/lint/support/message_test.go
new file mode 100644
index 000000000..ce5b5e42e
--- /dev/null
+++ b/helm/pkg/chart/v2/lint/support/message_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package support
+
+import (
+ "errors"
+ "testing"
+)
+
+var errLint = errors.New("lint failed")
+
+func TestRunLinterRule(t *testing.T) {
+ var tests = []struct {
+ Severity int
+ LintError error
+ ExpectedMessages int
+ ExpectedReturn bool
+ ExpectedHighestSeverity int
+ }{
+ {InfoSev, errLint, 1, false, InfoSev},
+ {WarningSev, errLint, 2, false, WarningSev},
+ {ErrorSev, errLint, 3, false, ErrorSev},
+ // No error so it returns true
+ {ErrorSev, nil, 3, true, ErrorSev},
+ // Retains highest severity
+ {InfoSev, errLint, 4, false, ErrorSev},
+ // Invalid severity values
+ {4, errLint, 4, false, ErrorSev},
+ {22, errLint, 4, false, ErrorSev},
+ {-1, errLint, 4, false, ErrorSev},
+ }
+
+ linter := Linter{}
+ for _, test := range tests {
+ isValid := linter.RunLinterRule(test.Severity, "chart", test.LintError)
+ if len(linter.Messages) != test.ExpectedMessages {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.Messages should now have %d message, we got %d", test.Severity, test.LintError, test.ExpectedMessages, len(linter.Messages))
+ }
+
+ if linter.HighestSeverity != test.ExpectedHighestSeverity {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), linter.HighestSeverity should be %d, we got %d", test.Severity, test.LintError, test.ExpectedHighestSeverity, linter.HighestSeverity)
+ }
+
+ if isValid != test.ExpectedReturn {
+ t.Errorf("RunLinterRule(%d, \"chart\", %v), should have returned %t but returned %t", test.Severity, test.LintError, test.ExpectedReturn, isValid)
+ }
+ }
+}
+
+func TestMessage(t *testing.T) {
+ m := Message{ErrorSev, "Chart.yaml", errors.New("Foo")}
+ if m.Error() != "[ERROR] Chart.yaml: Foo" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+
+ m = Message{WarningSev, "templates/", errors.New("Bar")}
+ if m.Error() != "[WARNING] templates/: Bar" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+
+ m = Message{InfoSev, "templates/rc.yaml", errors.New("FooBar")}
+ if m.Error() != "[INFO] templates/rc.yaml: FooBar" {
+ t.Errorf("Unexpected output: %s", m.Error())
+ }
+}
diff --git a/helm/pkg/chart/v2/loader/archive.go b/helm/pkg/chart/v2/loader/archive.go
new file mode 100644
index 000000000..c6885e125
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/archive.go
@@ -0,0 +1,74 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// FileLoader loads a chart from a file
+type FileLoader string
+
+// Load loads a chart
+func (l FileLoader) Load() (*chart.Chart, error) {
+ return LoadFile(string(l))
+}
+
+// LoadFile loads from an archive file.
+func LoadFile(name string) (*chart.Chart, error) {
+ if fi, err := os.Stat(name); err != nil {
+ return nil, err
+ } else if fi.IsDir() {
+ return nil, errors.New("cannot load a directory")
+ }
+
+ raw, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ err = archive.EnsureArchive(name, raw)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := LoadArchive(raw)
+ if err != nil {
+ if errors.Is(err, gzip.ErrHeader) {
+ return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %w)", name, err)
+ }
+ }
+ return c, err
+}
+
+// LoadArchive loads from a reader containing a compressed tar archive.
+func LoadArchive(in io.Reader) (*chart.Chart, error) {
+ files, err := archive.LoadArchiveFiles(in)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/helm/pkg/chart/v2/loader/directory.go b/helm/pkg/chart/v2/loader/directory.go
new file mode 100644
index 000000000..82578d924
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/directory.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/sympath"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/ignore"
+)
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// DirLoader loads a chart from a directory
+type DirLoader string
+
+// Load loads the chart
+func (l DirLoader) Load() (*chart.Chart, error) {
+ return LoadDir(string(l))
+}
+
+// LoadDir loads from a directory.
+//
+// This loads charts only from directories.
+func LoadDir(dir string) (*chart.Chart, error) {
+ topdir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ // Just used for errors.
+ c := &chart.Chart{}
+
+ rules := ignore.Empty()
+ ifile := filepath.Join(topdir, ignore.HelmIgnore)
+ if _, err := os.Stat(ifile); err == nil {
+ r, err := ignore.ParseFile(ifile)
+ if err != nil {
+ return c, err
+ }
+ rules = r
+ }
+ rules.AddDefaults()
+
+ files := []*archive.BufferedFile{}
+ topdir += string(filepath.Separator)
+
+ walk := func(name string, fi os.FileInfo, err error) error {
+ n := strings.TrimPrefix(name, topdir)
+ if n == "" {
+ // No need to process top level. Avoid bug with helmignore .* matching
+ // empty names. See issue 1779.
+ return nil
+ }
+
+ // Normalize to / since it will also work on Windows
+ n = filepath.ToSlash(n)
+
+ if err != nil {
+ return err
+ }
+ if fi.IsDir() {
+ // Directory-based ignore rules should involve skipping the entire
+ // contents of that directory.
+ if rules.Ignore(n, fi) {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ // If a .helmignore file matches, skip this file.
+ if rules.Ignore(n, fi) {
+ return nil
+ }
+
+ // Irregular files include devices, sockets, and other uses of files that
+ // are not regular files. In Go they have a file mode type bit set.
+ // See https://golang.org/pkg/os/#FileMode for examples.
+ if !fi.Mode().IsRegular() {
+ return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name)
+ }
+
+ if fi.Size() > archive.MaxDecompressedFileSize {
+ return fmt.Errorf("chart file %q is larger than the maximum file size %d", fi.Name(), archive.MaxDecompressedFileSize)
+ }
+
+ data, err := os.ReadFile(name)
+ if err != nil {
+ return fmt.Errorf("error reading %s: %w", n, err)
+ }
+
+ data = bytes.TrimPrefix(data, utf8bom)
+
+ files = append(files, &archive.BufferedFile{Name: n, ModTime: fi.ModTime(), Data: data})
+ return nil
+ }
+ if err = sympath.Walk(topdir, walk); err != nil {
+ return c, err
+ }
+
+ return LoadFiles(files)
+}
diff --git a/helm/pkg/chart/v2/loader/load.go b/helm/pkg/chart/v2/loader/load.go
new file mode 100644
index 000000000..d466e247c
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/load.go
@@ -0,0 +1,249 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "maps"
+ "os"
+ "path/filepath"
+ "strings"
+
+ utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// ChartLoader loads a chart.
+type ChartLoader interface {
+ Load() (*chart.Chart, error)
+}
+
+// Loader returns a new ChartLoader appropriate for the given chart name
+func Loader(name string) (ChartLoader, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return DirLoader(name), nil
+ }
+ return FileLoader(name), nil
+}
+
+// Load takes a string name, tries to resolve it to a file or directory, and then loads it.
+//
+// This is the preferred way to load a chart. It will discover the chart encoding
+// and hand off to the appropriate chart reader.
+//
+// If a .helmignore file is present, the directory loader will skip loading any files
+// matching it. But .helmignore is not evaluated when reading out of an archive.
+func Load(name string) (*chart.Chart, error) {
+ l, err := Loader(name)
+ if err != nil {
+ return nil, err
+ }
+ return l.Load()
+}
+
+// LoadFiles loads from in-memory files.
+func LoadFiles(files []*archive.BufferedFile) (*chart.Chart, error) {
+ c := new(chart.Chart)
+ subcharts := make(map[string][]*archive.BufferedFile)
+
+ // do not rely on assumed ordering of files in the chart and crash
+ // if Chart.yaml was not coming early enough to initialize metadata
+ for _, f := range files {
+ c.Raw = append(c.Raw, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ if f.Name == "Chart.yaml" {
+ if c.Metadata == nil {
+ c.Metadata = new(chart.Metadata)
+ }
+ if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
+ return c, fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ // NOTE(bacongobbler): while the chart specification says that APIVersion must be set,
+ // Helm 2 accepted charts that did not provide an APIVersion in their chart metadata.
+ // Because of that, if APIVersion is unset, we should assume we're loading a v1 chart.
+ if c.Metadata.APIVersion == "" {
+ c.Metadata.APIVersion = chart.APIVersionV1
+ }
+ c.ModTime = f.ModTime
+ }
+ }
+ for _, f := range files {
+ switch {
+ case f.Name == "Chart.yaml":
+ // already processed
+ continue
+ case f.Name == "Chart.lock":
+ c.Lock = new(chart.Lock)
+ if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
+ return c, fmt.Errorf("cannot load Chart.lock: %w", err)
+ }
+ case f.Name == "values.yaml":
+ values, err := LoadValues(bytes.NewReader(f.Data))
+ if err != nil {
+ return c, fmt.Errorf("cannot load values.yaml: %w", err)
+ }
+ c.Values = values
+ case f.Name == "values.schema.json":
+ c.Schema = f.Data
+ c.SchemaModTime = f.ModTime
+
+ // Deprecated: requirements.yaml is deprecated use Chart.yaml.
+ // We will handle it for you because we are nice people
+ case f.Name == "requirements.yaml":
+ if c.Metadata == nil {
+ c.Metadata = new(chart.Metadata)
+ }
+ if c.Metadata.APIVersion != chart.APIVersionV1 {
+ log.Printf("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.")
+ }
+ if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil {
+ return c, fmt.Errorf("cannot load requirements.yaml: %w", err)
+ }
+ if c.Metadata.APIVersion == chart.APIVersionV1 {
+ c.Files = append(c.Files, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ }
+ // Deprecated: requirements.lock is deprecated use Chart.lock.
+ case f.Name == "requirements.lock":
+ c.Lock = new(chart.Lock)
+ if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil {
+ return c, fmt.Errorf("cannot load requirements.lock: %w", err)
+ }
+ if c.Metadata == nil {
+ c.Metadata = new(chart.Metadata)
+ }
+ if c.Metadata.APIVersion != chart.APIVersionV1 {
+ log.Printf("Warning: Dependency locking is handled in Chart.lock since apiVersion \"v2\". We recommend migrating to Chart.lock.")
+ }
+ if c.Metadata.APIVersion == chart.APIVersionV1 {
+ c.Files = append(c.Files, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ }
+
+ case strings.HasPrefix(f.Name, "templates/"):
+ c.Templates = append(c.Templates, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ case strings.HasPrefix(f.Name, "charts/"):
+ if filepath.Ext(f.Name) == ".prov" {
+ c.Files = append(c.Files, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ continue
+ }
+
+ fname := strings.TrimPrefix(f.Name, "charts/")
+ cname := strings.SplitN(fname, "/", 2)[0]
+ subcharts[cname] = append(subcharts[cname], &archive.BufferedFile{Name: fname, ModTime: f.ModTime, Data: f.Data})
+ default:
+ c.Files = append(c.Files, &common.File{Name: f.Name, ModTime: f.ModTime, Data: f.Data})
+ }
+ }
+
+ if c.Metadata == nil {
+ return c, errors.New("Chart.yaml file is missing") //nolint:staticcheck
+ }
+
+ if err := c.Validate(); err != nil {
+ return c, err
+ }
+
+ for n, files := range subcharts {
+ var sc *chart.Chart
+ var err error
+ switch {
+ case strings.IndexAny(n, "_.") == 0:
+ continue
+ case filepath.Ext(n) == ".tgz":
+ file := files[0]
+ if file.Name != n {
+ return c, fmt.Errorf("error unpacking subchart tar in %s: expected %s, got %s", c.Name(), n, file.Name)
+ }
+ // Untar the chart and add to c.Dependencies
+ sc, err = LoadArchive(bytes.NewBuffer(file.Data))
+ default:
+ // We have to trim the prefix off of every file, and ignore any file
+ // that is in charts/, but isn't actually a chart.
+ buff := make([]*archive.BufferedFile, 0, len(files))
+ for _, f := range files {
+ parts := strings.SplitN(f.Name, "/", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ f.Name = parts[1]
+ buff = append(buff, f)
+ }
+ sc, err = LoadFiles(buff)
+ }
+
+ if err != nil {
+ return c, fmt.Errorf("error unpacking subchart %s in %s: %w", n, c.Name(), err)
+ }
+ c.AddDependency(sc)
+ }
+
+ return c, nil
+}
+
+// LoadValues loads values from a reader.
+//
+// The reader is expected to contain one or more YAML documents, the values of which are merged.
+// And the values can be either a chart's default values or user-supplied values.
+func LoadValues(data io.Reader) (map[string]interface{}, error) {
+ values := map[string]interface{}{}
+ reader := utilyaml.NewYAMLReader(bufio.NewReader(data))
+ for {
+ currentMap := map[string]interface{}{}
+ raw, err := reader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return nil, fmt.Errorf("error reading yaml document: %w", err)
+ }
+ if err := yaml.Unmarshal(raw, ¤tMap); err != nil {
+ return nil, fmt.Errorf("cannot unmarshal yaml document: %w", err)
+ }
+ values = MergeMaps(values, currentMap)
+ }
+ return values, nil
+}
+
+// MergeMaps merges two maps. If a key exists in both maps, the value from b will be used.
+// If the value is a map, the maps will be merged recursively.
+func MergeMaps(a, b map[string]interface{}) map[string]interface{} {
+ out := make(map[string]interface{}, len(a))
+ maps.Copy(out, a)
+ for k, v := range b {
+ if v, ok := v.(map[string]interface{}); ok {
+ if bv, ok := out[k]; ok {
+ if bv, ok := bv.(map[string]interface{}); ok {
+ out[k] = MergeMaps(bv, v)
+ continue
+ }
+ }
+ }
+ out[k] = v
+ }
+ return out
+}
diff --git a/helm/pkg/chart/v2/loader/load_test.go b/helm/pkg/chart/v2/loader/load_test.go
new file mode 100644
index 000000000..397745dd6
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/load_test.go
@@ -0,0 +1,779 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+func TestLoadDir(t *testing.T) {
+ l, err := Loader("testdata/frobnitz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadDirWithDevNull(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("test only works on unix systems with /dev/null present")
+ }
+
+ l, err := Loader("testdata/frobnitz_with_dev_null")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ if _, err := l.Load(); err == nil {
+ t.Errorf("packages with an irregular file (/dev/null) should not load")
+ }
+}
+
+func TestLoadDirWithSymlink(t *testing.T) {
+ sym := filepath.Join("..", "LICENSE")
+ link := filepath.Join("testdata", "frobnitz_with_symlink", "LICENSE")
+
+ if err := os.Symlink(sym, link); err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(link)
+
+ l, err := Loader("testdata/frobnitz_with_symlink")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestBomTestData(t *testing.T) {
+ testFiles := []string{"frobnitz_with_bom/.helmignore", "frobnitz_with_bom/templates/template.tpl", "frobnitz_with_bom/Chart.yaml"}
+ for _, file := range testFiles {
+ data, err := os.ReadFile("testdata/" + file)
+ if err != nil || !bytes.HasPrefix(data, utf8bom) {
+ t.Errorf("Test file has no BOM or is invalid: testdata/%s", file)
+ }
+ }
+
+ archive, err := os.ReadFile("testdata/frobnitz_with_bom.tgz")
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ unzipped, err := gzip.NewReader(bytes.NewReader(archive))
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ defer unzipped.Close()
+ for _, testFile := range testFiles {
+ data := make([]byte, 3)
+ err := unzipped.Reset(bytes.NewReader(archive))
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ tr := tar.NewReader(unzipped)
+ for {
+ file, err := tr.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ }
+ if file != nil && strings.EqualFold(file.Name, testFile) {
+ _, err := tr.Read(data)
+ if err != nil {
+ t.Fatalf("Error reading archive frobnitz_with_bom.tgz: %s", err)
+ } else {
+ break
+ }
+ }
+ }
+ if !bytes.Equal(data, utf8bom) {
+ t.Fatalf("Test file has no BOM or is invalid: frobnitz_with_bom.tgz/%s", testFile)
+ }
+ }
+}
+
+func TestLoadDirWithUTFBOM(t *testing.T) {
+ l, err := Loader("testdata/frobnitz_with_bom")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+ verifyBomStripped(t, c.Files)
+}
+
+func TestLoadArchiveWithUTFBOM(t *testing.T) {
+ l, err := Loader("testdata/frobnitz_with_bom.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+ verifyBomStripped(t, c.Files)
+}
+
+func TestLoadV1(t *testing.T) {
+ l, err := Loader("testdata/frobnitz.v1")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadFileV1(t *testing.T) {
+ l, err := Loader("testdata/frobnitz.v1.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadFile(t *testing.T) {
+ l, err := Loader("testdata/frobnitz-1.2.3.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyFrobnitz(t, c)
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+}
+
+func TestLoadFiles_BadCases(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ bufferedFiles []*archive.BufferedFile
+ expectError string
+ }{
+ {
+ name: "These files contain only requirements.lock",
+ bufferedFiles: []*archive.BufferedFile{
+ {
+ Name: "requirements.lock",
+ ModTime: time.Now(),
+ Data: []byte(""),
+ },
+ },
+ expectError: "validation: chart.metadata.apiVersion is required"},
+ } {
+ _, err := LoadFiles(tt.bufferedFiles)
+ if err == nil {
+ t.Fatal("expected error when load illegal files")
+ }
+ if !strings.Contains(err.Error(), tt.expectError) {
+ t.Errorf("Expected error to contain %q, got %q for %s", tt.expectError, err.Error(), tt.name)
+ }
+ }
+}
+
+func TestLoadFiles(t *testing.T) {
+ modTime := time.Now()
+ goodFiles := []*archive.BufferedFile{
+ {
+ Name: "Chart.yaml",
+ ModTime: modTime,
+ Data: []byte(`apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+`),
+ },
+ {
+ Name: "values.yaml",
+ ModTime: modTime,
+ Data: []byte("var: some values"),
+ },
+ {
+ Name: "values.schema.json",
+ ModTime: modTime,
+ Data: []byte("type: Values"),
+ },
+ {
+ Name: "templates/deployment.yaml",
+ ModTime: modTime,
+ Data: []byte("some deployment"),
+ },
+ {
+ Name: "templates/service.yaml",
+ ModTime: modTime,
+ Data: []byte("some service"),
+ },
+ }
+
+ c, err := LoadFiles(goodFiles)
+ if err != nil {
+ t.Errorf("Expected good files to be loaded, got %v", err)
+ }
+
+ if c.Name() != "frobnitz" {
+ t.Errorf("Expected chart name to be 'frobnitz', got %s", c.Name())
+ }
+
+ if c.Values["var"] != "some values" {
+ t.Error("Expected chart values to be populated with default values")
+ }
+
+ if len(c.Raw) != 5 {
+ t.Errorf("Expected %d files, got %d", 5, len(c.Raw))
+ }
+
+ if !bytes.Equal(c.Schema, []byte("type: Values")) {
+ t.Error("Expected chart schema to be populated with default values")
+ }
+
+ if len(c.Templates) != 2 {
+ t.Errorf("Expected number of templates == 2, got %d", len(c.Templates))
+ }
+
+ if _, err = LoadFiles([]*archive.BufferedFile{}); err == nil {
+ t.Fatal("Expected err to be non-nil")
+ }
+ if err.Error() != "Chart.yaml file is missing" {
+ t.Errorf("Expected chart metadata missing error, got '%s'", err.Error())
+ }
+}
+
+// Test the order of file loading. The Chart.yaml file needs to come first for
+// later comparison checks. See https://github.com/helm/helm/pull/8948
+func TestLoadFilesOrder(t *testing.T) {
+ modTime := time.Now()
+ goodFiles := []*archive.BufferedFile{
+ {
+ Name: "requirements.yaml",
+ ModTime: modTime,
+ Data: []byte("dependencies:"),
+ },
+ {
+ Name: "values.yaml",
+ ModTime: modTime,
+ Data: []byte("var: some values"),
+ },
+
+ {
+ Name: "templates/deployment.yaml",
+ ModTime: modTime,
+ Data: []byte("some deployment"),
+ },
+ {
+ Name: "templates/service.yaml",
+ ModTime: modTime,
+ Data: []byte("some service"),
+ },
+ {
+ Name: "Chart.yaml",
+ ModTime: modTime,
+ Data: []byte(`apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+`),
+ },
+ }
+
+ // Capture stderr to make sure message about Chart.yaml handle dependencies
+ // is not present
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("Unable to create pipe: %s", err)
+ }
+ stderr := log.Writer()
+ log.SetOutput(w)
+ defer func() {
+ log.SetOutput(stderr)
+ }()
+
+ _, err = LoadFiles(goodFiles)
+ if err != nil {
+ t.Errorf("Expected good files to be loaded, got %v", err)
+ }
+ w.Close()
+
+ var text bytes.Buffer
+ io.Copy(&text, r)
+ if text.String() != "" {
+ t.Errorf("Expected no message to Stderr, got %s", text.String())
+ }
+
+}
+
+// Packaging the chart on a Windows machine will produce an
+// archive that has \\ as delimiters. Test that we support these archives
+func TestLoadFileBackslash(t *testing.T) {
+ c, err := Load("testdata/frobnitz_backslash-1.2.3.tgz")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyChartFileAndTemplate(t, c, "frobnitz_backslash")
+ verifyChart(t, c)
+ verifyDependencies(t, c)
+}
+
+func TestLoadV2WithReqs(t *testing.T) {
+ l, err := Loader("testdata/frobnitz.v2.reqs")
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ c, err := l.Load()
+ if err != nil {
+ t.Fatalf("Failed to load testdata: %s", err)
+ }
+ verifyDependencies(t, c)
+ verifyDependenciesLock(t, c)
+}
+
+func TestLoadInvalidArchive(t *testing.T) {
+ tmpdir := t.TempDir()
+
+ writeTar := func(filename, internalPath string, body []byte) {
+ dest, err := os.Create(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ zipper := gzip.NewWriter(dest)
+ tw := tar.NewWriter(zipper)
+
+ h := &tar.Header{
+ Name: internalPath,
+ Mode: 0755,
+ Size: int64(len(body)),
+ ModTime: time.Now(),
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := tw.Write(body); err != nil {
+ t.Fatal(err)
+ }
+ tw.Close()
+ zipper.Close()
+ dest.Close()
+ }
+
+ for _, tt := range []struct {
+ chartname string
+ internal string
+ expectError string
+ }{
+ {"illegal-dots.tgz", "../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots2.tgz", "/foo/../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots3.tgz", "/../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-dots4.tgz", "./../../malformed-helm-test", "chart illegally references parent directory"},
+ {"illegal-name.tgz", "./.", "chart illegally contains content outside the base directory"},
+ {"illegal-name2.tgz", "/./.", "chart illegally contains content outside the base directory"},
+ {"illegal-name3.tgz", "missing-leading-slash", "chart illegally contains content outside the base directory"},
+ {"illegal-name4.tgz", "/missing-leading-slash", "Chart.yaml file is missing"},
+ {"illegal-abspath.tgz", "//foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath2.tgz", "///foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath3.tgz", "\\\\foo", "chart illegally contains absolute paths"},
+ {"illegal-abspath3.tgz", "\\..\\..\\foo", "chart illegally references parent directory"},
+
+ // Under special circumstances, this can get normalized to things that look like absolute Windows paths
+ {"illegal-abspath4.tgz", "\\.\\c:\\\\foo", "chart contains illegally named files"},
+ {"illegal-abspath5.tgz", "/./c://foo", "chart contains illegally named files"},
+ {"illegal-abspath6.tgz", "\\\\?\\Some\\windows\\magic", "chart illegally contains absolute paths"},
+ } {
+ illegalChart := filepath.Join(tmpdir, tt.chartname)
+ writeTar(illegalChart, tt.internal, []byte("hello: world"))
+ _, err := Load(illegalChart)
+ if err == nil {
+ t.Fatal("expected error when unpacking illegal files")
+ }
+ if !strings.Contains(err.Error(), tt.expectError) {
+ t.Errorf("Expected error to contain %q, got %q for %s", tt.expectError, err.Error(), tt.chartname)
+ }
+ }
+
+ // Make sure that absolute path gets interpreted as relative
+ illegalChart := filepath.Join(tmpdir, "abs-path.tgz")
+ writeTar(illegalChart, "/Chart.yaml", []byte("hello: world"))
+ _, err := Load(illegalChart)
+ if err.Error() != "validation: chart.metadata.name is required" {
+ t.Error(err)
+ }
+
+ // And just to validate that the above was not spurious
+ illegalChart = filepath.Join(tmpdir, "abs-path2.tgz")
+ writeTar(illegalChart, "files/whatever.yaml", []byte("hello: world"))
+ _, err = Load(illegalChart)
+ if err.Error() != "Chart.yaml file is missing" {
+ t.Errorf("Unexpected error message: %s", err)
+ }
+
+ // Finally, test that drive letter gets stripped off on Windows
+ illegalChart = filepath.Join(tmpdir, "abs-winpath.tgz")
+ writeTar(illegalChart, "c:\\Chart.yaml", []byte("hello: world"))
+ _, err = Load(illegalChart)
+ if err.Error() != "validation: chart.metadata.name is required" {
+ t.Error(err)
+ }
+}
+
+func TestLoadValues(t *testing.T) {
+ testCases := map[string]struct {
+ data []byte
+ expctedValues map[string]interface{}
+ }{
+ "It should load values correctly": {
+ data: []byte(`
+foo:
+ image: foo:v1
+bar:
+ version: v2
+`),
+ expctedValues: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "image": "foo:v1",
+ },
+ "bar": map[string]interface{}{
+ "version": "v2",
+ },
+ },
+ },
+ "It should load values correctly with multiple documents in one file": {
+ data: []byte(`
+foo:
+ image: foo:v1
+bar:
+ version: v2
+---
+foo:
+ image: foo:v2
+`),
+ expctedValues: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "image": "foo:v2",
+ },
+ "bar": map[string]interface{}{
+ "version": "v2",
+ },
+ },
+ },
+ }
+ for testName, testCase := range testCases {
+ t.Run(testName, func(tt *testing.T) {
+ values, err := LoadValues(bytes.NewReader(testCase.data))
+ if err != nil {
+ tt.Fatal(err)
+ }
+ if !reflect.DeepEqual(values, testCase.expctedValues) {
+ tt.Errorf("Expected values: %v, got %v", testCase.expctedValues, values)
+ }
+ })
+ }
+}
+
+func TestMergeValuesV2(t *testing.T) {
+ nestedMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "stuff",
+ },
+ }
+ anotherNestedMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "things",
+ "awesome": "stuff",
+ },
+ }
+ flatMap := map[string]interface{}{
+ "foo": "bar",
+ "baz": "stuff",
+ }
+ anotherFlatMap := map[string]interface{}{
+ "testing": "fun",
+ }
+
+ testMap := MergeMaps(flatMap, nestedMap)
+ equal := reflect.DeepEqual(testMap, nestedMap)
+ if !equal {
+ t.Errorf("Expected a nested map to overwrite a flat value. Expected: %v, got %v", nestedMap, testMap)
+ }
+
+ testMap = MergeMaps(nestedMap, flatMap)
+ equal = reflect.DeepEqual(testMap, flatMap)
+ if !equal {
+ t.Errorf("Expected a flat value to overwrite a map. Expected: %v, got %v", flatMap, testMap)
+ }
+
+ testMap = MergeMaps(nestedMap, anotherNestedMap)
+ equal = reflect.DeepEqual(testMap, anotherNestedMap)
+ if !equal {
+ t.Errorf("Expected a nested map to overwrite another nested map. Expected: %v, got %v", anotherNestedMap, testMap)
+ }
+
+ testMap = MergeMaps(anotherFlatMap, anotherNestedMap)
+ expectedMap := map[string]interface{}{
+ "testing": "fun",
+ "foo": "bar",
+ "baz": map[string]string{
+ "cool": "things",
+ "awesome": "stuff",
+ },
+ }
+ equal = reflect.DeepEqual(testMap, expectedMap)
+ if !equal {
+ t.Errorf("Expected a map with different keys to merge properly with another map. Expected: %v, got %v", expectedMap, testMap)
+ }
+}
+
+func verifyChart(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if c.Name() == "" {
+ t.Fatalf("No chart metadata found on %v", c)
+ }
+ t.Logf("Verifying chart %s", c.Name())
+ if len(c.Templates) != 1 {
+ t.Errorf("Expected 1 template, got %d", len(c.Templates))
+ }
+
+ numfiles := 6
+ if len(c.Files) != numfiles {
+ t.Errorf("Expected %d extra files, got %d", numfiles, len(c.Files))
+ for _, n := range c.Files {
+ t.Logf("\t%s", n.Name)
+ }
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d (%v)", len(c.Dependencies()), c.Dependencies())
+ for _, d := range c.Dependencies() {
+ t.Logf("\tSubchart: %s\n", d.Name())
+ }
+ }
+
+ expect := map[string]map[string]string{
+ "alpine": {
+ "version": "0.1.0",
+ },
+ "mariner": {
+ "version": "4.3.2",
+ },
+ }
+
+ for _, dep := range c.Dependencies() {
+ if dep.Metadata == nil {
+ t.Fatalf("expected metadata on dependency: %v", dep)
+ }
+ exp, ok := expect[dep.Name()]
+ if !ok {
+ t.Fatalf("Unknown dependency %s", dep.Name())
+ }
+ if exp["version"] != dep.Metadata.Version {
+ t.Errorf("Expected %s version %s, got %s", dep.Name(), exp["version"], dep.Metadata.Version)
+ }
+ }
+
+}
+
+func verifyDependencies(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
+ }
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+ for i, tt := range tests {
+ d := c.Metadata.Dependencies[i]
+ if d.Name != tt.Name {
+ t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
+ }
+ if d.Version != tt.Version {
+ t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
+ }
+ if d.Repository != tt.Repository {
+ t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
+ }
+ }
+}
+
+func verifyDependenciesLock(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Errorf("Expected 2 dependencies, got %d", len(c.Metadata.Dependencies))
+ }
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+ for i, tt := range tests {
+ d := c.Metadata.Dependencies[i]
+ if d.Name != tt.Name {
+ t.Errorf("Expected dependency named %q, got %q", tt.Name, d.Name)
+ }
+ if d.Version != tt.Version {
+ t.Errorf("Expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, d.Version)
+ }
+ if d.Repository != tt.Repository {
+ t.Errorf("Expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, d.Repository)
+ }
+ }
+}
+
+func verifyFrobnitz(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ verifyChartFileAndTemplate(t, c, "frobnitz")
+}
+
+func verifyChartFileAndTemplate(t *testing.T, c *chart.Chart, name string) {
+ t.Helper()
+ if c.Metadata == nil {
+ t.Fatal("Metadata is nil")
+ }
+ if c.Name() != name {
+ t.Errorf("Expected %s, got %s", name, c.Name())
+ }
+ if len(c.Templates) != 1 {
+ t.Fatalf("Expected 1 template, got %d", len(c.Templates))
+ }
+ if c.Templates[0].Name != "templates/template.tpl" {
+ t.Errorf("Unexpected template: %s", c.Templates[0].Name)
+ }
+ if len(c.Templates[0].Data) == 0 {
+ t.Error("No template data.")
+ }
+ if len(c.Files) != 6 {
+ t.Fatalf("Expected 6 Files, got %d", len(c.Files))
+ }
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("Expected 2 Dependency, got %d", len(c.Dependencies()))
+ }
+ if len(c.Metadata.Dependencies) != 2 {
+ t.Fatalf("Expected 2 Dependencies.Dependency, got %d", len(c.Metadata.Dependencies))
+ }
+ if len(c.Lock.Dependencies) != 2 {
+ t.Fatalf("Expected 2 Lock.Dependency, got %d", len(c.Lock.Dependencies))
+ }
+
+ for _, dep := range c.Dependencies() {
+ switch dep.Name() {
+ case "mariner":
+ case "alpine":
+ if len(dep.Templates) != 1 {
+ t.Fatalf("Expected 1 template, got %d", len(dep.Templates))
+ }
+ if dep.Templates[0].Name != "templates/alpine-pod.yaml" {
+ t.Errorf("Unexpected template: %s", dep.Templates[0].Name)
+ }
+ if len(dep.Templates[0].Data) == 0 {
+ t.Error("No template data.")
+ }
+ if len(dep.Files) != 1 {
+ t.Fatalf("Expected 1 Files, got %d", len(dep.Files))
+ }
+ if len(dep.Dependencies()) != 2 {
+ t.Fatalf("Expected 2 Dependency, got %d", len(dep.Dependencies()))
+ }
+ default:
+ t.Errorf("Unexpected dependency %s", dep.Name())
+ }
+ }
+}
+
+func verifyBomStripped(t *testing.T, files []*common.File) {
+ t.Helper()
+ for _, file := range files {
+ if bytes.HasPrefix(file.Data, utf8bom) {
+ t.Errorf("Byte Order Mark still present in processed file %s", file.Name)
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/loader/testdata/LICENSE b/helm/pkg/chart/v2/loader/testdata/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/albatross/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/albatross/Chart.yaml
new file mode 100644
index 000000000..eeef737ff
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/albatross/Chart.yaml
@@ -0,0 +1,4 @@
+name: albatross
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/albatross/values.yaml b/helm/pkg/chart/v2/loader/testdata/albatross/values.yaml
new file mode 100644
index 000000000..3121cd7ce
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/albatross/values.yaml
@@ -0,0 +1,4 @@
+albatross: "true"
+
+global:
+ author: Coleridge
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz-1.2.3.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..b2b76a83c
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz-1.2.3.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1.tgz
new file mode 100644
index 000000000..6282f9b73
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/Chart.lock b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/Chart.yaml
new file mode 100644
index 000000000..134cd1109
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/LICENSE b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/requirements.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/requirements.yaml
new file mode 100644
index 000000000..5eb0bc98b
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/requirements.yaml
@@ -0,0 +1,7 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v1/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/Chart.yaml
new file mode 100644
index 000000000..f3ab30291
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/LICENSE b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/requirements.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/requirements.yaml
new file mode 100644
index 000000000..5eb0bc98b
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/requirements.yaml
@@ -0,0 +1,7 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz.v2.reqs/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/Chart.lock b/helm/pkg/chart/v2/loader/testdata/frobnitz/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/Chart.yaml
new file mode 100644
index 000000000..fcd4a4a37
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/LICENSE b/helm/pkg/chart/v2/loader/testdata/frobnitz/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash-1.2.3.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash-1.2.3.tgz
new file mode 100644
index 000000000..a9d4c11d8
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash-1.2.3.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/.helmignore
new file mode 100755
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/Chart.lock b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/Chart.lock
new file mode 100755
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/Chart.yaml
new file mode 100755
index 000000000..b1dd40a5d
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+name: frobnitz_backslash
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/INSTALL.txt
new file mode 100755
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/LICENSE b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/LICENSE
new file mode 100755
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/README.md
new file mode 100755
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/_ignore_me
new file mode 100755
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml
new file mode 100755
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/README.md
new file mode 100755
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml
new file mode 100755
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml
new file mode 100755
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100755
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml
new file mode 100755
index 000000000..0ac5ca6a8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service | quote }}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml
new file mode 100755
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz
new file mode 100755
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/docs/README.md
new file mode 100755
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/icon.svg
new file mode 100755
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/ignore/me.txt
new file mode 100755
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/templates/template.tpl
new file mode 100755
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/values.yaml
new file mode 100755
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_backslash/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom.tgz
new file mode 100644
index 000000000..be0cd027d
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/.helmignore
new file mode 100644
index 000000000..7a4b92da2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/Chart.lock b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/Chart.lock
new file mode 100644
index 000000000..ed43b227f
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/Chart.yaml
new file mode 100644
index 000000000..21b21f0b5
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/INSTALL.txt
new file mode 100644
index 000000000..77c4e724a
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/LICENSE b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/LICENSE
new file mode 100644
index 000000000..c27b00bf2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/README.md
new file mode 100644
index 000000000..e9c40031b
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/_ignore_me
new file mode 100644
index 000000000..a7e3a38b7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..adb9853c6
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/README.md
new file mode 100644
index 000000000..ea7526bee
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1ad84b346
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..f690d53c4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..f3e662a28
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml
new file mode 100644
index 000000000..6b7cb2596
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/docs/README.md
new file mode 100644
index 000000000..816c3e431
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/templates/template.tpl
new file mode 100644
index 000000000..bb29c5491
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/values.yaml
new file mode 100644
index 000000000..c24ceadf9
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_bom/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/Chart.lock b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/Chart.yaml
new file mode 100644
index 000000000..fcd4a4a37
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/LICENSE b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/null b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/null
new file mode 120000
index 000000000..dc1dc0cde
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/null
@@ -0,0 +1 @@
+/dev/null
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_dev_null/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/.helmignore b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/Chart.lock b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/Chart.yaml
new file mode 100644
index 000000000..fcd4a4a37
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/INSTALL.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/_ignore_me b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..21ae20aad
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ app.kubernetes.io/name: {{.Chart.Name}}
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/docs/README.md b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/icon.svg b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/ignore/me.txt b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/templates/template.tpl b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/values.yaml b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/frobnitz_with_symlink/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/loader/testdata/genfrob.sh b/helm/pkg/chart/v2/loader/testdata/genfrob.sh
new file mode 100755
index 000000000..35fdd59f2
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/genfrob.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Pack the albatross chart into the mariner chart.
+echo "Packing albatross into mariner"
+tar -zcvf mariner/charts/albatross-0.1.0.tgz albatross
+
+echo "Packing mariner into frobnitz"
+tar -zcvf frobnitz/charts/mariner-4.3.2.tgz mariner
+tar -zcvf frobnitz_backslash/charts/mariner-4.3.2.tgz mariner
+
+# Pack the frobnitz chart.
+echo "Packing frobnitz"
+tar --exclude=ignore/* -zcvf frobnitz-1.2.3.tgz frobnitz
+tar --exclude=ignore/* -zcvf frobnitz_backslash-1.2.3.tgz frobnitz_backslash
diff --git a/helm/pkg/chart/v2/loader/testdata/mariner/Chart.yaml b/helm/pkg/chart/v2/loader/testdata/mariner/Chart.yaml
new file mode 100644
index 000000000..92dc4b390
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/mariner/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+name: mariner
+description: A Helm chart for Kubernetes
+version: 4.3.2
+home: ""
+dependencies:
+ - name: albatross
+ repository: https://example.com/mariner/charts
+ version: "0.1.0"
diff --git a/helm/pkg/chart/v2/loader/testdata/mariner/charts/albatross-0.1.0.tgz b/helm/pkg/chart/v2/loader/testdata/mariner/charts/albatross-0.1.0.tgz
new file mode 100644
index 000000000..128ef82f7
Binary files /dev/null and b/helm/pkg/chart/v2/loader/testdata/mariner/charts/albatross-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/loader/testdata/mariner/templates/placeholder.tpl b/helm/pkg/chart/v2/loader/testdata/mariner/templates/placeholder.tpl
new file mode 100644
index 000000000..29c11843a
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/mariner/templates/placeholder.tpl
@@ -0,0 +1 @@
+# This is a placeholder.
diff --git a/helm/pkg/chart/v2/loader/testdata/mariner/values.yaml b/helm/pkg/chart/v2/loader/testdata/mariner/values.yaml
new file mode 100644
index 000000000..b0ccb0086
--- /dev/null
+++ b/helm/pkg/chart/v2/loader/testdata/mariner/values.yaml
@@ -0,0 +1,7 @@
+# Default values for .
+# This is a YAML-formatted file. https://github.com/toml-lang/toml
+# Declare name/value pairs to be passed into your templates.
+# name: "value"
+
+:
+ test: true
diff --git a/helm/pkg/chart/v2/metadata.go b/helm/pkg/chart/v2/metadata.go
new file mode 100644
index 000000000..c46007863
--- /dev/null
+++ b/helm/pkg/chart/v2/metadata.go
@@ -0,0 +1,178 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+import (
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+// Maintainer describes a Chart maintainer.
+type Maintainer struct {
+ // Name is a user name or organization name
+ Name string `json:"name,omitempty"`
+ // Email is an optional email address to contact the named maintainer
+ Email string `json:"email,omitempty"`
+ // URL is an optional URL to an address for the named maintainer
+ URL string `json:"url,omitempty"`
+}
+
+// Validate checks valid data and sanitizes string characters.
+func (m *Maintainer) Validate() error {
+ if m == nil {
+ return ValidationError("maintainers must not contain empty or null nodes")
+ }
+ m.Name = sanitizeString(m.Name)
+ m.Email = sanitizeString(m.Email)
+ m.URL = sanitizeString(m.URL)
+ return nil
+}
+
+// Metadata for a Chart file. This models the structure of a Chart.yaml file.
+type Metadata struct {
+ // The name of the chart. Required.
+ Name string `json:"name,omitempty"`
+ // The URL to a relevant project page, git repo, or contact person
+ Home string `json:"home,omitempty"`
+ // Source is the URL to the source code of this chart
+ Sources []string `json:"sources,omitempty"`
+ // A version string of the chart. Required.
+ Version string `json:"version,omitempty"`
+ // A one-sentence description of the chart
+ Description string `json:"description,omitempty"`
+ // A list of string keywords
+ Keywords []string `json:"keywords,omitempty"`
+ // A list of name and URL/email address combinations for the maintainer(s)
+ Maintainers []*Maintainer `json:"maintainers,omitempty"`
+ // The URL to an icon file.
+ Icon string `json:"icon,omitempty"`
+ // The API Version of this chart. Required.
+ APIVersion string `json:"apiVersion,omitempty"`
+ // The condition to check to enable chart
+ Condition string `json:"condition,omitempty"`
+ // The tags to check to enable chart
+ Tags string `json:"tags,omitempty"`
+ // The version of the application enclosed inside of this chart.
+ AppVersion string `json:"appVersion,omitempty"`
+ // Whether or not this chart is deprecated
+ Deprecated bool `json:"deprecated,omitempty"`
+ // Annotations are additional mappings uninterpreted by Helm,
+ // made available for inspection by other applications.
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
+ KubeVersion string `json:"kubeVersion,omitempty"`
+ // Dependencies are a list of dependencies for a chart.
+ Dependencies []*Dependency `json:"dependencies,omitempty"`
+ // Specifies the chart type: application or library
+ Type string `json:"type,omitempty"`
+}
+
+// Validate checks the metadata for known issues and sanitizes string
+// characters.
+func (md *Metadata) Validate() error {
+ if md == nil {
+ return ValidationError("chart.metadata is required")
+ }
+
+ md.Name = sanitizeString(md.Name)
+ md.Description = sanitizeString(md.Description)
+ md.Home = sanitizeString(md.Home)
+ md.Icon = sanitizeString(md.Icon)
+ md.Condition = sanitizeString(md.Condition)
+ md.Tags = sanitizeString(md.Tags)
+ md.AppVersion = sanitizeString(md.AppVersion)
+ md.KubeVersion = sanitizeString(md.KubeVersion)
+ for i := range md.Sources {
+ md.Sources[i] = sanitizeString(md.Sources[i])
+ }
+ for i := range md.Keywords {
+ md.Keywords[i] = sanitizeString(md.Keywords[i])
+ }
+
+ if md.APIVersion == "" {
+ return ValidationError("chart.metadata.apiVersion is required")
+ }
+ if md.Name == "" {
+ return ValidationError("chart.metadata.name is required")
+ }
+
+ if md.Name != filepath.Base(md.Name) {
+ return ValidationErrorf("chart.metadata.name %q is invalid", md.Name)
+ }
+
+ if md.Version == "" {
+ return ValidationError("chart.metadata.version is required")
+ }
+ if !isValidSemver(md.Version) {
+ return ValidationErrorf("chart.metadata.version %q is invalid", md.Version)
+ }
+ if !isValidChartType(md.Type) {
+ return ValidationError("chart.metadata.type must be application or library")
+ }
+
+ for _, m := range md.Maintainers {
+ if err := m.Validate(); err != nil {
+ return err
+ }
+ }
+
+ // Aliases need to be validated here to make sure that the alias name does
+ // not contain any illegal characters.
+ dependencies := map[string]*Dependency{}
+ for _, dependency := range md.Dependencies {
+ if err := dependency.Validate(); err != nil {
+ return err
+ }
+ key := dependency.Name
+ if dependency.Alias != "" {
+ key = dependency.Alias
+ }
+ if dependencies[key] != nil {
+ return ValidationErrorf("more than one dependency with name or alias %q", key)
+ }
+ dependencies[key] = dependency
+ }
+ return nil
+}
+
+func isValidChartType(in string) bool {
+ switch in {
+ case "", "application", "library":
+ return true
+ }
+ return false
+}
+
+func isValidSemver(v string) bool {
+ _, err := semver.NewVersion(v)
+ return err == nil
+}
+
+// sanitizeString normalize spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+ return strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return ' '
+ }
+ if unicode.IsPrint(r) {
+ return r
+ }
+ return -1
+ }, str)
+}
diff --git a/helm/pkg/chart/v2/metadata_test.go b/helm/pkg/chart/v2/metadata_test.go
new file mode 100644
index 000000000..7892f0209
--- /dev/null
+++ b/helm/pkg/chart/v2/metadata_test.go
@@ -0,0 +1,201 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v2
+
+import (
+ "testing"
+)
+
+func TestValidate(t *testing.T) {
+ tests := []struct {
+ name string
+ md *Metadata
+ err error
+ }{
+ {
+ "chart without metadata",
+ nil,
+ ValidationError("chart.metadata is required"),
+ },
+ {
+ "chart without apiVersion",
+ &Metadata{Name: "test", Version: "1.0"},
+ ValidationError("chart.metadata.apiVersion is required"),
+ },
+ {
+ "chart without name",
+ &Metadata{APIVersion: "v2", Version: "1.0"},
+ ValidationError("chart.metadata.name is required"),
+ },
+ {
+ "chart without name",
+ &Metadata{Name: "../../test", APIVersion: "v2", Version: "1.0"},
+ ValidationError("chart.metadata.name \"../../test\" is invalid"),
+ },
+ {
+ "chart without version",
+ &Metadata{Name: "test", APIVersion: "v2"},
+ ValidationError("chart.metadata.version is required"),
+ },
+ {
+ "chart with bad type",
+ &Metadata{Name: "test", APIVersion: "v2", Version: "1.0", Type: "test"},
+ ValidationError("chart.metadata.type must be application or library"),
+ },
+ {
+ "chart without dependency",
+ &Metadata{Name: "test", APIVersion: "v2", Version: "1.0", Type: "application"},
+ nil,
+ },
+ {
+ "dependency with valid alias",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "dependency", Alias: "legal-alias"},
+ },
+ },
+ nil,
+ },
+ {
+ "dependency with bad characters in alias",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "bad", Alias: "illegal alias"},
+ },
+ },
+ ValidationError("dependency \"bad\" has disallowed characters in the alias"),
+ },
+ {
+ "same dependency twice",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: ""},
+ {Name: "foo", Alias: ""},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ "two dependencies with alias from second dependency shadowing first one",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: ""},
+ {Name: "bar", Alias: "foo"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ // this case would make sense and could work in future versions of Helm, currently template rendering would
+ // result in undefined behaviour
+ "same dependency twice with different version",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Alias: "", Version: "1.2.3"},
+ {Name: "foo", Alias: "", Version: "1.0.0"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ // this case would make sense and could work in future versions of Helm, currently template rendering would
+ // result in undefined behaviour
+ "two dependencies with same name but different repos",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ {Name: "foo", Repository: "repo-0"},
+ {Name: "foo", Repository: "repo-1"},
+ },
+ },
+ ValidationError("more than one dependency with name or alias \"foo\""),
+ },
+ {
+ "dependencies has nil",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Dependencies: []*Dependency{
+ nil,
+ },
+ },
+ ValidationError("dependencies must not contain empty or null nodes"),
+ },
+ {
+ "maintainer not empty",
+ &Metadata{
+ Name: "test",
+ APIVersion: "v2",
+ Version: "1.0",
+ Type: "application",
+ Maintainers: []*Maintainer{
+ nil,
+ },
+ },
+ ValidationError("maintainers must not contain empty or null nodes"),
+ },
+ {
+ "version invalid",
+ &Metadata{APIVersion: "v2", Name: "test", Version: "1.2.3.4"},
+ ValidationError("chart.metadata.version \"1.2.3.4\" is invalid"),
+ },
+ }
+
+ for _, tt := range tests {
+ result := tt.md.Validate()
+ if result != tt.err {
+ t.Errorf("expected %q, got %q in test %q", tt.err, result, tt.name)
+ }
+ }
+}
+
+func TestValidate_sanitize(t *testing.T) {
+ md := &Metadata{APIVersion: "v2", Name: "test", Version: "1.0", Description: "\adescr\u0081iption\rtest", Maintainers: []*Maintainer{{Name: "\r"}}}
+ if err := md.Validate(); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if md.Description != "description test" {
+ t.Fatalf("description was not sanitized: %q", md.Description)
+ }
+ if md.Maintainers[0].Name != " " {
+ t.Fatal("maintainer name was not sanitized")
+ }
+}
diff --git a/helm/pkg/chart/v2/util/chartfile.go b/helm/pkg/chart/v2/util/chartfile.go
new file mode 100644
index 000000000..1f9c712b2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/chartfile.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// LoadChartfile loads a Chart.yaml file into a *chart.Metadata.
+func LoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.Unmarshal(b, y)
+ return y, err
+}
+
+// StrictLoadChartfile loads a Chart.yaml into a *chart.Metadata using a strict unmarshaling
+func StrictLoadChartfile(filename string) (*chart.Metadata, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ y := new(chart.Metadata)
+ err = yaml.UnmarshalStrict(b, y)
+ return y, err
+}
+
+// SaveChartfile saves the given metadata as a Chart.yaml file at the given path.
+//
+// 'filename' should be the complete path and filename ('foo/Chart.yaml')
+func SaveChartfile(filename string, cf *chart.Metadata) error {
+ // Pull out the dependencies of a v1 Chart, since there's no way
+ // to tell the serializer to skip a field for just this use case
+ savedDependencies := cf.Dependencies
+ if cf.APIVersion == chart.APIVersionV1 {
+ cf.Dependencies = nil
+ }
+ out, err := yaml.Marshal(cf)
+ if cf.APIVersion == chart.APIVersionV1 {
+ cf.Dependencies = savedDependencies
+ }
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(filename, out, 0644)
+}
+
+// IsChartDir validate a chart directory.
+//
+// Checks for a valid Chart.yaml.
+func IsChartDir(dirName string) (bool, error) {
+ if fi, err := os.Stat(dirName); err != nil {
+ return false, err
+ } else if !fi.IsDir() {
+ return false, fmt.Errorf("%q is not a directory", dirName)
+ }
+
+ chartYaml := filepath.Join(dirName, ChartfileName)
+ if _, err := os.Stat(chartYaml); errors.Is(err, fs.ErrNotExist) {
+ return false, fmt.Errorf("no %s exists in directory %q", ChartfileName, dirName)
+ }
+
+ chartYamlContent, err := os.ReadFile(chartYaml)
+ if err != nil {
+ return false, fmt.Errorf("cannot read %s in directory %q", ChartfileName, dirName)
+ }
+
+ chartContent := new(chart.Metadata)
+ if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil {
+ return false, err
+ }
+ if chartContent == nil {
+ return false, fmt.Errorf("chart metadata (%s) missing", ChartfileName)
+ }
+ if chartContent.Name == "" {
+ return false, fmt.Errorf("invalid chart (%s): name must not be empty", ChartfileName)
+ }
+
+ return true, nil
+}
diff --git a/helm/pkg/chart/v2/util/chartfile_test.go b/helm/pkg/chart/v2/util/chartfile_test.go
new file mode 100644
index 000000000..00c530b8a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/chartfile_test.go
@@ -0,0 +1,121 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+const testfile = "testdata/chartfiletest.yaml"
+
+func TestLoadChartfile(t *testing.T) {
+ f, err := LoadChartfile(testfile)
+ if err != nil {
+ t.Errorf("Failed to open %s: %s", testfile, err)
+ return
+ }
+ verifyChartfile(t, f, "frobnitz")
+}
+
+func verifyChartfile(t *testing.T, f *chart.Metadata, name string) {
+ t.Helper()
+ if f == nil { //nolint:staticcheck
+ t.Fatal("Failed verifyChartfile because f is nil")
+ }
+
+ if f.APIVersion != chart.APIVersionV1 { //nolint:staticcheck
+ t.Errorf("Expected API Version %q, got %q", chart.APIVersionV1, f.APIVersion)
+ }
+
+ if f.Name != name {
+ t.Errorf("Expected %s, got %s", name, f.Name)
+ }
+
+ if f.Description != "This is a frobnitz." {
+ t.Errorf("Unexpected description %q", f.Description)
+ }
+
+ if f.Version != "1.2.3" {
+ t.Errorf("Unexpected version %q", f.Version)
+ }
+
+ if len(f.Maintainers) != 2 {
+ t.Errorf("Expected 2 maintainers, got %d", len(f.Maintainers))
+ }
+
+ if f.Maintainers[0].Name != "The Helm Team" {
+ t.Errorf("Unexpected maintainer name.")
+ }
+
+ if f.Maintainers[1].Email != "nobody@example.com" {
+ t.Errorf("Unexpected maintainer email.")
+ }
+
+ if len(f.Sources) != 1 {
+ t.Fatalf("Unexpected number of sources")
+ }
+
+ if f.Sources[0] != "https://example.com/foo/bar" {
+ t.Errorf("Expected https://example.com/foo/bar, got %s", f.Sources)
+ }
+
+ if f.Home != "http://example.com" {
+ t.Error("Unexpected home.")
+ }
+
+ if f.Icon != "https://example.com/64x64.png" {
+ t.Errorf("Unexpected icon: %q", f.Icon)
+ }
+
+ if len(f.Keywords) != 3 {
+ t.Error("Unexpected keywords")
+ }
+
+ if len(f.Annotations) != 2 {
+ t.Fatalf("Unexpected annotations")
+ }
+
+ if want, got := "extravalue", f.Annotations["extrakey"]; want != got {
+ t.Errorf("Want %q, but got %q", want, got)
+ }
+
+ if want, got := "anothervalue", f.Annotations["anotherkey"]; want != got {
+ t.Errorf("Want %q, but got %q", want, got)
+ }
+
+ kk := []string{"frobnitz", "sprocket", "dodad"}
+ for i, k := range f.Keywords {
+ if kk[i] != k {
+ t.Errorf("Expected %q, got %q", kk[i], k)
+ }
+ }
+}
+
+func TestIsChartDir(t *testing.T) {
+ validChartDir, err := IsChartDir("testdata/frobnitz")
+ if !validChartDir {
+ t.Errorf("unexpected error while reading chart-directory: (%v)", err)
+ return
+ }
+ validChartDir, err = IsChartDir("testdata")
+ if validChartDir || err == nil {
+ t.Errorf("expected error but did not get any")
+ return
+ }
+}
diff --git a/helm/pkg/chart/v2/util/compatible.go b/helm/pkg/chart/v2/util/compatible.go
new file mode 100644
index 000000000..d384d2d45
--- /dev/null
+++ b/helm/pkg/chart/v2/util/compatible.go
@@ -0,0 +1,34 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "github.com/Masterminds/semver/v3"
+
+// IsCompatibleRange compares a version to a constraint.
+// It returns true if the version matches the constraint, and false in all other cases.
+func IsCompatibleRange(constraint, ver string) bool {
+ sv, err := semver.NewVersion(ver)
+ if err != nil {
+ return false
+ }
+
+ c, err := semver.NewConstraint(constraint)
+ if err != nil {
+ return false
+ }
+ return c.Check(sv)
+}
diff --git a/helm/pkg/chart/v2/util/compatible_test.go b/helm/pkg/chart/v2/util/compatible_test.go
new file mode 100644
index 000000000..e17d33e35
--- /dev/null
+++ b/helm/pkg/chart/v2/util/compatible_test.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version represents the current version of the project.
+package util
+
+import "testing"
+
+func TestIsCompatibleRange(t *testing.T) {
+ tests := []struct {
+ constraint string
+ ver string
+ expected bool
+ }{
+ {"v2.0.0-alpha.4", "v2.0.0-alpha.4", true},
+ {"v2.0.0-alpha.3", "v2.0.0-alpha.4", false},
+ {"v2.0.0", "v2.0.0-alpha.4", false},
+ {"v2.0.0-alpha.4", "v2.0.0", false},
+ {"~v2.0.0", "v2.0.1", true},
+ {"v2", "v2.0.0", true},
+ {">2.0.0", "v2.1.1", true},
+ {"v2.1.*", "v2.1.1", true},
+ }
+
+ for _, tt := range tests {
+ if IsCompatibleRange(tt.constraint, tt.ver) != tt.expected {
+ t.Errorf("expected constraint %s to be %v for %s", tt.constraint, tt.expected, tt.ver)
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/util/create.go b/helm/pkg/chart/v2/util/create.go
new file mode 100644
index 000000000..bf572c707
--- /dev/null
+++ b/helm/pkg/chart/v2/util/create.go
@@ -0,0 +1,833 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+// chartName is a regular expression for testing the supplied name of a chart.
+// This regular expression is probably stricter than it needs to be. We can relax it
+// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be
+// problematic.
+var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$")
+
+const (
+ // ChartfileName is the default Chart file name.
+ ChartfileName = "Chart.yaml"
+ // ValuesfileName is the default values file name.
+ ValuesfileName = "values.yaml"
+ // SchemafileName is the default values schema file name.
+ SchemafileName = "values.schema.json"
+ // TemplatesDir is the relative directory name for templates.
+ TemplatesDir = "templates"
+ // ChartsDir is the relative directory name for charts dependencies.
+ ChartsDir = "charts"
+ // TemplatesTestsDir is the relative directory name for tests.
+ TemplatesTestsDir = TemplatesDir + sep + "tests"
+ // IgnorefileName is the name of the Helm ignore file.
+ IgnorefileName = ".helmignore"
+ // IngressFileName is the name of the example ingress file.
+ IngressFileName = TemplatesDir + sep + "ingress.yaml"
+ // HTTPRouteFileName is the name of the example HTTPRoute file.
+ HTTPRouteFileName = TemplatesDir + sep + "httproute.yaml"
+ // DeploymentName is the name of the example deployment file.
+ DeploymentName = TemplatesDir + sep + "deployment.yaml"
+ // ServiceName is the name of the example service file.
+ ServiceName = TemplatesDir + sep + "service.yaml"
+ // ServiceAccountName is the name of the example serviceaccount file.
+ ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml"
+ // HorizontalPodAutoscalerName is the name of the example hpa file.
+ HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml"
+ // NotesName is the name of the example NOTES.txt file.
+ NotesName = TemplatesDir + sep + "NOTES.txt"
+ // HelpersName is the name of the example helpers file.
+ HelpersName = TemplatesDir + sep + "_helpers.tpl"
+ // TestConnectionName is the name of the example test file.
+ TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml"
+)
+
+// maxChartNameLength is lower than the limits we know of with certain file systems,
+// and with certain Kubernetes fields.
+const maxChartNameLength = 250
+
+const sep = string(filepath.Separator)
+
+const defaultChartfile = `apiVersion: v2
+name: %s
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
+`
+
+const defaultValues = `# Default values for %s.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+replicaCount: 1
+
+# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
+image:
+ repository: nginx
+ # This sets the pull policy for images.
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# This is to override the chart name.
+nameOverride: ""
+fullnameOverride: ""
+
+# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
+serviceAccount:
+ # Specifies whether a service account should be created.
+ create: true
+ # Automatically mount a ServiceAccount's API credentials?
+ automount: true
+ # Annotations to add to the service account.
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template.
+ name: ""
+
+# This is for setting Kubernetes Annotations to a Pod.
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+# This is for setting Kubernetes Labels to a Pod.
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
+service:
+ # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: ClusterIP
+ # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
+ port: 80
+
+# This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
+ingress:
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+# -- Expose the service via gateway-api HTTPRoute
+# Requires Gateway API resources and suitable controller installed within the cluster
+# (see: https://gateway-api.sigs.k8s.io/guides/)
+httpRoute:
+ # HTTPRoute enabled.
+ enabled: false
+ # HTTPRoute annotations.
+ annotations: {}
+ # Which Gateways this Route is attached to.
+ parentRefs:
+ - name: gateway
+ sectionName: http
+ # namespace: default
+ # Hostnames matching HTTP header.
+ hostnames:
+ - chart-example.local
+ # List of rules and filters applied.
+ rules:
+ - matches:
+ - path:
+ type: PathPrefix
+ value: /headers
+ # filters:
+ # - type: RequestHeaderModifier
+ # requestHeaderModifier:
+ # set:
+ # - name: My-Overwrite-Header
+ # value: this-is-the-only-value
+ # remove:
+ # - User-Agent
+ # - matches:
+ # - path:
+ # type: PathPrefix
+ # value: /echo
+ # headers:
+ # - name: version
+ # value: v2
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+livenessProbe:
+ httpGet:
+ path: /
+ port: http
+readinessProbe:
+ httpGet:
+ path: /
+ port: http
+
+# This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
+autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 100
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+# Additional volumes on the output Deployment definition.
+volumes: []
+ # - name: foo
+ # secret:
+ # secretName: mysecret
+ # optional: false
+
+# Additional volumeMounts on the output Deployment definition.
+volumeMounts: []
+ # - name: foo
+ # mountPath: "/etc/foo"
+ # readOnly: true
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.ingress.className }}
+ ingressClassName: {{ . }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- with .pathType }}
+ pathType: {{ . }}
+ {{- end }}
+ backend:
+ service:
+ name: {{ include ".fullname" $ }}
+ port:
+ number: {{ $.Values.service.port }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultHTTPRoute = `{{- if .Values.httpRoute.enabled -}}
+{{- $fullName := include ".fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.httpRoute.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ parentRefs:
+ {{- with .Values.httpRoute.parentRefs }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.httpRoute.hostnames }}
+ hostnames:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ rules:
+ {{- range .Values.httpRoute.rules }}
+ {{- with .matches }}
+ - matches:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .filters }}
+ filters:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ backendRefs:
+ - name: {{ $fullName }}
+ port: {{ $svcPort }}
+ weight: 1
+ {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ {{- if not .Values.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include ".selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include ".labels" . | nindent 8 }}
+ {{- with .Values.podLabels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include ".serviceAccountName" . }}
+ {{- with .Values.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ {{- with .Values.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.service.port }}
+ protocol: TCP
+ {{- with .Values.livenessProbe }}
+ livenessProbe:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.readinessProbe }}
+ readinessProbe:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumeMounts }}
+ volumeMounts:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include ".selectorLabels" . | nindent 4 }}
+`
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include ".serviceAccountName" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ include ".fullname" . }}
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include ".fullname" . }}
+ minReplicas: {{ .Values.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+ {{- end }}
+ {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.httpRoute.enabled }}
+{{- if .Values.httpRoute.hostnames }}
+ export APP_HOSTNAME={{ .Values.httpRoute.hostnames | first }}
+{{- else }}
+ export APP_HOSTNAME=$(kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o jsonpath="{.spec.listeners[0].hostname}")
+ {{- end }}
+{{- if and .Values.httpRoute.rules (first .Values.httpRoute.rules).matches (first (first .Values.httpRoute.rules).matches).path.value }}
+ echo "Visit http://$APP_HOSTNAME{{ (first (first .Values.httpRoute.rules).matches).path.value }} to use your application"
+
+ NOTE: Your HTTPRoute depends on the listener configuration of your gateway and your HTTPRoute rules.
+ The rules can be set for path, method, header and query parameters.
+ You can check the gateway configuration with 'kubectl get --namespace {{(first .Values.httpRoute.parentRefs).namespace | default .Release.Namespace }} gateway/{{ (first .Values.httpRoute.parentRefs).name }} -o yaml'
+{{- end }}
+{{- else if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include ".fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define ".name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define ".fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define ".chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define ".labels" -}}
+helm.sh/chart: {{ include ".chart" . }}
+{{ include ".selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define ".selectorLabels" -}}
+app.kubernetes.io/name: {{ include ".name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define ".serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include ".fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include ".fullname" . }}-test-connection"
+ labels:
+ {{- include ".labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: wget
+ image: busybox
+ command: ['wget']
+ args: ['{{ include ".fullname" . }}:{{ .Values.service.port }}']
+ restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward
+// compatibility.
+var Stderr io.Writer = os.Stderr
+
+// CreateFrom creates a new chart, but scaffolds it from the src chart.
+func CreateFrom(chartfile *chart.Metadata, dest, src string) error {
+ schart, err := loader.Load(src)
+ if err != nil {
+ return fmt.Errorf("could not load %s: %w", src, err)
+ }
+
+ schart.Metadata = chartfile
+
+ var updatedTemplates []*common.File
+
+ for _, template := range schart.Templates {
+ newData := transform(string(template.Data), schart.Name())
+ updatedTemplates = append(updatedTemplates, &common.File{Name: template.Name, ModTime: template.ModTime, Data: newData})
+ }
+
+ schart.Templates = updatedTemplates
+ b, err := yaml.Marshal(schart.Values)
+ if err != nil {
+ return fmt.Errorf("reading values file: %w", err)
+ }
+
+ var m map[string]interface{}
+ if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil {
+ return fmt.Errorf("transforming values file: %w", err)
+ }
+ schart.Values = m
+
+ // SaveDir looks for the file values.yaml when saving rather than the values
+ // key in order to preserve the comments in the YAML. The name placeholder
+ // needs to be replaced on that file.
+ for _, f := range schart.Raw {
+ if f.Name == ValuesfileName {
+ f.Data = transform(string(f.Data), schart.Name())
+ }
+ }
+
+ return SaveDir(schart, dest)
+}
+
+// Create creates a new chart in a directory.
+//
+// Inside of dir, this will create a directory based on the name of
+// chartfile.Name. It will then write the Chart.yaml into this directory and
+// create the (empty) appropriate directories.
+//
+// The returned string will point to the newly created directory. It will be
+// an absolute path, even if the provided base directory was relative.
+//
+// If dir does not exist, this will return an error.
+// If Chart.yaml or any directories cannot be created, this will return an
+// error. In such a case, this will attempt to clean up by removing the
+// new chart directory.
+func Create(name, dir string) (string, error) {
+
+ // Sanity-check the name of a chart so user doesn't create one that causes problems.
+ if err := validateChartName(name); err != nil {
+ return "", err
+ }
+
+ path, err := filepath.Abs(dir)
+ if err != nil {
+ return path, err
+ }
+
+ if fi, err := os.Stat(path); err != nil {
+ return path, err
+ } else if !fi.IsDir() {
+ return path, fmt.Errorf("no such directory %s", path)
+ }
+
+ cdir := filepath.Join(path, name)
+ if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+ return cdir, fmt.Errorf("file %s already exists and is not a directory", cdir)
+ }
+
+ // Note: If adding a new template below (i.e., to `helm create`) which is disabled by default (similar to hpa and
+ // ingress below); or making an existing template disabled by default, add the enabling condition in
+ // `TestHelmCreateChart_CheckDeprecatedWarnings` in `pkg/lint/lint_test.go` to make it run through deprecation checks
+ // with latest Kubernetes version.
+ files := []struct {
+ path string
+ content []byte
+ }{
+ {
+ // Chart.yaml
+ path: filepath.Join(cdir, ChartfileName),
+ content: fmt.Appendf(nil, defaultChartfile, name),
+ },
+ {
+ // values.yaml
+ path: filepath.Join(cdir, ValuesfileName),
+ content: fmt.Appendf(nil, defaultValues, name),
+ },
+ {
+ // .helmignore
+ path: filepath.Join(cdir, IgnorefileName),
+ content: []byte(defaultIgnore),
+ },
+ {
+ // ingress.yaml
+ path: filepath.Join(cdir, IngressFileName),
+ content: transform(defaultIngress, name),
+ },
+ {
+ // httproute.yaml
+ path: filepath.Join(cdir, HTTPRouteFileName),
+ content: transform(defaultHTTPRoute, name),
+ },
+ {
+ // deployment.yaml
+ path: filepath.Join(cdir, DeploymentName),
+ content: transform(defaultDeployment, name),
+ },
+ {
+ // service.yaml
+ path: filepath.Join(cdir, ServiceName),
+ content: transform(defaultService, name),
+ },
+ {
+ // serviceaccount.yaml
+ path: filepath.Join(cdir, ServiceAccountName),
+ content: transform(defaultServiceAccount, name),
+ },
+ {
+ // hpa.yaml
+ path: filepath.Join(cdir, HorizontalPodAutoscalerName),
+ content: transform(defaultHorizontalPodAutoscaler, name),
+ },
+ {
+ // NOTES.txt
+ path: filepath.Join(cdir, NotesName),
+ content: transform(defaultNotes, name),
+ },
+ {
+ // _helpers.tpl
+ path: filepath.Join(cdir, HelpersName),
+ content: transform(defaultHelpers, name),
+ },
+ {
+ // test-connection.yaml
+ path: filepath.Join(cdir, TestConnectionName),
+ content: transform(defaultTestConnection, name),
+ },
+ }
+
+ for _, file := range files {
+ if _, err := os.Stat(file.path); err == nil {
+ // There is no handle to a preferred output stream here.
+ fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+ }
+ if err := writeFile(file.path, file.content); err != nil {
+ return cdir, err
+ }
+ }
+ // Need to add the ChartsDir explicitly as it does not contain any file OOTB
+ if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+ return cdir, err
+ }
+ return cdir, nil
+}
+
+// transform performs a string replacement of the specified source for
+// a given key with the replacement string
+func transform(src, replacement string) []byte {
+ return []byte(strings.ReplaceAll(src, "", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+ if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+ if name == "" || len(name) > maxChartNameLength {
+ return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+ }
+ if !chartName.MatchString(name) {
+ return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+ }
+ return nil
+}
diff --git a/helm/pkg/chart/v2/util/create_test.go b/helm/pkg/chart/v2/util/create_test.go
new file mode 100644
index 000000000..086c4e5c8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/create_test.go
@@ -0,0 +1,172 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+func TestCreate(t *testing.T) {
+ tdir := t.TempDir()
+
+ c, err := Create("foo", tdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+
+ mychart, err := loader.LoadDir(c)
+ if err != nil {
+ t.Fatalf("Failed to load newly created chart %q: %s", c, err)
+ }
+
+ if mychart.Name() != "foo" {
+ t.Errorf("Expected name to be 'foo', got %q", mychart.Name())
+ }
+
+ for _, f := range []string{
+ ChartfileName,
+ DeploymentName,
+ HelpersName,
+ IgnorefileName,
+ NotesName,
+ ServiceAccountName,
+ ServiceName,
+ TemplatesDir,
+ TemplatesTestsDir,
+ TestConnectionName,
+ ValuesfileName,
+ } {
+ if _, err := os.Stat(filepath.Join(dir, f)); err != nil {
+ t.Errorf("Expected %s file: %s", f, err)
+ }
+ }
+}
+
+func TestCreateFrom(t *testing.T) {
+ tdir := t.TempDir()
+
+ cf := &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "foo",
+ Version: "0.1.0",
+ }
+ srcdir := "./testdata/frobnitz/charts/mariner"
+
+ if err := CreateFrom(cf, tdir, srcdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+ c := filepath.Join(tdir, cf.Name)
+ mychart, err := loader.LoadDir(c)
+ if err != nil {
+ t.Fatalf("Failed to load newly created chart %q: %s", c, err)
+ }
+
+ if mychart.Name() != "foo" {
+ t.Errorf("Expected name to be 'foo', got %q", mychart.Name())
+ }
+
+ for _, f := range []string{
+ ChartfileName,
+ ValuesfileName,
+ filepath.Join(TemplatesDir, "placeholder.tpl"),
+ } {
+ if _, err := os.Stat(filepath.Join(dir, f)); err != nil {
+ t.Errorf("Expected %s file: %s", f, err)
+ }
+
+ // Check each file to make sure has been replaced
+ b, err := os.ReadFile(filepath.Join(dir, f))
+ if err != nil {
+ t.Errorf("Unable to read file %s: %s", f, err)
+ }
+ if bytes.Contains(b, []byte("")) {
+ t.Errorf("File %s contains ", f)
+ }
+ }
+}
+
+// TestCreate_Overwrite is a regression test for making sure that files are overwritten.
+func TestCreate_Overwrite(t *testing.T) {
+ tdir := t.TempDir()
+
+ var errlog bytes.Buffer
+
+ if _, err := Create("foo", tdir); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := filepath.Join(tdir, "foo")
+
+ tplname := filepath.Join(dir, "templates/hpa.yaml")
+ writeFile(tplname, []byte("FOO"))
+
+ // Now re-run the create
+ Stderr = &errlog
+ if _, err := Create("foo", tdir); err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := os.ReadFile(tplname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(data) == "FOO" {
+ t.Fatal("File that should have been modified was not.")
+ }
+
+ if errlog.Len() == 0 {
+ t.Errorf("Expected warnings about overwriting files.")
+ }
+}
+
+func TestValidateChartName(t *testing.T) {
+ for name, shouldPass := range map[string]bool{
+ "": false,
+ "abcdefghijklmnopqrstuvwxyz-_.": true,
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ-_.": true,
+ "$hello": false,
+ "Hellô": false,
+ "he%%o": false,
+ "he\nllo": false,
+
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "abcdefghijklmnopqrstuvwxyz-_." +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ-_.": false,
+ } {
+ if err := validateChartName(name); (err != nil) == shouldPass {
+ t.Errorf("test for %q failed", name)
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/util/dependencies.go b/helm/pkg/chart/v2/util/dependencies.go
new file mode 100644
index 000000000..c7bb6621e
--- /dev/null
+++ b/helm/pkg/chart/v2/util/dependencies.go
@@ -0,0 +1,380 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "log/slog"
+ "strings"
+
+ "helm.sh/helm/v4/internal/copystructure"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// ProcessDependencies checks through this chart's dependencies, processing accordingly.
+func ProcessDependencies(c *chart.Chart, v common.Values) error {
+ if err := processDependencyEnabled(c, v, ""); err != nil {
+ return err
+ }
+ return processDependencyImportValues(c, true)
+}
+
+// processDependencyConditions disables charts based on condition path value in values
+func processDependencyConditions(reqs []*chart.Dependency, cvals common.Values, cpath string) {
+ if reqs == nil {
+ return
+ }
+ for _, r := range reqs {
+ for c := range strings.SplitSeq(strings.TrimSpace(r.Condition), ",") {
+ if len(c) > 0 {
+ // retrieve value
+ vv, err := cvals.PathValue(cpath + c)
+ if err == nil {
+ // if not bool, warn
+ if bv, ok := vv.(bool); ok {
+ r.Enabled = bv
+ break
+ }
+ slog.Warn("returned non-bool value", "path", c, "chart", r.Name)
+ } else if _, ok := err.(common.ErrNoValue); !ok {
+ // this is a real error
+ slog.Warn("the method PathValue returned error", slog.Any("error", err))
+ }
+ }
+ }
+ }
+}
+
+// processDependencyTags disables charts based on tags in values
+func processDependencyTags(reqs []*chart.Dependency, cvals common.Values) {
+ if reqs == nil {
+ return
+ }
+ vt, err := cvals.Table("tags")
+ if err != nil {
+ return
+ }
+ for _, r := range reqs {
+ var hasTrue, hasFalse bool
+ for _, k := range r.Tags {
+ if b, ok := vt[k]; ok {
+ // if not bool, warn
+ if bv, ok := b.(bool); ok {
+ if bv {
+ hasTrue = true
+ } else {
+ hasFalse = true
+ }
+ } else {
+ slog.Warn("returned non-bool value", "tag", k, "chart", r.Name)
+ }
+ }
+ }
+ if !hasTrue && hasFalse {
+ r.Enabled = false
+ } else if hasTrue || !hasTrue && !hasFalse {
+ r.Enabled = true
+ }
+ }
+}
+
+// getAliasDependency finds the chart for an alias dependency and copies parts that will be modified
+func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart {
+ for _, c := range charts {
+ if c == nil {
+ continue
+ }
+ if c.Name() != dep.Name {
+ continue
+ }
+ if !IsCompatibleRange(dep.Version, c.Metadata.Version) {
+ continue
+ }
+
+ out := *c
+ out.Metadata = copyMetadata(c.Metadata)
+
+ // empty dependencies and shallow copy all dependencies, otherwise parent info may be corrupted if
+ // there is more than one dependency aliasing this chart
+ out.SetDependencies()
+ for _, dependency := range c.Dependencies() {
+ cpy := *dependency
+ out.AddDependency(&cpy)
+ }
+
+ if dep.Alias != "" {
+ out.Metadata.Name = dep.Alias
+ }
+ return &out
+ }
+ return nil
+}
+
+func copyMetadata(metadata *chart.Metadata) *chart.Metadata {
+ md := *metadata
+
+ if md.Dependencies != nil {
+ dependencies := make([]*chart.Dependency, len(md.Dependencies))
+ for i := range md.Dependencies {
+ dependency := *md.Dependencies[i]
+ dependencies[i] = &dependency
+ }
+ md.Dependencies = dependencies
+ }
+ return &md
+}
+
+// processDependencyEnabled removes disabled charts from dependencies
+func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+
+ var chartDependencies []*chart.Chart
+ // If any dependency is not a part of Chart.yaml
+ // then this should be added to chartDependencies.
+ // However, if the dependency is already specified in Chart.yaml
+ // we should not add it, as it would be processed from Chart.yaml anyway.
+
+Loop:
+ for _, existing := range c.Dependencies() {
+ for _, req := range c.Metadata.Dependencies {
+ if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) {
+ continue Loop
+ }
+ }
+ chartDependencies = append(chartDependencies, existing)
+ }
+
+ for _, req := range c.Metadata.Dependencies {
+ if req == nil {
+ continue
+ }
+ if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil {
+ chartDependencies = append(chartDependencies, chartDependency)
+ }
+ if req.Alias != "" {
+ req.Name = req.Alias
+ }
+ }
+ c.SetDependencies(chartDependencies...)
+
+ // set all to true
+ for _, lr := range c.Metadata.Dependencies {
+ lr.Enabled = true
+ }
+ cvals, err := util.CoalesceValues(c, v)
+ if err != nil {
+ return err
+ }
+ // flag dependencies as enabled/disabled
+ processDependencyTags(c.Metadata.Dependencies, cvals)
+ processDependencyConditions(c.Metadata.Dependencies, cvals, path)
+ // make a map of charts to remove
+ rm := map[string]struct{}{}
+ for _, r := range c.Metadata.Dependencies {
+ if !r.Enabled {
+ // remove disabled chart
+ rm[r.Name] = struct{}{}
+ }
+ }
+ // don't keep disabled charts in new slice
+ cd := []*chart.Chart{}
+ copy(cd, c.Dependencies()[:0])
+ for _, n := range c.Dependencies() {
+ if _, ok := rm[n.Metadata.Name]; !ok {
+ cd = append(cd, n)
+ }
+ }
+ // don't keep disabled charts in metadata
+ cdMetadata := []*chart.Dependency{}
+ copy(cdMetadata, c.Metadata.Dependencies[:0])
+ for _, n := range c.Metadata.Dependencies {
+ if _, ok := rm[n.Name]; !ok {
+ cdMetadata = append(cdMetadata, n)
+ }
+ }
+
+ // recursively call self to process sub dependencies
+ for _, t := range cd {
+ subpath := path + t.Metadata.Name + "."
+ if err := processDependencyEnabled(t, cvals, subpath); err != nil {
+ return err
+ }
+ }
+ // set the correct dependencies in metadata
+ c.Metadata.Dependencies = nil
+ c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...)
+ c.SetDependencies(cd...)
+
+ return nil
+}
+
+// pathToMap creates a nested map given a YAML path in dot notation.
+func pathToMap(path string, data map[string]interface{}) map[string]interface{} {
+ if path == "." {
+ return data
+ }
+ return set(parsePath(path), data)
+}
+
+func parsePath(key string) []string { return strings.Split(key, ".") }
+
+func set(path []string, data map[string]interface{}) map[string]interface{} {
+ if len(path) == 0 {
+ return nil
+ }
+ cur := data
+ for i := len(path) - 1; i >= 0; i-- {
+ cur = map[string]interface{}{path[i]: cur}
+ }
+ return cur
+}
+
+// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field.
+func processImportValues(c *chart.Chart, merge bool) error {
+ if c.Metadata.Dependencies == nil {
+ return nil
+ }
+ // combine chart values and empty config to get Values
+ var cvals common.Values
+ var err error
+ if merge {
+ cvals, err = util.MergeValues(c, nil)
+ } else {
+ cvals, err = util.CoalesceValues(c, nil)
+ }
+ if err != nil {
+ return err
+ }
+ b := make(map[string]interface{})
+ // import values from each dependency if specified in import-values
+ for _, r := range c.Metadata.Dependencies {
+ var outiv []interface{}
+ for _, riv := range r.ImportValues {
+ switch iv := riv.(type) {
+ case map[string]interface{}:
+ child := fmt.Sprintf("%v", iv["child"])
+ parent := fmt.Sprintf("%v", iv["parent"])
+
+ outiv = append(outiv, map[string]string{
+ "child": child,
+ "parent": parent,
+ })
+
+ // get child table
+ vv, err := cvals.Table(r.Name + "." + child)
+ if err != nil {
+ slog.Warn(
+ "ImportValues missing table from chart",
+ slog.String("chart", r.Name),
+ slog.Any("error", err),
+ )
+ continue
+ }
+ // create value map from child to be merged into parent
+ if merge {
+ b = util.MergeTables(b, pathToMap(parent, vv.AsMap()))
+ } else {
+ b = util.CoalesceTables(b, pathToMap(parent, vv.AsMap()))
+ }
+ case string:
+ child := "exports." + iv
+ outiv = append(outiv, map[string]string{
+ "child": child,
+ "parent": ".",
+ })
+ vm, err := cvals.Table(r.Name + "." + child)
+ if err != nil {
+ slog.Warn("ImportValues missing table", slog.Any("error", err))
+ continue
+ }
+ if merge {
+ b = util.MergeTables(b, vm.AsMap())
+ } else {
+ b = util.CoalesceTables(b, vm.AsMap())
+ }
+ }
+ }
+ r.ImportValues = outiv
+ }
+
+ // Imported values from a child to a parent chart have a lower priority than
+ // the parents values. This enables parent charts to import a large section
+ // from a child and then override select parts. This is why b is merged into
+ // cvals in the code below and not the other way around.
+ if merge {
+ // deep copying the cvals as there are cases where pointers can end
+ // up in the cvals when they are copied onto b in ways that break things.
+ cvals = deepCopyMap(cvals)
+ c.Values = util.MergeTables(cvals, b)
+ } else {
+ // Trimming the nil values from cvals is needed for backwards compatibility.
+ // Previously, the b value had been populated with cvals along with some
+ // overrides. This caused the coalescing functionality to remove the
+ // nil/null values. This trimming is for backwards compat.
+ cvals = trimNilValues(cvals)
+ c.Values = util.CoalesceTables(cvals, b)
+ }
+
+ return nil
+}
+
+func deepCopyMap(vals map[string]interface{}) map[string]interface{} {
+ valsCopy, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals
+ }
+ return valsCopy.(map[string]interface{})
+}
+
+func trimNilValues(vals map[string]interface{}) map[string]interface{} {
+ valsCopy, err := copystructure.Copy(vals)
+ if err != nil {
+ return vals
+ }
+ valsCopyMap := valsCopy.(map[string]interface{})
+ for key, val := range valsCopyMap {
+ if val == nil {
+ // Iterate over the values and remove nil keys
+ delete(valsCopyMap, key)
+ } else if istable(val) {
+ // Recursively call into ourselves to remove keys from inner tables
+ valsCopyMap[key] = trimNilValues(val.(map[string]interface{}))
+ }
+ }
+
+ return valsCopyMap
+}
+
+// istable is a special-purpose function to see if the present thing matches the definition of a YAML table.
+func istable(v interface{}) bool {
+ _, ok := v.(map[string]interface{})
+ return ok
+}
+
+// processDependencyImportValues imports specified chart values from child to parent.
+func processDependencyImportValues(c *chart.Chart, merge bool) error {
+ for _, d := range c.Dependencies() {
+ // recurse
+ if err := processDependencyImportValues(d, merge); err != nil {
+ return err
+ }
+ }
+ return processImportValues(c, merge)
+}
diff --git a/helm/pkg/chart/v2/util/dependencies_test.go b/helm/pkg/chart/v2/util/dependencies_test.go
new file mode 100644
index 000000000..c817b0b89
--- /dev/null
+++ b/helm/pkg/chart/v2/util/dependencies_test.go
@@ -0,0 +1,570 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+func loadChart(t *testing.T, path string) *chart.Chart {
+ t.Helper()
+ c, err := loader.Load(path)
+ if err != nil {
+ t.Fatalf("failed to load testdata: %s", err)
+ }
+ return c
+}
+
+func TestLoadDependency(t *testing.T) {
+ tests := []*chart.Dependency{
+ {Name: "alpine", Version: "0.1.0", Repository: "https://example.com/charts"},
+ {Name: "mariner", Version: "4.3.2", Repository: "https://example.com/charts"},
+ }
+
+ check := func(deps []*chart.Dependency) {
+ if len(deps) != 2 {
+ t.Errorf("expected 2 dependencies, got %d", len(deps))
+ }
+ for i, tt := range tests {
+ if deps[i].Name != tt.Name {
+ t.Errorf("expected dependency named %q, got %q", tt.Name, deps[i].Name)
+ }
+ if deps[i].Version != tt.Version {
+ t.Errorf("expected dependency named %q to have version %q, got %q", tt.Name, tt.Version, deps[i].Version)
+ }
+ if deps[i].Repository != tt.Repository {
+ t.Errorf("expected dependency named %q to have repository %q, got %q", tt.Name, tt.Repository, deps[i].Repository)
+ }
+ }
+ }
+ c := loadChart(t, "testdata/frobnitz")
+ check(c.Metadata.Dependencies)
+ check(c.Lock.Dependencies)
+}
+
+func TestDependencyEnabled(t *testing.T) {
+ type M = map[string]interface{}
+ tests := []struct {
+ name string
+ v M
+ e []string // expected charts including duplicates in alphanumeric order
+ }{{
+ "tags with no effect",
+ M{"tags": M{"nothinguseful": false}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags disabling a group",
+ M{"tags": M{"front-end": false}},
+ []string{"parentchart"},
+ }, {
+ "tags disabling a group and enabling a different group",
+ M{"tags": M{"front-end": false, "back-end": true}},
+ []string{"parentchart", "parentchart.subchart2", "parentchart.subchart2.subchartb", "parentchart.subchart2.subchartc"},
+ }, {
+ "tags disabling only children, children still enabled since tag front-end=true in values.yaml",
+ M{"tags": M{"subcharta": false, "subchartb": false}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags disabling all parents/children with additional tag re-enabling a parent",
+ M{"tags": M{"front-end": false, "subchart1": true, "back-end": false}},
+ []string{"parentchart", "parentchart.subchart1"},
+ }, {
+ "conditions enabling the parent charts, but back-end (b, c) is still disabled via values.yaml",
+ M{"subchart1": M{"enabled": true}, "subchart2": M{"enabled": true}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb", "parentchart.subchart2"},
+ }, {
+ "conditions disabling the parent charts, effectively disabling children",
+ M{"subchart1": M{"enabled": false}, "subchart2": M{"enabled": false}},
+ []string{"parentchart"},
+ }, {
+ "conditions a child using the second condition path of child's condition",
+ M{"subchart1": M{"subcharta": M{"enabled": false}}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subchartb"},
+ }, {
+ "tags enabling a parent/child group with condition disabling one child",
+ M{"subchart2": M{"subchartc": M{"enabled": false}}, "tags": M{"back-end": true}},
+ []string{"parentchart", "parentchart.subchart1", "parentchart.subchart1.subcharta", "parentchart.subchart1.subchartb", "parentchart.subchart2", "parentchart.subchart2.subchartb"},
+ }, {
+ "tags will not enable a child if parent is explicitly disabled with condition",
+ M{"subchart1": M{"enabled": false}, "tags": M{"front-end": true}},
+ []string{"parentchart"},
+ }, {
+ "subcharts with alias also respect conditions",
+ M{"subchart1": M{"enabled": false}, "subchart2alias": M{"enabled": true, "subchartb": M{"enabled": true}}},
+ []string{"parentchart", "parentchart.subchart2alias", "parentchart.subchart2alias.subchartb"},
+ }}
+
+ for _, tc := range tests {
+ c := loadChart(t, "testdata/subpop")
+ t.Run(tc.name, func(t *testing.T) {
+ if err := processDependencyEnabled(c, tc.v, ""); err != nil {
+ t.Fatalf("error processing enabled dependencies %v", err)
+ }
+
+ names := extractChartNames(c)
+ if len(names) != len(tc.e) {
+ t.Fatalf("slice lengths do not match got %v, expected %v", len(names), len(tc.e))
+ }
+ for i := range names {
+ if names[i] != tc.e[i] {
+ t.Fatalf("slice values do not match got %v, expected %v", names, tc.e)
+ }
+ }
+ })
+ }
+}
+
+// extractChartNames recursively searches chart dependencies returning all charts found
+func extractChartNames(c *chart.Chart) []string {
+ var out []string
+ var fn func(c *chart.Chart)
+ fn = func(c *chart.Chart) {
+ out = append(out, c.ChartPath())
+ for _, d := range c.Dependencies() {
+ fn(d)
+ }
+ }
+ fn(c)
+ sort.Strings(out)
+ return out
+}
+
+func TestProcessDependencyImportValues(t *testing.T) {
+ c := loadChart(t, "testdata/subpop")
+
+ e := make(map[string]string)
+
+ e["imported-chart1.SC1bool"] = "true"
+ e["imported-chart1.SC1float"] = "3.14"
+ e["imported-chart1.SC1int"] = "100"
+ e["imported-chart1.SC1string"] = "dollywood"
+ e["imported-chart1.SC1extra1"] = "11"
+ e["imported-chart1.SPextra1"] = "helm rocks"
+ e["imported-chart1.SC1extra1"] = "11"
+
+ e["imported-chartA.SCAbool"] = "false"
+ e["imported-chartA.SCAfloat"] = "3.1"
+ e["imported-chartA.SCAint"] = "55"
+ e["imported-chartA.SCAstring"] = "jabba"
+ e["imported-chartA.SPextra3"] = "1.337"
+ e["imported-chartA.SC1extra2"] = "1.337"
+ e["imported-chartA.SCAnested1.SCAnested2"] = "true"
+
+ e["imported-chartA-B.SCAbool"] = "false"
+ e["imported-chartA-B.SCAfloat"] = "3.1"
+ e["imported-chartA-B.SCAint"] = "55"
+ e["imported-chartA-B.SCAstring"] = "jabba"
+
+ e["imported-chartA-B.SCBbool"] = "true"
+ e["imported-chartA-B.SCBfloat"] = "7.77"
+ e["imported-chartA-B.SCBint"] = "33"
+ e["imported-chartA-B.SCBstring"] = "boba"
+ e["imported-chartA-B.SPextra5"] = "k8s"
+ e["imported-chartA-B.SC1extra5"] = "tiller"
+
+ // These values are imported from the child chart to the parent. Parent
+ // values take precedence over imported values. This enables importing a
+ // large section from a child chart and overriding a selection from it.
+ e["overridden-chart1.SC1bool"] = "false"
+ e["overridden-chart1.SC1float"] = "3.141592"
+ e["overridden-chart1.SC1int"] = "99"
+ e["overridden-chart1.SC1string"] = "pollywog"
+ e["overridden-chart1.SPextra2"] = "42"
+
+ e["overridden-chartA.SCAbool"] = "true"
+ e["overridden-chartA.SCAfloat"] = "41.3"
+ e["overridden-chartA.SCAint"] = "808"
+ e["overridden-chartA.SCAstring"] = "jabberwocky"
+ e["overridden-chartA.SPextra4"] = "true"
+
+ // These values are imported from the child chart to the parent. Parent
+ // values take precedence over imported values. This enables importing a
+ // large section from a child chart and overriding a selection from it.
+ e["overridden-chartA-B.SCAbool"] = "true"
+ e["overridden-chartA-B.SCAfloat"] = "41.3"
+ e["overridden-chartA-B.SCAint"] = "808"
+ e["overridden-chartA-B.SCAstring"] = "jabberwocky"
+ e["overridden-chartA-B.SCBbool"] = "false"
+ e["overridden-chartA-B.SCBfloat"] = "1.99"
+ e["overridden-chartA-B.SCBint"] = "77"
+ e["overridden-chartA-B.SCBstring"] = "jango"
+ e["overridden-chartA-B.SPextra6"] = "111"
+ e["overridden-chartA-B.SCAextra1"] = "23"
+ e["overridden-chartA-B.SCBextra1"] = "13"
+ e["overridden-chartA-B.SC1extra6"] = "77"
+
+ // `exports` style
+ e["SCBexported1B"] = "1965"
+ e["SC1extra7"] = "true"
+ e["SCBexported2A"] = "blaster"
+ e["global.SC1exported2.all.SC1exported3"] = "SC1expstr"
+
+ if err := processDependencyImportValues(c, false); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cc.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+
+ switch pv := pv.(type) {
+ case float64:
+ if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
+ t.Errorf("failed to match imported float value %v with expected %v for key %q", s, vv, kk)
+ }
+ case bool:
+ if b := strconv.FormatBool(pv); b != vv {
+ t.Errorf("failed to match imported bool value %v with expected %v for key %q", b, vv, kk)
+ }
+ default:
+ if pv != vv {
+ t.Errorf("failed to match imported string value %q with expected %q for key %q", pv, vv, kk)
+ }
+ }
+ }
+
+ // Since this was processed with coalescing there should be no null values.
+ // Here we verify that.
+ _, err := cc.PathValue("ensurenull")
+ if err == nil {
+ t.Error("expect nil value not found but found it")
+ }
+ switch xerr := err.(type) {
+ case common.ErrNoValue:
+ // We found what we expected
+ default:
+ t.Errorf("expected an ErrNoValue but got %q instead", xerr)
+ }
+
+ c = loadChart(t, "testdata/subpop")
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc = common.Values(c.Values)
+ val, err := cc.PathValue("ensurenull")
+ if err != nil {
+ t.Error("expect value but ensurenull was not found")
+ }
+ if val != nil {
+ t.Errorf("expect nil value but got %q instead", val)
+ }
+}
+
+func TestProcessDependencyImportValuesFromSharedDependencyToAliases(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-import-from-aliased-dependencies")
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ e := make(map[string]string)
+
+ e["foo-defaults.defaultValue"] = "42"
+ e["bar-defaults.defaultValue"] = "42"
+
+ e["foo.defaults.defaultValue"] = "42"
+ e["bar.defaults.defaultValue"] = "42"
+
+ e["foo.grandchild.defaults.defaultValue"] = "42"
+ e["bar.grandchild.defaults.defaultValue"] = "42"
+
+ cValues := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cValues.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+ if pv != vv {
+ t.Errorf("failed to match imported value %v with expected %v", pv, vv)
+ }
+ }
+}
+
+func TestProcessDependencyImportValuesMultiLevelPrecedence(t *testing.T) {
+ c := loadChart(t, "testdata/three-level-dependent-chart/umbrella")
+
+ e := make(map[string]string)
+
+ // The order of precedence should be:
+ // 1. User specified values (e.g CLI)
+ // 2. Parent chart values
+ // 3. Imported values
+ // 4. Sub-chart values
+ // The 4 app charts here deal with things differently:
+ // - app1 has a port value set in the umbrella chart. It does not import any
+ // values so the value from the umbrella chart should be used.
+ // - app2 has a value in the app chart and imports from the library. The
+ // app chart value should take precedence.
+ // - app3 has no value in the app chart and imports the value from the library
+ // chart. The library chart value should be used.
+ // - app4 has a value in the app chart and does not import the value from the
+ // library chart. The app charts value should be used.
+ e["app1.service.port"] = "3456"
+ e["app2.service.port"] = "8080"
+ e["app3.service.port"] = "9090"
+ e["app4.service.port"] = "1234"
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+ cc := common.Values(c.Values)
+ for kk, vv := range e {
+ pv, err := cc.PathValue(kk)
+ if err != nil {
+ t.Fatalf("retrieving import values table %v %v", kk, err)
+ }
+
+ switch pv := pv.(type) {
+ case float64:
+ if s := strconv.FormatFloat(pv, 'f', -1, 64); s != vv {
+ t.Errorf("failed to match imported float value %v with expected %v", s, vv)
+ }
+ default:
+ if pv != vv {
+ t.Errorf("failed to match imported string value %q with expected %q", pv, vv)
+ }
+ }
+ }
+}
+
+func TestProcessDependencyImportValuesForEnabledCharts(t *testing.T) {
+ c := loadChart(t, "testdata/import-values-from-enabled-subchart/parent-chart")
+ nameOverride := "parent-chart-prod"
+
+ if err := processDependencyImportValues(c, true); err != nil {
+ t.Fatalf("processing import values dependencies %v", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Metadata.Dependencies) != 1 {
+ t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
+ }
+
+ prodDependencyValues := c.Dependencies()[0].Values
+ if prodDependencyValues["nameOverride"] != nameOverride {
+ t.Fatalf("dependency chart name should be %s but got %s", nameOverride, prodDependencyValues["nameOverride"])
+ }
+}
+
+func TestGetAliasDependency(t *testing.T) {
+ c := loadChart(t, "testdata/frobnitz")
+ req := c.Metadata.Dependencies
+
+ if len(req) == 0 {
+ t.Fatalf("there are no dependencies to test")
+ }
+
+ // Success case
+ aliasChart := getAliasDependency(c.Dependencies(), req[0])
+ if aliasChart == nil {
+ t.Fatalf("failed to get dependency chart for alias %s", req[0].Name)
+ }
+ if req[0].Alias != "" {
+ if aliasChart.Name() != req[0].Alias {
+ t.Fatalf("dependency chart name should be %s but got %s", req[0].Alias, aliasChart.Name())
+ }
+ } else if aliasChart.Name() != req[0].Name {
+ t.Fatalf("dependency chart name should be %s but got %s", req[0].Name, aliasChart.Name())
+ }
+
+ if req[0].Version != "" {
+ if !IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) {
+ t.Fatalf("dependency chart version is not in the compatible range")
+ }
+ }
+
+ // Failure case
+ req[0].Name = "something-else"
+ if aliasChart := getAliasDependency(c.Dependencies(), req[0]); aliasChart != nil {
+ t.Fatalf("expected no chart but got %s", aliasChart.Name())
+ }
+
+ req[0].Version = "something else which is not in the compatible range"
+ if IsCompatibleRange(req[0].Version, aliasChart.Metadata.Version) {
+ t.Fatalf("dependency chart version which is not in the compatible range should cause a failure other than a success ")
+ }
+}
+
+func TestDependentChartAliases(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-alias")
+ req := c.Metadata.Dependencies
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 3 {
+ t.Fatal("expected alias dependencies to be added")
+ }
+
+ if len(c.Dependencies()) != len(c.Metadata.Dependencies) {
+ t.Fatalf("expected number of chart dependencies %d, but got %d", len(c.Metadata.Dependencies), len(c.Dependencies()))
+ }
+
+ aliasChart := getAliasDependency(c.Dependencies(), req[2])
+
+ if aliasChart == nil {
+ t.Fatalf("failed to get dependency chart for alias %s", req[2].Name)
+ }
+ if aliasChart.Parent() != c {
+ t.Fatalf("dependency chart has wrong parent, expected %s but got %s", c.Name(), aliasChart.Parent().Name())
+ }
+ if req[2].Alias != "" {
+ if aliasChart.Name() != req[2].Alias {
+ t.Fatalf("dependency chart name should be %s but got %s", req[2].Alias, aliasChart.Name())
+ }
+ } else if aliasChart.Name() != req[2].Name {
+ t.Fatalf("dependency chart name should be %s but got %s", req[2].Name, aliasChart.Name())
+ }
+
+ req[2].Name = "dummy-name"
+ if aliasChart := getAliasDependency(c.Dependencies(), req[2]); aliasChart != nil {
+ t.Fatalf("expected no chart but got %s", aliasChart.Name())
+ }
+
+}
+
+func TestDependentChartWithSubChartsAbsentInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-no-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+}
+
+func TestDependentChartWithSubChartsHelmignore(t *testing.T) {
+ // FIXME what does this test?
+ loadChart(t, "testdata/dependent-chart-helmignore")
+}
+
+func TestDependentChartsWithSubChartsSymlink(t *testing.T) {
+ joonix := filepath.Join("testdata", "joonix")
+ if err := os.Symlink(filepath.Join("..", "..", "frobnitz"), filepath.Join(joonix, "charts", "frobnitz")); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(filepath.Join(joonix, "charts", "frobnitz"))
+ c := loadChart(t, joonix)
+
+ if c.Name() != "joonix" {
+ t.Fatalf("unexpected chart name: %s", c.Name())
+ }
+ if n := len(c.Dependencies()); n != 1 {
+ t.Fatalf("expected 1 dependency for this chart, but got %d", n)
+ }
+}
+
+func TestDependentChartsWithSubchartsAllSpecifiedInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-with-all-in-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Dependencies()) != len(c.Metadata.Dependencies) {
+ t.Fatalf("expected number of chart dependencies %d, but got %d", len(c.Metadata.Dependencies), len(c.Dependencies()))
+ }
+}
+
+func TestDependentChartsWithSomeSubchartsSpecifiedInDependency(t *testing.T) {
+ c := loadChart(t, "testdata/dependent-chart-with-mixed-requirements-yaml")
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatalf("expected 2 dependencies for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected no changes in dependencies")
+ }
+
+ if len(c.Metadata.Dependencies) != 1 {
+ t.Fatalf("expected 1 dependency specified in Chart.yaml, got %d", len(c.Metadata.Dependencies))
+ }
+}
+
+func validateDependencyTree(t *testing.T, c *chart.Chart) {
+ t.Helper()
+ for _, dependency := range c.Dependencies() {
+ if dependency.Parent() != c {
+ if dependency.Parent() != c {
+ t.Fatalf("dependency chart %s has wrong parent, expected %s but got %s", dependency.Name(), c.Name(), dependency.Parent().Name())
+ }
+ }
+ // recurse entire tree
+ validateDependencyTree(t, dependency)
+ }
+}
+
+func TestChartWithDependencyAliasedTwiceAndDoublyReferencedSubDependency(t *testing.T) {
+ c := loadChart(t, "testdata/chart-with-dependency-aliased-twice")
+
+ if len(c.Dependencies()) != 1 {
+ t.Fatalf("expected one dependency for this chart, but got %d", len(c.Dependencies()))
+ }
+
+ if err := processDependencyEnabled(c, c.Values, ""); err != nil {
+ t.Fatalf("expected no errors but got %q", err)
+ }
+
+ if len(c.Dependencies()) != 2 {
+ t.Fatal("expected two dependencies after processing aliases")
+ }
+ validateDependencyTree(t, c)
+}
diff --git a/helm/pkg/chart/v2/util/doc.go b/helm/pkg/chart/v2/util/doc.go
new file mode 100644
index 000000000..141062074
--- /dev/null
+++ b/helm/pkg/chart/v2/util/doc.go
@@ -0,0 +1,45 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+package util contains tools for working with charts.
+
+Charts are described in the chart package (pkg/chart).
+This package provides utilities for serializing and deserializing charts.
+
+A chart can be represented on the file system in one of two ways:
+
+ - As a directory that contains a Chart.yaml file and other chart things.
+ - As a tarred gzipped file containing a directory that then contains a
+ Chart.yaml file.
+
+This package provides utilities for working with those file formats.
+
+The preferred way of loading a chart is using 'loader.Load`:
+
+ chart, err := loader.Load(filename)
+
+This will attempt to discover whether the file at 'filename' is a directory or
+a chart archive. It will then load accordingly.
+
+For accepting raw compressed tar file data from an io.Reader, the
+'loader.LoadArchive()' will read in the data, uncompress it, and unpack it
+into a Chart.
+
+When creating charts in memory, use the 'helm.sh/helm/pkg/chart'
+package directly.
+*/
+package util // import chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
diff --git a/helm/pkg/chart/v2/util/expand.go b/helm/pkg/chart/v2/util/expand.go
new file mode 100644
index 000000000..077dfbf38
--- /dev/null
+++ b/helm/pkg/chart/v2/util/expand.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/loader/archive"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+// Expand uncompresses and extracts a chart into the specified directory.
+func Expand(dir string, r io.Reader) error {
+ files, err := archive.LoadArchiveFiles(r)
+ if err != nil {
+ return err
+ }
+
+ // Get the name of the chart
+ var chartName string
+ for _, file := range files {
+ if file.Name == "Chart.yaml" {
+ ch := &chart.Metadata{}
+ if err := yaml.Unmarshal(file.Data, ch); err != nil {
+ return fmt.Errorf("cannot load Chart.yaml: %w", err)
+ }
+ chartName = ch.Name
+ }
+ }
+ if chartName == "" {
+ return errors.New("chart name not specified")
+ }
+
+ // Find the base directory
+ // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up
+ // being wrong or returning an error. This was introduced in v0.4.0.
+ dir = filepath.Clean(dir)
+ chartdir, err := securejoin.SecureJoin(dir, chartName)
+ if err != nil {
+ return err
+ }
+
+ // Copy all files verbatim. We don't parse these files because parsing can remove
+ // comments.
+ for _, file := range files {
+ outpath, err := securejoin.SecureJoin(chartdir, file.Name)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the necessary subdirs get created.
+ basedir := filepath.Dir(outpath)
+ if err := os.MkdirAll(basedir, 0755); err != nil {
+ return err
+ }
+
+ if err := os.WriteFile(outpath, file.Data, 0644); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ExpandFile expands the src file into the dest directory.
+func ExpandFile(dest, src string) error {
+ h, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer h.Close()
+ return Expand(dest, h)
+}
diff --git a/helm/pkg/chart/v2/util/expand_test.go b/helm/pkg/chart/v2/util/expand_test.go
new file mode 100644
index 000000000..280995f7e
--- /dev/null
+++ b/helm/pkg/chart/v2/util/expand_test.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestExpand(t *testing.T) {
+ dest := t.TempDir()
+
+ reader, err := os.Open("testdata/frobnitz-1.2.3.tgz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := Expand(dest, reader); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChartPath := filepath.Join(dest, "frobnitz")
+ fi, err := os.Stat(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Fatalf("expected a chart directory at %s", expectedChartPath)
+ }
+
+ dir, err := os.Open(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectLen := 11
+ if len(fis) != expectLen {
+ t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
+ }
+
+ for _, fi := range fis {
+ expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // os.Stat can return different values for directories, based on the OS
+ // for Linux, for example, os.Stat always returns the size of the directory
+ // (value-4096) regardless of the size of the contents of the directory
+ mode := expect.Mode()
+ if !mode.IsDir() {
+ if fi.Size() != expect.Size() {
+ t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
+ }
+ }
+ }
+}
+
+func TestExpandFile(t *testing.T) {
+ dest := t.TempDir()
+
+ if err := ExpandFile(dest, "testdata/frobnitz-1.2.3.tgz"); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChartPath := filepath.Join(dest, "frobnitz")
+ fi, err := os.Stat(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !fi.IsDir() {
+ t.Fatalf("expected a chart directory at %s", expectedChartPath)
+ }
+
+ dir, err := os.Open(expectedChartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectLen := 11
+ if len(fis) != expectLen {
+ t.Errorf("Expected %d files, but got %d", expectLen, len(fis))
+ }
+
+ for _, fi := range fis {
+ expect, err := os.Stat(filepath.Join("testdata", "frobnitz", fi.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // os.Stat can return different values for directories, based on the OS
+ // for Linux, for example, os.Stat always returns the size of the directory
+ // (value-4096) regardless of the size of the contents of the directory
+ mode := expect.Mode()
+ if !mode.IsDir() {
+ if fi.Size() != expect.Size() {
+ t.Errorf("Expected %s to have size %d, got %d", fi.Name(), expect.Size(), fi.Size())
+ }
+ }
+ }
+}
diff --git a/helm/pkg/chart/v2/util/save.go b/helm/pkg/chart/v2/util/save.go
new file mode 100644
index 000000000..e66d86991
--- /dev/null
+++ b/helm/pkg/chart/v2/util/save.go
@@ -0,0 +1,269 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=")
+
+// SaveDir saves a chart as files in a directory.
+//
+// This takes the chart name, and creates a new subdirectory inside of the given dest
+// directory, writing the chart's contents to that subdirectory.
+func SaveDir(c *chart.Chart, dest string) error {
+ // Create the chart directory
+ err := validateName(c.Name())
+ if err != nil {
+ return err
+ }
+ outdir := filepath.Join(dest, c.Name())
+ if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() {
+ return fmt.Errorf("file %s already exists and is not a directory", outdir)
+ }
+ if err := os.MkdirAll(outdir, 0755); err != nil {
+ return err
+ }
+
+ // Save the chart file.
+ if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil {
+ return err
+ }
+
+ // Save values.yaml
+ for _, f := range c.Raw {
+ if f.Name == ValuesfileName {
+ vf := filepath.Join(outdir, ValuesfileName)
+ if err := writeFile(vf, f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.schema.json if it exists
+ if c.Schema != nil {
+ filename := filepath.Join(outdir, SchemafileName)
+ if err := writeFile(filename, c.Schema); err != nil {
+ return err
+ }
+ }
+
+ // Save templates and files
+ for _, o := range [][]*common.File{c.Templates, c.Files} {
+ for _, f := range o {
+ n := filepath.Join(outdir, f.Name)
+ if err := writeFile(n, f.Data); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save dependencies
+ base := filepath.Join(outdir, ChartsDir)
+ for _, dep := range c.Dependencies() {
+ // Here, we write each dependency as a tar file.
+ if _, err := Save(dep, base); err != nil {
+ return fmt.Errorf("saving %s: %w", dep.ChartFullPath(), err)
+ }
+ }
+ return nil
+}
+
+// Save creates an archived chart to the given directory.
+//
+// This takes an existing chart and a destination directory.
+//
+// If the directory is /foo, and the chart is named bar, with version 1.0.0, this
+// will generate /foo/bar-1.0.0.tgz.
+//
+// This returns the absolute path to the chart archive file.
+func Save(c *chart.Chart, outDir string) (string, error) {
+ if err := c.Validate(); err != nil {
+ return "", fmt.Errorf("chart validation: %w", err)
+ }
+
+ filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version)
+ filename = filepath.Join(outDir, filename)
+ dir := filepath.Dir(filename)
+ if stat, err := os.Stat(dir); err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ if err2 := os.MkdirAll(dir, 0755); err2 != nil {
+ return "", err2
+ }
+ } else {
+ return "", fmt.Errorf("stat %s: %w", dir, err)
+ }
+ } else if !stat.IsDir() {
+ return "", fmt.Errorf("is not a directory: %s", dir)
+ }
+
+ f, err := os.Create(filename)
+ if err != nil {
+ return "", err
+ }
+
+ // Wrap in gzip writer
+ zipper := gzip.NewWriter(f)
+ zipper.Extra = headerBytes
+ zipper.Comment = "Helm"
+
+ // Wrap in tar writer
+ twriter := tar.NewWriter(zipper)
+ rollback := false
+ defer func() {
+ twriter.Close()
+ zipper.Close()
+ f.Close()
+ if rollback {
+ os.Remove(filename)
+ }
+ }()
+
+ if err := writeTarContents(twriter, c, ""); err != nil {
+ rollback = true
+ return filename, err
+ }
+ return filename, nil
+}
+
+func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error {
+ err := validateName(c.Name())
+ if err != nil {
+ return err
+ }
+ base := filepath.Join(prefix, c.Name())
+
+ // Pull out the dependencies of a v1 Chart, since there's no way
+ // to tell the serializer to skip a field for just this use case
+ savedDependencies := c.Metadata.Dependencies
+ if c.Metadata.APIVersion == chart.APIVersionV1 {
+ c.Metadata.Dependencies = nil
+ }
+ // Save Chart.yaml
+ cdata, err := yaml.Marshal(c.Metadata)
+ if c.Metadata.APIVersion == chart.APIVersionV1 {
+ c.Metadata.Dependencies = savedDependencies
+ }
+ if err != nil {
+ return err
+ }
+ if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata, c.ModTime); err != nil {
+ return err
+ }
+
+ // Save Chart.lock
+ // TODO: remove the APIVersion check when APIVersionV1 is not used anymore
+ if c.Metadata.APIVersion == chart.APIVersionV2 {
+ if c.Lock != nil {
+ ldata, err := yaml.Marshal(c.Lock)
+ if err != nil {
+ return err
+ }
+ if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata, c.Lock.Generated); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.yaml
+ for _, f := range c.Raw {
+ if f.Name == ValuesfileName {
+ if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data, f.ModTime); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Save values.schema.json if it exists
+ if c.Schema != nil {
+ if !json.Valid(c.Schema) {
+ return errors.New("invalid JSON in " + SchemafileName)
+ }
+ if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema, c.SchemaModTime); err != nil {
+ return err
+ }
+ }
+
+ // Save templates
+ for _, f := range c.Templates {
+ n := filepath.Join(base, f.Name)
+ if err := writeToTar(out, n, f.Data, f.ModTime); err != nil {
+ return err
+ }
+ }
+
+ // Save files
+ for _, f := range c.Files {
+ n := filepath.Join(base, f.Name)
+ if err := writeToTar(out, n, f.Data, f.ModTime); err != nil {
+ return err
+ }
+ }
+
+ // Save dependencies
+ for _, dep := range c.Dependencies() {
+ if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// writeToTar writes a single file to a tar archive.
+func writeToTar(out *tar.Writer, name string, body []byte, modTime time.Time) error {
+ // TODO: Do we need to create dummy parent directory names if none exist?
+ h := &tar.Header{
+ Name: filepath.ToSlash(name),
+ Mode: 0644,
+ Size: int64(len(body)),
+ ModTime: modTime,
+ }
+ if h.ModTime.IsZero() {
+ h.ModTime = time.Now()
+ }
+ if err := out.WriteHeader(h); err != nil {
+ return err
+ }
+ _, err := out.Write(body)
+ return err
+}
+
+// If the name has directory name has characters which would change the location
+// they need to be removed.
+func validateName(name string) error {
+ nname := filepath.Base(name)
+
+ if nname != name {
+ return common.ErrInvalidChartName{Name: name}
+ }
+
+ return nil
+}
diff --git a/helm/pkg/chart/v2/util/save_test.go b/helm/pkg/chart/v2/util/save_test.go
new file mode 100644
index 000000000..6d4e2c8cd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/save_test.go
@@ -0,0 +1,361 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+func TestSave(t *testing.T) {
+ tmp := t.TempDir()
+
+ for _, dest := range []string{tmp, filepath.Join(tmp, "newdir")} {
+ t.Run("outDir="+dest, func(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: time.Now(), Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ }
+ chartWithInvalidJSON := withSchema(*c, []byte("{"))
+
+ where, err := Save(c, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ if !strings.HasPrefix(where, dest) {
+ t.Fatalf("Expected %q to start with %q", where, dest)
+ }
+ if !strings.HasSuffix(where, ".tgz") {
+ t.Fatalf("Expected %q to end with .tgz", where)
+ }
+
+ c2, err := loader.LoadFile(where)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c2.Name() != c.Name() {
+ t.Fatalf("Expected chart archive to have %q, got %q", c.Name(), c2.Name())
+ }
+ if len(c2.Files) != 1 || c2.Files[0].Name != "scheherazade/shahryar.txt" {
+ t.Fatal("Files data did not match")
+ }
+ if c2.Lock != nil {
+ t.Fatal("Expected v1 chart archive not to contain Chart.lock file")
+ }
+
+ if !bytes.Equal(c.Schema, c2.Schema) {
+ indentation := 4
+ formattedExpected := Indent(indentation, string(c.Schema))
+ formattedActual := Indent(indentation, string(c2.Schema))
+ t.Fatalf("Schema data did not match.\nExpected:\n%s\nActual:\n%s", formattedExpected, formattedActual)
+ }
+ if _, err := Save(&chartWithInvalidJSON, dest); err == nil {
+ t.Fatalf("Invalid JSON was not caught while saving chart")
+ }
+
+ c.Metadata.APIVersion = chart.APIVersionV2
+ where, err = Save(c, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ c2, err = loader.LoadFile(where)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c2.Lock == nil {
+ t.Fatal("Expected v2 chart archive to contain a Chart.lock file")
+ }
+ if c2.Lock.Digest != c.Lock.Digest {
+ t.Fatal("Chart.lock data did not match")
+ }
+ })
+ }
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "../ahab",
+ Version: "1.2.3",
+ },
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: time.Now(), Data: []byte("1,001 Nights")},
+ },
+ }
+ _, err := Save(c, tmp)
+ if err == nil {
+ t.Fatal("Expected error saving chart with invalid name")
+ }
+}
+
+// Creates a copy with a different schema; does not modify anything.
+func withSchema(chart chart.Chart, schema []byte) chart.Chart {
+ chart.Schema = schema
+ return chart
+}
+
+func Indent(n int, text string) string {
+ startOfLine := regexp.MustCompile(`(?m)^`)
+ indentation := strings.Repeat(" ", n)
+ return startOfLine.ReplaceAllLiteralString(text, indentation)
+}
+
+func TestSavePreservesTimestamps(t *testing.T) {
+ // Test executes so quickly that if we don't subtract a second, the
+ // check will fail because `initialCreateTime` will be identical to the
+ // written timestamp for the files.
+ initialCreateTime := time.Now().Add(-1 * time.Second)
+
+ tmp := t.TempDir()
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ ModTime: initialCreateTime,
+ Values: map[string]interface{}{
+ "imageName": "testimage",
+ "imageId": 42,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: initialCreateTime, Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ SchemaModTime: initialCreateTime,
+ }
+
+ where, err := Save(c, tmp)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+
+ allHeaders, err := retrieveAllHeadersFromTar(where)
+ if err != nil {
+ t.Fatalf("Failed to parse tar: %v", err)
+ }
+
+ roundedTime := initialCreateTime.Round(time.Second)
+ for _, header := range allHeaders {
+ if !header.ModTime.Equal(roundedTime) {
+ t.Fatalf("File timestamp not preserved: %v", header.ModTime)
+ }
+ }
+}
+
+// We could refactor `load.go` to use this `retrieveAllHeadersFromTar` function
+// as well, so we are not duplicating components of the code which iterate
+// through the tar.
+func retrieveAllHeadersFromTar(path string) ([]*tar.Header, error) {
+ raw, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer raw.Close()
+
+ unzipped, err := gzip.NewReader(raw)
+ if err != nil {
+ return nil, err
+ }
+ defer unzipped.Close()
+
+ tr := tar.NewReader(unzipped)
+ headers := []*tar.Header{}
+ for {
+ hd, err := tr.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ headers = append(headers, hd)
+ }
+
+ return headers, nil
+}
+
+func TestSaveDir(t *testing.T) {
+ tmp := t.TempDir()
+
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: modTime, Data: []byte("1,001 Nights")},
+ },
+ Templates: []*common.File{
+ {Name: path.Join(TemplatesDir, "nested", "dir", "thing.yaml"), ModTime: modTime, Data: []byte("abc: {{ .Values.abc }}")},
+ },
+ }
+
+ if err := SaveDir(c, tmp); err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+
+ c2, err := loader.LoadDir(tmp + "/ahab")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c2.Name() != c.Name() {
+ t.Fatalf("Expected chart archive to have %q, got %q", c.Name(), c2.Name())
+ }
+
+ if len(c2.Templates) != 1 || c2.Templates[0].Name != c.Templates[0].Name {
+ t.Fatal("Templates data did not match")
+ }
+
+ if len(c2.Files) != 1 || c2.Files[0].Name != c.Files[0].Name {
+ t.Fatal("Files data did not match")
+ }
+
+ tmp2 := t.TempDir()
+ c.Metadata.Name = "../ahab"
+ pth := filepath.Join(tmp2, "tmpcharts")
+ if err := os.MkdirAll(filepath.Join(pth), 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := SaveDir(c, pth); err.Error() != "\"../ahab\" is not a valid chart name" {
+ t.Fatalf("Did not get expected error for chart named %q", c.Name())
+ }
+}
+
+func TestRepeatableSave(t *testing.T) {
+ tmp := t.TempDir()
+ defer os.RemoveAll(tmp)
+ modTime := time.Date(2021, 9, 1, 20, 34, 58, 651387237, time.UTC)
+ tests := []struct {
+ name string
+ chart *chart.Chart
+ want string
+ }{
+ {
+ name: "Package 1 file",
+ chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV2,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ ModTime: modTime,
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ Generated: modTime,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: modTime, Data: []byte("1,001 Nights")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ SchemaModTime: modTime,
+ },
+ want: "fea2662522317b65c2788ff9e5fc446a9264830038dac618d4449493d99b3257",
+ },
+ {
+ name: "Package 2 files",
+ chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV2,
+ Name: "ahab",
+ Version: "1.2.3",
+ },
+ ModTime: modTime,
+ Lock: &chart.Lock{
+ Digest: "testdigest",
+ Generated: modTime,
+ },
+ Files: []*common.File{
+ {Name: "scheherazade/shahryar.txt", ModTime: modTime, Data: []byte("1,001 Nights")},
+ {Name: "scheherazade/dunyazad.txt", ModTime: modTime, Data: []byte("1,001 Nights again")},
+ },
+ Schema: []byte("{\n \"title\": \"Values\"\n}"),
+ SchemaModTime: modTime,
+ },
+ want: "7ae92b2f274bb51ea3f1969e4187d78cc52b5f6f663b44b8fb3b40bcb8ee46f3",
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ // create package
+ dest := path.Join(tmp, "newdir")
+ where, err := Save(test.chart, dest)
+ if err != nil {
+ t.Fatalf("Failed to save: %s", err)
+ }
+ // get shasum for package
+ result, err := sha256Sum(where)
+ if err != nil {
+ t.Fatalf("Failed to check shasum: %s", err)
+ }
+ // assert that the package SHA is what we wanted.
+ if result != test.want {
+ t.Errorf("FormatName() result = %v, want %v", result, test.want)
+ }
+ })
+ }
+}
+
+func sha256Sum(filePath string) (string, error) {
+ f, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ h := sha256.New()
+ if _, err := io.Copy(h, f); err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
new file mode 100644
index 000000000..d778f8fe9
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ - name: child
+ alias: bar
+ version: 1.0.0
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
new file mode 100644
index 000000000..220fda663
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..50e620a8d
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
new file mode 100644
index 000000000..1830492ef
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/charts/grandchild/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}-{{ .Values.from }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..b5d55af7c
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{- toYaml .Values | nindent 2 }}
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/values.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/values.yaml
new file mode 100644
index 000000000..695521a4a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-dependency-aliased-twice/values.yaml
@@ -0,0 +1,7 @@
+foo:
+ grandchild:
+ from: foo
+bar:
+ grandchild:
+ from: bar
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
new file mode 100644
index 000000000..c408f0ca8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: chart-with-dependency-aliased-twice
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: child
+ alias: foo
+ version: 1.0.0
+ import-values:
+ - parent: foo-defaults
+ child: defaults
+ - name: child
+ alias: bar
+ version: 1.0.0
+ import-values:
+ - parent: bar-defaults
+ child: defaults
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
new file mode 100644
index 000000000..ecdaf04dc
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: child
+type: application
+version: 1.0.0
+
+dependencies:
+ - name: grandchild
+ version: 1.0.0
+ import-values:
+ - parent: defaults
+ child: defaults
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
new file mode 100644
index 000000000..50e620a8d
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: 1.0.0
+name: grandchild
+type: application
+version: 1.0.0
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
new file mode 100644
index 000000000..f51c594f4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/charts/grandchild/values.yaml
@@ -0,0 +1,2 @@
+defaults:
+ defaultValue: "42"
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
new file mode 100644
index 000000000..3140f53dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/charts/child/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ .Values.defaults | toYaml }}
+
diff --git a/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
new file mode 100644
index 000000000..a2b62c95a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chart-with-import-from-aliased-dependencies/templates/dummy.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}
+data:
+ {{ toYaml .Values.defaults | indent 2 }}
+
diff --git a/helm/pkg/chart/v2/util/testdata/chartfiletest.yaml b/helm/pkg/chart/v2/util/testdata/chartfiletest.yaml
new file mode 100644
index 000000000..134cd1109
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/chartfiletest.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/.helmignore b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/Chart.lock b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/Chart.yaml
new file mode 100644
index 000000000..751a3aa67
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/Chart.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+ alias: mariners2
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+ alias: mariners1
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/INSTALL.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/LICENSE b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/_ignore_me b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/docs/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/icon.svg b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/ignore/me.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/templates/template.tpl b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-alias/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/.helmignore b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/.helmignore
new file mode 100644
index 000000000..8a71bc82e
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/.helmignore
@@ -0,0 +1,2 @@
+ignore/
+.*
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/Chart.yaml
new file mode 100644
index 000000000..7c071c27b
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/.ignore_me b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/.ignore_me
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/_ignore_me b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/templates/template.tpl b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-helmignore/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/.helmignore b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..7c071c27b
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/LICENSE b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/icon.svg b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/ignore/me.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-no-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..fe7a99681
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/ignore/me.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-all-in-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml
new file mode 100644
index 000000000..7fc39e28d
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz
new file mode 100644
index 000000000..3190136b0
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/charts/mariner-4.3.2.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/ignore/me.txt b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/dependent-chart-with-mixed-requirements-yaml/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz-1.2.3.tgz b/helm/pkg/chart/v2/util/testdata/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..8731dce02
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/frobnitz-1.2.3.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/.helmignore b/helm/pkg/chart/v2/util/testdata/frobnitz/.helmignore
new file mode 100644
index 000000000..9973a57b8
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/.helmignore
@@ -0,0 +1 @@
+ignore/
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/Chart.lock b/helm/pkg/chart/v2/util/testdata/frobnitz/Chart.lock
new file mode 100644
index 000000000..6fcc2ed9f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/Chart.lock
@@ -0,0 +1,8 @@
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
+digest: invalid
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/Chart.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/Chart.yaml
new file mode 100644
index 000000000..fcd4a4a37
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/Chart.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+name: frobnitz
+description: This is a frobnitz.
+version: "1.2.3"
+keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+maintainers:
+ - name: The Helm Team
+ email: helm@example.com
+ - name: Someone Else
+ email: nobody@example.com
+sources:
+ - https://example.com/foo/bar
+home: http://example.com
+icon: https://example.com/64x64.png
+annotations:
+ extrakey: extravalue
+ anotherkey: anothervalue
+dependencies:
+ - name: alpine
+ version: "0.1.0"
+ repository: https://example.com/charts
+ - name: mariner
+ version: "4.3.2"
+ repository: https://example.com/charts
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/INSTALL.txt b/helm/pkg/chart/v2/util/testdata/frobnitz/INSTALL.txt
new file mode 100644
index 000000000..2010438c2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/INSTALL.txt
@@ -0,0 +1 @@
+This is an install document. The client may display this.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/LICENSE b/helm/pkg/chart/v2/util/testdata/frobnitz/LICENSE
new file mode 100644
index 000000000..6121943b1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/LICENSE
@@ -0,0 +1 @@
+LICENSE placeholder.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/README.md b/helm/pkg/chart/v2/util/testdata/frobnitz/README.md
new file mode 100644
index 000000000..8cf4cc3d7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/README.md
@@ -0,0 +1,11 @@
+# Frobnitz
+
+This is an example chart.
+
+## Usage
+
+This is an example. It has no usage.
+
+## Development
+
+For developer info, see the top-level repository.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/_ignore_me b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/_ignore_me
new file mode 100644
index 000000000..2cecca682
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/_ignore_me
@@ -0,0 +1 @@
+This should be ignored by the loader, but may be included in a chart.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/Chart.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/Chart.yaml
new file mode 100644
index 000000000..79e0d65db
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: alpine
+description: Deploy a basic Alpine Linux pod
+version: 0.1.0
+home: https://helm.sh/helm
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/README.md b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/README.md
new file mode 100644
index 000000000..b30b949dd
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.toml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
new file mode 100644
index 000000000..1c9dd5fa4
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast1/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: mast1
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
new file mode 100644
index 000000000..42c39c262
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast1/values.yaml
@@ -0,0 +1,4 @@
+# Default values for mast1.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name = "value"
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz
new file mode 100644
index 000000000..61cb62051
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/charts/mast2-0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/values.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/values.yaml
new file mode 100644
index 000000000..6c2aab7ba
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: "my-alpine"
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/Chart.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/Chart.yaml
new file mode 100644
index 000000000..92dc4b390
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+name: mariner
+description: A Helm chart for Kubernetes
+version: 4.3.2
+home: ""
+dependencies:
+ - name: albatross
+ repository: https://example.com/mariner/charts
+ version: "0.1.0"
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml
new file mode 100644
index 000000000..b5188fde0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/charts/albatross/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+name: albatross
+description: A Helm chart for Kubernetes
+version: 0.1.0
+home: ""
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml
new file mode 100644
index 000000000..3121cd7ce
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/charts/albatross/values.yaml
@@ -0,0 +1,4 @@
+albatross: "true"
+
+global:
+ author: Coleridge
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl
new file mode 100644
index 000000000..29c11843a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/templates/placeholder.tpl
@@ -0,0 +1 @@
+# This is a placeholder.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/values.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/values.yaml
new file mode 100644
index 000000000..b0ccb0086
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/charts/mariner/values.yaml
@@ -0,0 +1,7 @@
+# Default values for .
+# This is a YAML-formatted file. https://github.com/toml-lang/toml
+# Declare name/value pairs to be passed into your templates.
+# name: "value"
+
+:
+ test: true
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/docs/README.md b/helm/pkg/chart/v2/util/testdata/frobnitz/docs/README.md
new file mode 100644
index 000000000..d40747caf
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/docs/README.md
@@ -0,0 +1 @@
+This is a placeholder for documentation.
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/icon.svg b/helm/pkg/chart/v2/util/testdata/frobnitz/icon.svg
new file mode 100644
index 000000000..892130606
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/icon.svg
@@ -0,0 +1,8 @@
+
+
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/ignore/me.txt b/helm/pkg/chart/v2/util/testdata/frobnitz/ignore/me.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/templates/template.tpl b/helm/pkg/chart/v2/util/testdata/frobnitz/templates/template.tpl
new file mode 100644
index 000000000..c651ee6a0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/templates/template.tpl
@@ -0,0 +1 @@
+Hello {{.Name | default "world"}}
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz/values.yaml b/helm/pkg/chart/v2/util/testdata/frobnitz/values.yaml
new file mode 100644
index 000000000..61f501258
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/frobnitz/values.yaml
@@ -0,0 +1,6 @@
+# A values file contains configuration.
+
+name: "Some Name"
+
+section:
+ name: "Name in a section"
diff --git a/helm/pkg/chart/v2/util/testdata/frobnitz_backslash-1.2.3.tgz b/helm/pkg/chart/v2/util/testdata/frobnitz_backslash-1.2.3.tgz
new file mode 100644
index 000000000..692965951
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/frobnitz_backslash-1.2.3.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/genfrob.sh b/helm/pkg/chart/v2/util/testdata/genfrob.sh
new file mode 100755
index 000000000..35fdd59f2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/genfrob.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Pack the albatross chart into the mariner chart.
+echo "Packing albatross into mariner"
+tar -zcvf mariner/charts/albatross-0.1.0.tgz albatross
+
+echo "Packing mariner into frobnitz"
+tar -zcvf frobnitz/charts/mariner-4.3.2.tgz mariner
+tar -zcvf frobnitz_backslash/charts/mariner-4.3.2.tgz mariner
+
+# Pack the frobnitz chart.
+echo "Packing frobnitz"
+tar --exclude=ignore/* -zcvf frobnitz-1.2.3.tgz frobnitz
+tar --exclude=ignore/* -zcvf frobnitz_backslash-1.2.3.tgz frobnitz_backslash
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock
new file mode 100644
index 000000000..b2f17fb39
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: dev
+ repository: file://envs/dev
+ version: v0.1.0
+- name: prod
+ repository: file://envs/prod
+ version: v0.1.0
+digest: sha256:9403fc24f6cf9d6055820126cf7633b4bd1fed3c77e4880c674059f536346182
+generated: "2020-02-03T10:38:51.180474+01:00"
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml
new file mode 100644
index 000000000..24b26d9e5
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v2
+name: parent-chart
+version: v0.1.0
+appVersion: v0.1.0
+dependencies:
+ - name: dev
+ repository: "file://envs/dev"
+ version: ">= 0.0.1"
+ condition: dev.enabled,global.dev.enabled
+ tags:
+ - dev
+ import-values:
+ - data
+
+ - name: prod
+ repository: "file://envs/prod"
+ version: ">= 0.0.1"
+ condition: prod.enabled,global.prod.enabled
+ tags:
+ - prod
+ import-values:
+ - data
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz
new file mode 100644
index 000000000..d28e1621c
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/dev-v0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz
new file mode 100644
index 000000000..a0c5aa84b
Binary files /dev/null and b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/charts/prod-v0.1.0.tgz differ
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml
new file mode 100644
index 000000000..80a52f538
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+name: dev
+version: v0.1.0
+appVersion: v0.1.0
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml
new file mode 100644
index 000000000..38f03484d
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/dev/values.yaml
@@ -0,0 +1,9 @@
+# Dev values parent-chart
+nameOverride: parent-chart-dev
+exports:
+ data:
+ resources:
+ autoscaler:
+ minReplicas: 1
+ maxReplicas: 3
+ targetCPUUtilizationPercentage: 80
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml
new file mode 100644
index 000000000..bda4be458
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+name: prod
+version: v0.1.0
+appVersion: v0.1.0
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml
new file mode 100644
index 000000000..10cc756b2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/envs/prod/values.yaml
@@ -0,0 +1,9 @@
+# Prod values parent-chart
+nameOverride: parent-chart-prod
+exports:
+ data:
+ resources:
+ autoscaler:
+ minReplicas: 2
+ maxReplicas: 5
+ targetCPUUtilizationPercentage: 90
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml
new file mode 100644
index 000000000..976e5a8f1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/templates/autoscaler.yaml
@@ -0,0 +1,16 @@
+###################################################################################################
+# parent-chart horizontal pod autoscaler
+###################################################################################################
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ .Release.Name }}-autoscaler
+ namespace: {{ .Release.Namespace }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1beta1
+ kind: Deployment
+ name: {{ .Release.Name }}
+ minReplicas: {{ required "A valid .Values.resources.autoscaler.minReplicas entry required!" .Values.resources.autoscaler.minReplicas }}
+ maxReplicas: {{ required "A valid .Values.resources.autoscaler.maxReplicas entry required!" .Values.resources.autoscaler.maxReplicas }}
+ targetCPUUtilizationPercentage: {{ required "A valid .Values.resources.autoscaler.targetCPUUtilizationPercentage!" .Values.resources.autoscaler.targetCPUUtilizationPercentage }}
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml
new file mode 100644
index 000000000..b812f0a33
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/import-values-from-enabled-subchart/parent-chart/values.yaml
@@ -0,0 +1,10 @@
+# Default values for parent-chart.
+nameOverride: parent-chart
+tags:
+ dev: false
+ prod: true
+resources:
+ autoscaler:
+ minReplicas: 0
+ maxReplicas: 0
+ targetCPUUtilizationPercentage: 99
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/joonix/Chart.yaml b/helm/pkg/chart/v2/util/testdata/joonix/Chart.yaml
new file mode 100644
index 000000000..c3464c56e
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/joonix/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: joonix
+version: 1.2.3
diff --git a/helm/pkg/chart/v2/util/testdata/joonix/charts/.gitkeep b/helm/pkg/chart/v2/util/testdata/joonix/charts/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/Chart.yaml
new file mode 100644
index 000000000..27118672a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/Chart.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: parentchart
+version: 0.1.0
+dependencies:
+ - name: subchart1
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart1.enabled
+ tags:
+ - front-end
+ - subchart1
+ import-values:
+ - child: SC1data
+ parent: imported-chart1
+ - child: SC1data
+ parent: overridden-chart1
+ - child: imported-chartA
+ parent: imported-chartA
+ - child: imported-chartA-B
+ parent: imported-chartA-B
+ - child: overridden-chartA-B
+ parent: overridden-chartA-B
+ - child: SCBexported1A
+ parent: .
+ - SCBexported2
+ - SC1exported1
+
+ - name: subchart2
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart2.enabled
+ tags:
+ - back-end
+ - subchart2
+
+ - name: subchart2
+ alias: subchart2alias
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchart2alias.enabled
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/README.md b/helm/pkg/chart/v2/util/testdata/subpop/README.md
new file mode 100644
index 000000000..e43fbfe9c
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/README.md
@@ -0,0 +1,18 @@
+## Subpop
+
+This chart is for testing the processing of enabled/disabled charts
+via conditions and tags.
+
+Currently there are three levels:
+
+````
+parent
+-1 tags: front-end, subchart1
+--A tags: front-end, subchartA
+--B tags: front-end, subchartB
+-2 tags: back-end, subchart2
+--B tags: back-end, subchartB
+--C tags: back-end, subchartC
+````
+
+Tags and conditions are currently in requirements.yaml files.
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/Chart.yaml
new file mode 100644
index 000000000..9d8c03ee1
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/Chart.yaml
@@ -0,0 +1,36 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchart1
+version: 0.1.0
+dependencies:
+ - name: subcharta
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subcharta.enabled
+ tags:
+ - front-end
+ - subcharta
+ import-values:
+ - child: SCAdata
+ parent: imported-chartA
+ - child: SCAdata
+ parent: overridden-chartA
+ - child: SCAdata
+ parent: imported-chartA-B
+
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ import-values:
+ - child: SCBdata
+ parent: imported-chartB
+ - child: SCBdata
+ parent: imported-chartA-B
+ - child: exports.SCBexported2
+ parent: exports.SCBexported2
+ - SCBexported1
+
+ tags:
+ - front-end
+ - subchartb
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml
new file mode 100644
index 000000000..be3edcefb
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subcharta
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml
new file mode 100644
index 000000000..f0381ae6a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartA/values.yaml
@@ -0,0 +1,17 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchartA
+service:
+ name: apache
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+SCAdata:
+ SCAbool: false
+ SCAfloat: 3.1
+ SCAint: 55
+ SCAstring: "jabba"
+ SCAnested1:
+ SCAnested2: true
+
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..c3c6bbaf0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml
new file mode 100644
index 000000000..774fdd75c
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/charts/subchartB/values.yaml
@@ -0,0 +1,35 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+SCBdata:
+ SCBbool: true
+ SCBfloat: 7.77
+ SCBint: 33
+ SCBstring: "boba"
+
+exports:
+ SCBexported1:
+ SCBexported1A:
+ SCBexported1B: 1965
+
+ SCBexported2:
+ SCBexported2A: "blaster"
+
+global:
+ kolla:
+ nova:
+ api:
+ all:
+ port: 8774
+ metadata:
+ all:
+ port: 8775
+
+
+
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/crds/crdA.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/crds/crdA.yaml
new file mode 100644
index 000000000..fca77fd4b
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/crds/crdA.yaml
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: testCRDs
+spec:
+ group: testCRDGroups
+ names:
+ kind: TestCRD
+ listKind: TestCRDList
+ plural: TestCRDs
+ shortNames:
+ - tc
+ singular: authconfig
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/NOTES.txt b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/NOTES.txt
new file mode 100644
index 000000000..4bdf443f6
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/NOTES.txt
@@ -0,0 +1 @@
+Sample notes for {{ .Chart.Name }}
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/service.yaml
new file mode 100644
index 000000000..fee94dced
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ kube-version/major: "{{ .Capabilities.KubeVersion.Major }}"
+ kube-version/minor: "{{ .Capabilities.KubeVersion.Minor }}"
+ kube-version/version: "v{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}.0"
+{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test" }}
+ kube-api-version/test: v1
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml
new file mode 100644
index 000000000..91b954e5f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/role.yaml
@@ -0,0 +1,7 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ .Chart.Name }}-role
+rules:
+- resources: ["*"]
+ verbs: ["get","list","watch"]
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml
new file mode 100644
index 000000000..5d193f1a6
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/rolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ .Chart.Name }}-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ .Chart.Name }}-role
+subjects:
+- kind: ServiceAccount
+ name: {{ .Chart.Name }}-sa
+ namespace: default
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml
new file mode 100644
index 000000000..7126c7d89
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/templates/subdir/serviceaccount.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Chart.Name }}-sa
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/values.yaml
new file mode 100644
index 000000000..a974e316a
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart1/values.yaml
@@ -0,0 +1,55 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchart1
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+
+SC1data:
+ SC1bool: true
+ SC1float: 3.14
+ SC1int: 100
+ SC1string: "dollywood"
+ SC1extra1: 11
+
+imported-chartA:
+ SC1extra2: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 3.14
+ SCAint: 100
+ SCAstring: "jabbathehut"
+ SC1extra3: true
+
+imported-chartA-B:
+ SC1extra5: "tiller"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 3.33
+ SCAint: 555
+ SCAstring: "wormwood"
+ SCAextra1: 23
+
+ SCBbool: true
+ SCBfloat: 0.25
+ SCBint: 98
+ SCBstring: "murkwood"
+ SCBextra1: 13
+
+ SC1extra6: 77
+
+SCBexported1A:
+ SC1extra7: true
+
+exports:
+ SC1exported1:
+ global:
+ SC1exported2:
+ all:
+ SC1exported3: "SC1expstr"
\ No newline at end of file
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/Chart.yaml
new file mode 100644
index 000000000..f936528a7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchart2
+version: 0.1.0
+dependencies:
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ tags:
+ - back-end
+ - subchartb
+ - name: subchartc
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartc.enabled
+ tags:
+ - back-end
+ - subchartc
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..c3c6bbaf0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..fb3dfc445
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart2-{{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: subchart2-{{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartB/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml
new file mode 100644
index 000000000..dcc45c088
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchartc
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/charts/subchartC/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/values.yaml
new file mode 100644
index 000000000..5e5b21065
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/charts/subchart2/values.yaml
@@ -0,0 +1,21 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/noreqs/Chart.yaml b/helm/pkg/chart/v2/util/testdata/subpop/noreqs/Chart.yaml
new file mode 100644
index 000000000..bbb0941c3
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/noreqs/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: parentchart
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/noreqs/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/subpop/noreqs/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/noreqs/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/noreqs/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/noreqs/values.yaml
new file mode 100644
index 000000000..4ed3b7ad3
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/noreqs/values.yaml
@@ -0,0 +1,26 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+replicaCount: 1
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+
+# switch-like
+tags:
+ front-end: true
+ back-end: false
diff --git a/helm/pkg/chart/v2/util/testdata/subpop/values.yaml b/helm/pkg/chart/v2/util/testdata/subpop/values.yaml
new file mode 100644
index 000000000..ba70ed406
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/subpop/values.yaml
@@ -0,0 +1,45 @@
+# parent/values.yaml
+
+imported-chart1:
+ SPextra1: "helm rocks"
+
+overridden-chart1:
+ SC1bool: false
+ SC1float: 3.141592
+ SC1int: 99
+ SC1string: "pollywog"
+ SPextra2: 42
+
+
+imported-chartA:
+ SPextra3: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 41.3
+ SCAint: 808
+ SCAstring: "jabberwocky"
+ SPextra4: true
+
+imported-chartA-B:
+ SPextra5: "k8s"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 41.3
+ SCAint: 808
+ SCAstring: "jabberwocky"
+ SCBbool: false
+ SCBfloat: 1.99
+ SCBint: 77
+ SCBstring: "jango"
+ SPextra6: 111
+
+tags:
+ front-end: true
+ back-end: false
+
+subchart2alias:
+ enabled: false
+
+ensurenull: null
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/README.md b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/README.md
new file mode 100644
index 000000000..536bb9792
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/README.md
@@ -0,0 +1,16 @@
+# Three Level Dependent Chart
+
+This chart is for testing the processing of multi-level dependencies.
+
+Consists of the following charts:
+
+- Library Chart
+- App Chart (Uses Library Chart as dependency, 2x: app1/app2)
+- Umbrella Chart (Has all the app charts as dependencies)
+
+The precedence is as follows: `library < app < umbrella`
+
+Catches two use-cases:
+
+- app overwriting library (app2)
+- umbrella overwriting app and library (app1)
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml
new file mode 100644
index 000000000..e5dbe3131
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v2
+name: umbrella
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: app1
+ version: 0.1.0
+ condition: app1.enabled
+- name: app2
+ version: 0.1.0
+ condition: app2.enabled
+- name: app3
+ version: 0.1.0
+ condition: app3.enabled
+- name: app4
+ version: 0.1.0
+ condition: app4.enabled
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml
new file mode 100644
index 000000000..388245e31
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+name: app1
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml
new file mode 100644
index 000000000..f2f8a90d9
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml
new file mode 100644
index 000000000..3728aa930
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app1/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 1234
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml
new file mode 100644
index 000000000..fea2768c7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+name: app2
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml
new file mode 100644
index 000000000..f2f8a90d9
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml
new file mode 100644
index 000000000..98bd6d24b
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app2/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 8080
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml
new file mode 100644
index 000000000..a42f58773
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+name: app3
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
+ import-values:
+ - defaults
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml
new file mode 100644
index 000000000..f2f8a90d9
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml
new file mode 100644
index 000000000..b738e2a57
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app3/values.yaml
@@ -0,0 +1,2 @@
+service:
+ type: ClusterIP
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml
new file mode 100644
index 000000000..574bfdfd0
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v2
+name: app4
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+
+dependencies:
+- name: library
+ version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml
new file mode 100644
index 000000000..f2f8a90d9
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: library
+description: A Helm chart for Kubernetes
+type: library
+version: 0.1.0
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml
new file mode 100644
index 000000000..3fd398b53
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/templates/service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml
new file mode 100644
index 000000000..0c08b6cd2
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/charts/library/values.yaml
@@ -0,0 +1,5 @@
+exports:
+ defaults:
+ service:
+ type: ClusterIP
+ port: 9090
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml
new file mode 100644
index 000000000..8ed8ddf1f
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/templates/service.yaml
@@ -0,0 +1 @@
+{{- include "library.service" . }}
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml
new file mode 100644
index 000000000..3728aa930
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/charts/app4/values.yaml
@@ -0,0 +1,3 @@
+service:
+ type: ClusterIP
+ port: 1234
diff --git a/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/values.yaml b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/values.yaml
new file mode 100644
index 000000000..de0bafa51
--- /dev/null
+++ b/helm/pkg/chart/v2/util/testdata/three-level-dependent-chart/umbrella/values.yaml
@@ -0,0 +1,14 @@
+app1:
+ enabled: true
+ service:
+ type: ClusterIP
+ port: 3456
+
+app2:
+ enabled: true
+
+app3:
+ enabled: true
+
+app4:
+ enabled: true
diff --git a/helm/pkg/chart/v2/util/validate_name.go b/helm/pkg/chart/v2/util/validate_name.go
new file mode 100644
index 000000000..6595e085d
--- /dev/null
+++ b/helm/pkg/chart/v2/util/validate_name.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+)
+
+// validName is a regular expression for resource names.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+var (
+ // errMissingName indicates that a release (name) was not provided.
+ errMissingName = errors.New("no name provided")
+
+ // errInvalidName indicates that an invalid release name was provided
+ errInvalidName = fmt.Errorf(
+ "invalid release name, must match regex %s and the length must not be longer than 53",
+ validName.String())
+
+ // errInvalidKubernetesName indicates that the name does not meet the Kubernetes
+ // restrictions on metadata names.
+ errInvalidKubernetesName = fmt.Errorf(
+ "invalid metadata name, must match regex %s and the length must not be longer than 253",
+ validName.String())
+)
+
+const (
+ // According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names)
+ // some resource names have a max length of 63 characters while others have a max
+ // length of 253 characters. As we cannot be sure the resources used in a chart, we
+ // therefore need to limit it to 63 chars and reserve 10 chars for additional part to name
+ // of the resource. The reason is that chart maintainers can use release name as part of
+ // the resource name (and some additional chars).
+ maxReleaseNameLen = 53
+ // maxMetadataNameLen is the maximum length Kubernetes allows for any name.
+ maxMetadataNameLen = 253
+)
+
+// ValidateReleaseName performs checks for an entry for a Helm release name
+//
+// For Helm to allow a name, it must be below a certain character count (53) and also match
+// a regular expression.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+func ValidateReleaseName(name string) error {
+ // This case is preserved for backwards compatibility
+ if name == "" {
+ return errMissingName
+
+ }
+ if len(name) > maxReleaseNameLen || !validName.MatchString(name) {
+ return errInvalidName
+ }
+ return nil
+}
+
+// ValidateMetadataName validates the name field of a Kubernetes metadata object.
+//
+// Empty strings, strings longer than 253 chars, or strings that don't match the regexp
+// will fail.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// Deprecated: remove in Helm 4. Name validation now uses rules defined in
+// pkg/lint/rules.validateMetadataNameFunc()
+func ValidateMetadataName(name string) error {
+ if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) {
+ return errInvalidKubernetesName
+ }
+ return nil
+}
diff --git a/helm/pkg/chart/v2/util/validate_name_test.go b/helm/pkg/chart/v2/util/validate_name_test.go
new file mode 100644
index 000000000..cfc62a0f7
--- /dev/null
+++ b/helm/pkg/chart/v2/util/validate_name_test.go
@@ -0,0 +1,91 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "testing"
+
+// TestValidateReleaseName is a regression test for ValidateName
+//
+// Kubernetes has strict naming conventions for resource names. This test represents
+// those conventions.
+//
+// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// NOTE: At the time of this writing, the docs above say that names cannot begin with
+// digits. However, `kubectl`'s regular expression explicit allows this, and
+// Kubernetes (at least as of 1.18) also accepts resources whose names begin with digits.
+func TestValidateReleaseName(t *testing.T) {
+ names := map[string]bool{
+ "": false,
+ "foo": true,
+ "foo.bar1234baz.seventyone": true,
+ "FOO": false,
+ "123baz": true,
+ "foo.BAR.baz": false,
+ "one-two": true,
+ "-two": false,
+ "one_two": false,
+ "a..b": false,
+ "%^$%*@^*@^": false,
+ "example:com": false,
+ "example%%com": false,
+ "a1111111111111111111111111111111111111111111111111111111111z": false,
+ }
+ for input, expectPass := range names {
+ if err := ValidateReleaseName(input); (err == nil) != expectPass {
+ st := "fail"
+ if expectPass {
+ st = "succeed"
+ }
+ t.Errorf("Expected %q to %s", input, st)
+ }
+ }
+}
+
+func TestValidateMetadataName(t *testing.T) {
+ names := map[string]bool{
+ "": false,
+ "foo": true,
+ "foo.bar1234baz.seventyone": true,
+ "FOO": false,
+ "123baz": true,
+ "foo.BAR.baz": false,
+ "one-two": true,
+ "-two": false,
+ "one_two": false,
+ "a..b": false,
+ "%^$%*@^*@^": false,
+ "example:com": false,
+ "example%%com": false,
+ "a1111111111111111111111111111111111111111111111111111111111z": true,
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z" +
+ "a1111111111111111111111111111111111111111111111111111111111z": false,
+ }
+ for input, expectPass := range names {
+ if err := ValidateMetadataName(input); (err == nil) != expectPass {
+ st := "fail"
+ if expectPass {
+ st = "succeed"
+ }
+ t.Errorf("Expected %q to %s", input, st)
+ }
+ }
+}
diff --git a/helm/pkg/cli/environment.go b/helm/pkg/cli/environment.go
new file mode 100644
index 000000000..5c19734aa
--- /dev/null
+++ b/helm/pkg/cli/environment.go
@@ -0,0 +1,302 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package cli describes the operating environment for the Helm CLI.
+
+Helm's environment encapsulates all of the service dependencies Helm has.
+These dependencies are expressed as interfaces so that alternate implementations
+(mocks, etc.) can be easily generated.
+*/
+package cli
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/pflag"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/client-go/rest"
+
+ "helm.sh/helm/v4/internal/version"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/kube"
+)
+
+// defaultMaxHistory sets the maximum number of releases to 0: unlimited
+const defaultMaxHistory = 10
+
+// defaultBurstLimit sets the default client-side throttling limit
+const defaultBurstLimit = 100
+
+// defaultQPS sets the default QPS value to 0 to use library defaults unless specified
+const defaultQPS = float32(0)
+
+// EnvSettings describes all of the environment settings.
+type EnvSettings struct {
+ namespace string
+ config *genericclioptions.ConfigFlags
+
+ // KubeConfig is the path to the kubeconfig file
+ KubeConfig string
+ // KubeContext is the name of the kubeconfig context.
+ KubeContext string
+ // Bearer KubeToken used for authentication
+ KubeToken string
+ // Username to impersonate for the operation
+ KubeAsUser string
+ // Groups to impersonate for the operation, multiple groups parsed from a comma delimited list
+ KubeAsGroups []string
+ // Kubernetes API Server Endpoint for authentication
+ KubeAPIServer string
+ // Custom certificate authority file.
+ KubeCaFile string
+ // KubeInsecureSkipTLSVerify indicates if server's certificate will not be checked for validity.
+ // This makes the HTTPS connections insecure
+ KubeInsecureSkipTLSVerify bool
+ // KubeTLSServerName overrides the name to use for server certificate validation.
+ // If it is not provided, the hostname used to contact the server is used
+ KubeTLSServerName string
+ // Debug indicates whether or not Helm is running in Debug mode.
+ Debug bool
+ // RegistryConfig is the path to the registry config file.
+ RegistryConfig string
+ // RepositoryConfig is the path to the repositories file.
+ RepositoryConfig string
+ // RepositoryCache is the path to the repository cache directory.
+ RepositoryCache string
+ // PluginsDirectory is the path to the plugins directory.
+ PluginsDirectory string
+ // MaxHistory is the max release history maintained.
+ MaxHistory int
+ // BurstLimit is the default client-side throttling limit.
+ BurstLimit int
+ // QPS is queries per second which may be used to avoid throttling.
+ QPS float32
+ // ColorMode controls colorized output (never, auto, always)
+ ColorMode string
+ // ContentCache is the location where cached charts are stored
+ ContentCache string
+}
+
+func New() *EnvSettings {
+ env := &EnvSettings{
+ namespace: os.Getenv("HELM_NAMESPACE"),
+ MaxHistory: envIntOr("HELM_MAX_HISTORY", defaultMaxHistory),
+ KubeConfig: os.Getenv("KUBECONFIG"),
+ KubeContext: os.Getenv("HELM_KUBECONTEXT"),
+ KubeToken: os.Getenv("HELM_KUBETOKEN"),
+ KubeAsUser: os.Getenv("HELM_KUBEASUSER"),
+ KubeAsGroups: envCSV("HELM_KUBEASGROUPS"),
+ KubeAPIServer: os.Getenv("HELM_KUBEAPISERVER"),
+ KubeCaFile: os.Getenv("HELM_KUBECAFILE"),
+ KubeTLSServerName: os.Getenv("HELM_KUBETLS_SERVER_NAME"),
+ KubeInsecureSkipTLSVerify: envBoolOr("HELM_KUBEINSECURE_SKIP_TLS_VERIFY", false),
+ PluginsDirectory: envOr("HELM_PLUGINS", helmpath.DataPath("plugins")),
+ RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry/config.json")),
+ RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")),
+ RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")),
+ ContentCache: envOr("HELM_CONTENT_CACHE", helmpath.CachePath("content")),
+ BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit),
+ QPS: envFloat32Or("HELM_QPS", defaultQPS),
+ ColorMode: envColorMode(),
+ }
+ env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG"))
+
+ // bind to kubernetes config flags
+ config := &genericclioptions.ConfigFlags{
+ Namespace: &env.namespace,
+ Context: &env.KubeContext,
+ BearerToken: &env.KubeToken,
+ APIServer: &env.KubeAPIServer,
+ CAFile: &env.KubeCaFile,
+ KubeConfig: &env.KubeConfig,
+ Impersonate: &env.KubeAsUser,
+ Insecure: &env.KubeInsecureSkipTLSVerify,
+ TLSServerName: &env.KubeTLSServerName,
+ ImpersonateGroup: &env.KubeAsGroups,
+ WrapConfigFn: func(config *rest.Config) *rest.Config {
+ config.Burst = env.BurstLimit
+ config.QPS = env.QPS
+ config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+ return &kube.RetryingRoundTripper{Wrapped: rt}
+ })
+ config.UserAgent = version.GetUserAgent()
+ return config
+ },
+ }
+ if env.BurstLimit != defaultBurstLimit {
+ config = config.WithDiscoveryBurst(env.BurstLimit)
+ }
+ env.config = config
+
+ return env
+}
+
+// AddFlags binds flags to the given flagset.
+func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) {
+ fs.StringVarP(&s.namespace, "namespace", "n", s.namespace, "namespace scope for this request")
+ fs.StringVar(&s.KubeConfig, "kubeconfig", "", "path to the kubeconfig file")
+ fs.StringVar(&s.KubeContext, "kube-context", s.KubeContext, "name of the kubeconfig context to use")
+ fs.StringVar(&s.KubeToken, "kube-token", s.KubeToken, "bearer token used for authentication")
+ fs.StringVar(&s.KubeAsUser, "kube-as-user", s.KubeAsUser, "username to impersonate for the operation")
+ fs.StringArrayVar(&s.KubeAsGroups, "kube-as-group", s.KubeAsGroups, "group to impersonate for the operation, this flag can be repeated to specify multiple groups.")
+ fs.StringVar(&s.KubeAPIServer, "kube-apiserver", s.KubeAPIServer, "the address and the port for the Kubernetes API server")
+ fs.StringVar(&s.KubeCaFile, "kube-ca-file", s.KubeCaFile, "the certificate authority file for the Kubernetes API server connection")
+ fs.StringVar(&s.KubeTLSServerName, "kube-tls-server-name", s.KubeTLSServerName, "server name to use for Kubernetes API server certificate validation. If it is not provided, the hostname used to contact the server is used")
+ fs.BoolVar(&s.KubeInsecureSkipTLSVerify, "kube-insecure-skip-tls-verify", s.KubeInsecureSkipTLSVerify, "if true, the Kubernetes API server's certificate will not be checked for validity. This will make your HTTPS connections insecure")
+ fs.BoolVar(&s.Debug, "debug", s.Debug, "enable verbose output")
+ fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file")
+ fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs")
+ fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the directory containing cached repository indexes")
+ fs.StringVar(&s.ContentCache, "content-cache", s.ContentCache, "path to the directory containing cached content (e.g. charts)")
+ fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit")
+ fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting")
+ fs.StringVar(&s.ColorMode, "color", s.ColorMode, "use colored output (never, auto, always)")
+ fs.StringVar(&s.ColorMode, "colour", s.ColorMode, "use colored output (never, auto, always)")
+}
+
+func envOr(name, def string) string {
+ if v, ok := os.LookupEnv(name); ok {
+ return v
+ }
+ return def
+}
+
+func envBoolOr(name string, def bool) bool {
+ if name == "" {
+ return def
+ }
+ envVal := envOr(name, strconv.FormatBool(def))
+ ret, err := strconv.ParseBool(envVal)
+ if err != nil {
+ return def
+ }
+ return ret
+}
+
+func envIntOr(name string, def int) int {
+ if name == "" {
+ return def
+ }
+ envVal := envOr(name, strconv.Itoa(def))
+ ret, err := strconv.Atoi(envVal)
+ if err != nil {
+ return def
+ }
+ return ret
+}
+
+func envFloat32Or(name string, def float32) float32 {
+ if name == "" {
+ return def
+ }
+ envVal := envOr(name, strconv.FormatFloat(float64(def), 'f', 2, 32))
+ ret, err := strconv.ParseFloat(envVal, 32)
+ if err != nil {
+ return def
+ }
+ return float32(ret)
+}
+
+func envCSV(name string) (ls []string) {
+ trimmed := strings.Trim(os.Getenv(name), ", ")
+ if trimmed != "" {
+ ls = strings.Split(trimmed, ",")
+ }
+ return
+}
+
+func envColorMode() string {
+ // Check NO_COLOR environment variable first (standard)
+ if v, ok := os.LookupEnv("NO_COLOR"); ok && v != "" {
+ return "never"
+ }
+ // Check HELM_COLOR environment variable
+ if v, ok := os.LookupEnv("HELM_COLOR"); ok {
+ v = strings.ToLower(v)
+ switch v {
+ case "never", "auto", "always":
+ return v
+ }
+ }
+ // Default to auto
+ return "auto"
+}
+
+func (s *EnvSettings) EnvVars() map[string]string {
+ envvars := map[string]string{
+ "HELM_BIN": os.Args[0],
+ "HELM_CACHE_HOME": helmpath.CachePath(""),
+ "HELM_CONFIG_HOME": helmpath.ConfigPath(""),
+ "HELM_DATA_HOME": helmpath.DataPath(""),
+ "HELM_DEBUG": fmt.Sprint(s.Debug),
+ "HELM_PLUGINS": s.PluginsDirectory,
+ "HELM_REGISTRY_CONFIG": s.RegistryConfig,
+ "HELM_REPOSITORY_CACHE": s.RepositoryCache,
+ "HELM_CONTENT_CACHE": s.ContentCache,
+ "HELM_REPOSITORY_CONFIG": s.RepositoryConfig,
+ "HELM_NAMESPACE": s.Namespace(),
+ "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory),
+ "HELM_BURST_LIMIT": strconv.Itoa(s.BurstLimit),
+ "HELM_QPS": strconv.FormatFloat(float64(s.QPS), 'f', 2, 32),
+
+ // broken, these are populated from helm flags and not kubeconfig.
+ "HELM_KUBECONTEXT": s.KubeContext,
+ "HELM_KUBETOKEN": s.KubeToken,
+ "HELM_KUBEASUSER": s.KubeAsUser,
+ "HELM_KUBEASGROUPS": strings.Join(s.KubeAsGroups, ","),
+ "HELM_KUBEAPISERVER": s.KubeAPIServer,
+ "HELM_KUBECAFILE": s.KubeCaFile,
+ "HELM_KUBEINSECURE_SKIP_TLS_VERIFY": strconv.FormatBool(s.KubeInsecureSkipTLSVerify),
+ "HELM_KUBETLS_SERVER_NAME": s.KubeTLSServerName,
+ }
+ if s.KubeConfig != "" {
+ envvars["KUBECONFIG"] = s.KubeConfig
+ }
+ return envvars
+}
+
+// Namespace gets the namespace from the configuration
+func (s *EnvSettings) Namespace() string {
+ if s.config != nil {
+ if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil {
+ return ns
+ }
+ }
+ if s.namespace != "" {
+ return s.namespace
+ }
+ return "default"
+}
+
+// SetNamespace sets the namespace in the configuration
+func (s *EnvSettings) SetNamespace(namespace string) {
+ s.namespace = namespace
+}
+
+// RESTClientGetter gets the kubeconfig from EnvSettings
+func (s *EnvSettings) RESTClientGetter() genericclioptions.RESTClientGetter {
+ return s.config
+}
+
+// ShouldDisableColor returns true if color output should be disabled
+func (s *EnvSettings) ShouldDisableColor() bool {
+ return s.ColorMode == "never"
+}
diff --git a/helm/pkg/cli/environment_test.go b/helm/pkg/cli/environment_test.go
new file mode 100644
index 000000000..52326eeff
--- /dev/null
+++ b/helm/pkg/cli/environment_test.go
@@ -0,0 +1,274 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/spf13/pflag"
+
+ "helm.sh/helm/v4/internal/version"
+)
+
+func TestSetNamespace(t *testing.T) {
+ settings := New()
+
+ if settings.namespace != "" {
+ t.Errorf("Expected empty namespace, got %s", settings.namespace)
+ }
+
+ settings.SetNamespace("testns")
+ if settings.namespace != "testns" {
+ t.Errorf("Expected namespace testns, got %s", settings.namespace)
+ }
+}
+
+func TestEnvSettings(t *testing.T) {
+ tests := []struct {
+ name string
+
+ // input
+ args string
+ envvars map[string]string
+
+ // expected values
+ ns, kcontext string
+ debug bool
+ maxhistory int
+ kubeAsUser string
+ kubeAsGroups []string
+ kubeCaFile string
+ kubeInsecure bool
+ kubeTLSServer string
+ burstLimit int
+ qps float32
+ }{
+ {
+ name: "defaults",
+ ns: "default",
+ maxhistory: defaultMaxHistory,
+ burstLimit: defaultBurstLimit,
+ qps: defaultQPS,
+ },
+ {
+ name: "with flags set",
+ args: "--debug --namespace=myns --kube-as-user=poro --kube-as-group=admins --kube-as-group=teatime --kube-as-group=snackeaters --kube-ca-file=/tmp/ca.crt --burst-limit 100 --qps 50.12 --kube-insecure-skip-tls-verify=true --kube-tls-server-name=example.org",
+ ns: "myns",
+ debug: true,
+ maxhistory: defaultMaxHistory,
+ burstLimit: 100,
+ qps: 50.12,
+ kubeAsUser: "poro",
+ kubeAsGroups: []string{"admins", "teatime", "snackeaters"},
+ kubeCaFile: "/tmp/ca.crt",
+ kubeTLSServer: "example.org",
+ kubeInsecure: true,
+ },
+ {
+ name: "with envvars set",
+ envvars: map[string]string{"HELM_DEBUG": "1", "HELM_NAMESPACE": "yourns", "HELM_KUBEASUSER": "pikachu", "HELM_KUBEASGROUPS": ",,,operators,snackeaters,partyanimals", "HELM_MAX_HISTORY": "5", "HELM_KUBECAFILE": "/tmp/ca.crt", "HELM_BURST_LIMIT": "150", "HELM_KUBEINSECURE_SKIP_TLS_VERIFY": "true", "HELM_KUBETLS_SERVER_NAME": "example.org", "HELM_QPS": "60.34"},
+ ns: "yourns",
+ maxhistory: 5,
+ burstLimit: 150,
+ qps: 60.34,
+ debug: true,
+ kubeAsUser: "pikachu",
+ kubeAsGroups: []string{"operators", "snackeaters", "partyanimals"},
+ kubeCaFile: "/tmp/ca.crt",
+ kubeTLSServer: "example.org",
+ kubeInsecure: true,
+ },
+ {
+ name: "with flags and envvars set",
+ args: "--debug --namespace=myns --kube-as-user=poro --kube-as-group=admins --kube-as-group=teatime --kube-as-group=snackeaters --kube-ca-file=/my/ca.crt --burst-limit 175 --qps 70 --kube-insecure-skip-tls-verify=true --kube-tls-server-name=example.org",
+ envvars: map[string]string{"HELM_DEBUG": "1", "HELM_NAMESPACE": "yourns", "HELM_KUBEASUSER": "pikachu", "HELM_KUBEASGROUPS": ",,,operators,snackeaters,partyanimals", "HELM_MAX_HISTORY": "5", "HELM_KUBECAFILE": "/tmp/ca.crt", "HELM_BURST_LIMIT": "200", "HELM_KUBEINSECURE_SKIP_TLS_VERIFY": "true", "HELM_KUBETLS_SERVER_NAME": "example.org", "HELM_QPS": "40"},
+ ns: "myns",
+ debug: true,
+ maxhistory: 5,
+ burstLimit: 175,
+ qps: 70,
+ kubeAsUser: "poro",
+ kubeAsGroups: []string{"admins", "teatime", "snackeaters"},
+ kubeCaFile: "/my/ca.crt",
+ kubeTLSServer: "example.org",
+ kubeInsecure: true,
+ },
+ {
+ name: "invalid kubeconfig",
+ ns: "testns",
+ args: "--namespace=testns --kubeconfig=/path/to/fake/file",
+ maxhistory: defaultMaxHistory,
+ burstLimit: defaultBurstLimit,
+ qps: defaultQPS,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ defer resetEnv()()
+
+ for k, v := range tt.envvars {
+ t.Setenv(k, v)
+ }
+
+ flags := pflag.NewFlagSet("testing", pflag.ContinueOnError)
+
+ settings := New()
+ settings.AddFlags(flags)
+ flags.Parse(strings.Split(tt.args, " "))
+
+ if settings.Debug != tt.debug {
+ t.Errorf("expected debug %t, got %t", tt.debug, settings.Debug)
+ }
+ if settings.Namespace() != tt.ns {
+ t.Errorf("expected namespace %q, got %q", tt.ns, settings.Namespace())
+ }
+ if settings.KubeContext != tt.kcontext {
+ t.Errorf("expected kube-context %q, got %q", tt.kcontext, settings.KubeContext)
+ }
+ if settings.MaxHistory != tt.maxhistory {
+ t.Errorf("expected maxHistory %d, got %d", tt.maxhistory, settings.MaxHistory)
+ }
+ if tt.kubeAsUser != settings.KubeAsUser {
+ t.Errorf("expected kAsUser %q, got %q", tt.kubeAsUser, settings.KubeAsUser)
+ }
+ if !reflect.DeepEqual(tt.kubeAsGroups, settings.KubeAsGroups) {
+ t.Errorf("expected kAsGroups %+v, got %+v", len(tt.kubeAsGroups), len(settings.KubeAsGroups))
+ }
+ if tt.kubeCaFile != settings.KubeCaFile {
+ t.Errorf("expected kCaFile %q, got %q", tt.kubeCaFile, settings.KubeCaFile)
+ }
+ if tt.burstLimit != settings.BurstLimit {
+ t.Errorf("expected BurstLimit %d, got %d", tt.burstLimit, settings.BurstLimit)
+ }
+ if tt.kubeInsecure != settings.KubeInsecureSkipTLSVerify {
+ t.Errorf("expected kubeInsecure %t, got %t", tt.kubeInsecure, settings.KubeInsecureSkipTLSVerify)
+ }
+ if tt.kubeTLSServer != settings.KubeTLSServerName {
+ t.Errorf("expected kubeTLSServer %q, got %q", tt.kubeTLSServer, settings.KubeTLSServerName)
+ }
+ })
+ }
+}
+
+func TestEnvOrBool(t *testing.T) {
+ const envName = "TEST_ENV_OR_BOOL"
+ tests := []struct {
+ name string
+ env string
+ val string
+ def bool
+ expected bool
+ }{
+ {
+ name: "unset with default false",
+ def: false,
+ expected: false,
+ },
+ {
+ name: "unset with default true",
+ def: true,
+ expected: true,
+ },
+ {
+ name: "blank env with default false",
+ env: envName,
+ def: false,
+ expected: false,
+ },
+ {
+ name: "blank env with default true",
+ env: envName,
+ def: true,
+ expected: true,
+ },
+ {
+ name: "env true with default false",
+ env: envName,
+ val: "true",
+ def: false,
+ expected: true,
+ },
+ {
+ name: "env false with default true",
+ env: envName,
+ val: "false",
+ def: true,
+ expected: false,
+ },
+ {
+ name: "env fails parsing with default true",
+ env: envName,
+ val: "NOT_A_BOOL",
+ def: true,
+ expected: true,
+ },
+ {
+ name: "env fails parsing with default false",
+ env: envName,
+ val: "NOT_A_BOOL",
+ def: false,
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if tt.env != "" {
+ t.Setenv(tt.env, tt.val)
+ }
+ actual := envBoolOr(tt.env, tt.def)
+ if actual != tt.expected {
+ t.Errorf("expected result %t, got %t", tt.expected, actual)
+ }
+ })
+ }
+}
+
+func TestUserAgentHeaderInK8sRESTClientConfig(t *testing.T) {
+ defer resetEnv()()
+
+ settings := New()
+ restConfig, err := settings.RESTClientGetter().ToRESTConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedUserAgent := version.GetUserAgent()
+ if restConfig.UserAgent != expectedUserAgent {
+ t.Errorf("expected User-Agent header %q in K8s REST client config, got %q", expectedUserAgent, restConfig.UserAgent)
+ }
+}
+
+func resetEnv() func() {
+ origEnv := os.Environ()
+
+ // ensure any local envvars do not hose us
+ for e := range New().EnvVars() {
+ os.Unsetenv(e)
+ }
+
+ return func() {
+ for _, pair := range origEnv {
+ kv := strings.SplitN(pair, "=", 2)
+ os.Setenv(kv[0], kv[1])
+ }
+ }
+}
diff --git a/helm/pkg/cli/output/output.go b/helm/pkg/cli/output/output.go
new file mode 100644
index 000000000..28d503741
--- /dev/null
+++ b/helm/pkg/cli/output/output.go
@@ -0,0 +1,139 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package output
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/gosuri/uitable"
+ "sigs.k8s.io/yaml"
+)
+
+// Format is a type for capturing supported output formats
+type Format string
+
+const (
+ Table Format = "table"
+ JSON Format = "json"
+ YAML Format = "yaml"
+)
+
+// Formats returns a list of the string representation of the supported formats
+func Formats() []string {
+ return []string{Table.String(), JSON.String(), YAML.String()}
+}
+
+// FormatsWithDesc returns a list of the string representation of the supported formats
+// including a description
+func FormatsWithDesc() map[string]string {
+ return map[string]string{
+ Table.String(): "Output result in human-readable format",
+ JSON.String(): "Output result in JSON format",
+ YAML.String(): "Output result in YAML format",
+ }
+}
+
+// ErrInvalidFormatType is returned when an unsupported format type is used
+var ErrInvalidFormatType = fmt.Errorf("invalid format type")
+
+// String returns the string representation of the Format
+func (o Format) String() string {
+ return string(o)
+}
+
+// Write the output in the given format to the io.Writer. Unsupported formats
+// will return an error
+func (o Format) Write(out io.Writer, w Writer) error {
+ switch o {
+ case Table:
+ return w.WriteTable(out)
+ case JSON:
+ return w.WriteJSON(out)
+ case YAML:
+ return w.WriteYAML(out)
+ }
+ return ErrInvalidFormatType
+}
+
+// ParseFormat takes a raw string and returns the matching Format.
+// If the format does not exist, ErrInvalidFormatType is returned
+func ParseFormat(s string) (out Format, err error) {
+ switch s {
+ case Table.String():
+ out, err = Table, nil
+ case JSON.String():
+ out, err = JSON, nil
+ case YAML.String():
+ out, err = YAML, nil
+ default:
+ out, err = "", ErrInvalidFormatType
+ }
+ return
+}
+
+// Writer is an interface that any type can implement to write supported formats
+type Writer interface {
+ // WriteTable will write tabular output into the given io.Writer, returning
+ // an error if any occur
+ WriteTable(out io.Writer) error
+ // WriteJSON will write JSON formatted output into the given io.Writer,
+ // returning an error if any occur
+ WriteJSON(out io.Writer) error
+ // WriteYAML will write YAML formatted output into the given io.Writer,
+ // returning an error if any occur
+ WriteYAML(out io.Writer) error
+}
+
+// EncodeJSON is a helper function to decorate any error message with a bit more
+// context and avoid writing the same code over and over for printers.
+func EncodeJSON(out io.Writer, obj interface{}) error {
+ enc := json.NewEncoder(out)
+ err := enc.Encode(obj)
+ if err != nil {
+ return fmt.Errorf("unable to write JSON output: %w", err)
+ }
+ return nil
+}
+
+// EncodeYAML is a helper function to decorate any error message with a bit more
+// context and avoid writing the same code over and over for printers
+func EncodeYAML(out io.Writer, obj interface{}) error {
+ raw, err := yaml.Marshal(obj)
+ if err != nil {
+ return fmt.Errorf("unable to write YAML output: %w", err)
+ }
+
+ _, err = out.Write(raw)
+ if err != nil {
+ return fmt.Errorf("unable to write YAML output: %w", err)
+ }
+ return nil
+}
+
+// EncodeTable is a helper function to decorate any error message with a bit
+// more context and avoid writing the same code over and over for printers
+func EncodeTable(out io.Writer, table *uitable.Table) error {
+ raw := table.Bytes()
+ raw = append(raw, []byte("\n")...)
+ _, err := out.Write(raw)
+ if err != nil {
+ return fmt.Errorf("unable to write table output: %w", err)
+ }
+ return nil
+}
diff --git a/helm/pkg/cli/values/options.go b/helm/pkg/cli/values/options.go
new file mode 100644
index 000000000..cd65fa885
--- /dev/null
+++ b/helm/pkg/cli/values/options.go
@@ -0,0 +1,138 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package values
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "strings"
+
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/strvals"
+)
+
+// Options captures the different ways to specify values
+type Options struct {
+ ValueFiles []string // -f/--values
+ StringValues []string // --set-string
+ Values []string // --set
+ FileValues []string // --set-file
+ JSONValues []string // --set-json
+ LiteralValues []string // --set-literal
+}
+
+// MergeValues merges values from files specified via -f/--values and directly
+// via --set-json, --set, --set-string, or --set-file, marshaling them to YAML
+func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, error) {
+ base := map[string]interface{}{}
+
+ // User specified a values files via -f/--values
+ for _, filePath := range opts.ValueFiles {
+ raw, err := readFile(filePath, p)
+ if err != nil {
+ return nil, err
+ }
+ currentMap, err := loader.LoadValues(bytes.NewReader(raw))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse %s: %w", filePath, err)
+ }
+ // Merge with the previous map
+ base = loader.MergeMaps(base, currentMap)
+ }
+
+ // User specified a value via --set-json
+ for _, value := range opts.JSONValues {
+ trimmedValue := strings.TrimSpace(value)
+ if len(trimmedValue) > 0 && trimmedValue[0] == '{' {
+ // If value is JSON object format, parse it as map
+ var jsonMap map[string]interface{}
+ if err := json.Unmarshal([]byte(trimmedValue), &jsonMap); err != nil {
+ return nil, fmt.Errorf("failed parsing --set-json data JSON: %s", value)
+ }
+ base = loader.MergeMaps(base, jsonMap)
+ } else {
+ // Otherwise, parse it as key=value format
+ if err := strvals.ParseJSON(value, base); err != nil {
+ return nil, fmt.Errorf("failed parsing --set-json data %s", value)
+ }
+ }
+ }
+
+ // User specified a value via --set
+ for _, value := range opts.Values {
+ if err := strvals.ParseInto(value, base); err != nil {
+ return nil, fmt.Errorf("failed parsing --set data: %w", err)
+ }
+ }
+
+ // User specified a value via --set-string
+ for _, value := range opts.StringValues {
+ if err := strvals.ParseIntoString(value, base); err != nil {
+ return nil, fmt.Errorf("failed parsing --set-string data: %w", err)
+ }
+ }
+
+ // User specified a value via --set-file
+ for _, value := range opts.FileValues {
+ reader := func(rs []rune) (interface{}, error) {
+ bytes, err := readFile(string(rs), p)
+ if err != nil {
+ return nil, err
+ }
+ return string(bytes), err
+ }
+ if err := strvals.ParseIntoFile(value, base, reader); err != nil {
+ return nil, fmt.Errorf("failed parsing --set-file data: %w", err)
+ }
+ }
+
+ // User specified a value via --set-literal
+ for _, value := range opts.LiteralValues {
+ if err := strvals.ParseLiteralInto(value, base); err != nil {
+ return nil, fmt.Errorf("failed parsing --set-literal data: %w", err)
+ }
+ }
+
+ return base, nil
+}
+
+// readFile load a file from stdin, the local directory, or a remote file with a url.
+func readFile(filePath string, p getter.Providers) ([]byte, error) {
+ if strings.TrimSpace(filePath) == "-" {
+ return io.ReadAll(os.Stdin)
+ }
+ u, err := url.Parse(filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: maybe someone handle other protocols like ftp.
+ g, err := p.ByScheme(u.Scheme)
+ if err != nil {
+ return os.ReadFile(filePath)
+ }
+ data, err := g.Get(filePath, getter.WithURL(filePath))
+ if err != nil {
+ return nil, err
+ }
+ return data.Bytes(), nil
+}
diff --git a/helm/pkg/cli/values/options_test.go b/helm/pkg/cli/values/options_test.go
new file mode 100644
index 000000000..fe1afc5d2
--- /dev/null
+++ b/helm/pkg/cli/values/options_test.go
@@ -0,0 +1,389 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package values
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/getter"
+)
+
+// mockGetter implements getter.Getter for testing
+type mockGetter struct {
+ content []byte
+ err error
+}
+
+func (m *mockGetter) Get(_ string, _ ...getter.Option) (*bytes.Buffer, error) {
+ if m.err != nil {
+ return nil, m.err
+ }
+ return bytes.NewBuffer(m.content), nil
+}
+
+// mockProvider creates a test provider
+func mockProvider(schemes []string, content []byte, err error) getter.Provider {
+ return getter.Provider{
+ Schemes: schemes,
+ New: func(_ ...getter.Option) (getter.Getter, error) {
+ return &mockGetter{content: content, err: err}, nil
+ },
+ }
+}
+
+func TestReadFile(t *testing.T) {
+ tests := []struct {
+ name string
+ filePath string
+ providers getter.Providers
+ setupFunc func(*testing.T) (string, func()) // setup temp files, return cleanup
+ expectError bool
+ expectStdin bool
+ expectedData []byte
+ }{
+ {
+ name: "stdin input with dash",
+ filePath: "-",
+ providers: getter.Providers{},
+ expectStdin: true,
+ expectError: false,
+ },
+ {
+ name: "stdin input with whitespace",
+ filePath: " - ",
+ providers: getter.Providers{},
+ expectStdin: true,
+ expectError: false,
+ },
+ {
+ name: "invalid URL parsing",
+ filePath: "://invalid-url",
+ providers: getter.Providers{},
+ expectError: true,
+ },
+ {
+ name: "local file - existing",
+ filePath: "test.txt",
+ providers: getter.Providers{},
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ tmpDir := t.TempDir()
+ filePath := filepath.Join(tmpDir, "test.txt")
+ content := []byte("local file content")
+ err := os.WriteFile(filePath, content, 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return filePath, func() {} // cleanup handled by t.TempDir()
+ },
+ expectError: false,
+ expectedData: []byte("local file content"),
+ },
+ {
+ name: "local file - non-existent",
+ filePath: "/non/existent/file.txt",
+ providers: getter.Providers{},
+ expectError: true,
+ },
+ {
+ name: "remote file with http scheme - success",
+ filePath: "http://example.com/values.yaml",
+ providers: getter.Providers{
+ mockProvider([]string{"http", "https"}, []byte("remote content"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("remote content"),
+ },
+ {
+ name: "remote file with https scheme - success",
+ filePath: "https://example.com/values.yaml",
+ providers: getter.Providers{
+ mockProvider([]string{"http", "https"}, []byte("https content"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("https content"),
+ },
+ {
+ name: "remote file with custom scheme - success",
+ filePath: "oci://registry.example.com/chart",
+ providers: getter.Providers{
+ mockProvider([]string{"oci"}, []byte("oci content"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("oci content"),
+ },
+ {
+ name: "remote file - getter error",
+ filePath: "http://example.com/values.yaml",
+ providers: getter.Providers{
+ mockProvider([]string{"http"}, nil, errors.New("network error")),
+ },
+ expectError: true,
+ },
+ {
+ name: "unsupported scheme fallback to local file",
+ filePath: "ftp://example.com/file.txt",
+ providers: getter.Providers{
+ mockProvider([]string{"http"}, []byte("should not be used"), nil),
+ },
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ // Create a local file named "ftp://example.com/file.txt"
+ // This tests the fallback behavior when scheme is not supported
+ tmpDir := t.TempDir()
+ fileName := "ftp_file.txt" // Valid filename for filesystem
+ filePath := filepath.Join(tmpDir, fileName)
+ content := []byte("local fallback content")
+ err := os.WriteFile(filePath, content, 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return filePath, func() {}
+ },
+ expectError: false,
+ expectedData: []byte("local fallback content"),
+ },
+ {
+ name: "empty file path",
+ filePath: "",
+ providers: getter.Providers{},
+ expectError: true, // Empty path should cause error
+ },
+ {
+ name: "multiple providers - correct selection",
+ filePath: "custom://example.com/resource",
+ providers: getter.Providers{
+ mockProvider([]string{"http", "https"}, []byte("wrong content"), nil),
+ mockProvider([]string{"custom"}, []byte("correct content"), nil),
+ mockProvider([]string{"oci"}, []byte("also wrong"), nil),
+ },
+ expectError: false,
+ expectedData: []byte("correct content"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var actualFilePath string
+ var cleanup func()
+
+ if tt.setupFunc != nil {
+ actualFilePath, cleanup = tt.setupFunc(t)
+ defer cleanup()
+ } else {
+ actualFilePath = tt.filePath
+ }
+
+ // Handle stdin test case
+ if tt.expectStdin {
+ // Save original stdin
+ originalStdin := os.Stdin
+ defer func() { os.Stdin = originalStdin }()
+
+ // Create a pipe for stdin
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+ defer w.Close()
+
+ // Replace stdin with our pipe
+ os.Stdin = r
+
+ // Write test data to stdin
+ testData := []byte("stdin test data")
+ go func() {
+ defer w.Close()
+ w.Write(testData)
+ }()
+
+ // Test the function
+ got, err := readFile(actualFilePath, tt.providers)
+ if err != nil {
+ t.Errorf("readFile() error = %v, expected no error for stdin", err)
+ return
+ }
+
+ if !bytes.Equal(got, testData) {
+ t.Errorf("readFile() = %v, want %v", got, testData)
+ }
+ return
+ }
+
+ // Regular test cases
+ got, err := readFile(actualFilePath, tt.providers)
+ if (err != nil) != tt.expectError {
+ t.Errorf("readFile() error = %v, expectError %v", err, tt.expectError)
+ return
+ }
+
+ if !tt.expectError && tt.expectedData != nil {
+ if !bytes.Equal(got, tt.expectedData) {
+ t.Errorf("readFile() = %v, want %v", got, tt.expectedData)
+ }
+ }
+ })
+ }
+}
+
+// TestReadFileErrorMessages tests specific error scenarios and their messages
+func TestReadFileErrorMessages(t *testing.T) {
+ tests := []struct {
+ name string
+ filePath string
+ providers getter.Providers
+ wantErr string
+ }{
+ {
+ name: "URL parse error",
+ filePath: "://invalid",
+ providers: getter.Providers{},
+ wantErr: "missing protocol scheme",
+ },
+ {
+ name: "getter error with message",
+ filePath: "http://example.com/file",
+ providers: getter.Providers{mockProvider([]string{"http"}, nil, fmt.Errorf("connection refused"))},
+ wantErr: "connection refused",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := readFile(tt.filePath, tt.providers)
+ if err == nil {
+ t.Errorf("readFile() expected error containing %q, got nil", tt.wantErr)
+ return
+ }
+ if !strings.Contains(err.Error(), tt.wantErr) {
+ t.Errorf("readFile() error = %v, want error containing %q", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+// Original test case - keeping for backward compatibility
+func TestReadFileOriginal(t *testing.T) {
+ var p getter.Providers
+ filePath := "%a.txt"
+ _, err := readFile(filePath, p)
+ if err == nil {
+ t.Errorf("Expected error when has special strings")
+ }
+}
+
+func TestMergeValuesCLI(t *testing.T) {
+ tests := []struct {
+ name string
+ opts Options
+ expected map[string]interface{}
+ wantErr bool
+ }{
+ {
+ name: "set-json object",
+ opts: Options{
+ JSONValues: []string{`{"foo": {"bar": "baz"}}`},
+ },
+ expected: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "bar": "baz",
+ },
+ },
+ },
+ {
+ name: "set-json key=value",
+ opts: Options{
+ JSONValues: []string{"foo.bar=[1,2,3]"},
+ },
+ expected: map[string]interface{}{
+ "foo": map[string]interface{}{
+ "bar": []interface{}{1.0, 2.0, 3.0},
+ },
+ },
+ },
+ {
+ name: "set regular value",
+ opts: Options{
+ Values: []string{"foo=bar"},
+ },
+ expected: map[string]interface{}{
+ "foo": "bar",
+ },
+ },
+ {
+ name: "set string value",
+ opts: Options{
+ StringValues: []string{"foo=123"},
+ },
+ expected: map[string]interface{}{
+ "foo": "123",
+ },
+ },
+ {
+ name: "set literal value",
+ opts: Options{
+ LiteralValues: []string{"foo=true"},
+ },
+ expected: map[string]interface{}{
+ "foo": "true",
+ },
+ },
+ {
+ name: "multiple options",
+ opts: Options{
+ Values: []string{"a=foo"},
+ StringValues: []string{"b=bar"},
+ JSONValues: []string{`{"c": "foo1"}`},
+ LiteralValues: []string{"d=bar1"},
+ },
+ expected: map[string]interface{}{
+ "a": "foo",
+ "b": "bar",
+ "c": "foo1",
+ "d": "bar1",
+ },
+ },
+ {
+ name: "invalid json",
+ opts: Options{
+ JSONValues: []string{`{invalid`},
+ },
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.opts.MergeValues(getter.Providers{})
+ if (err != nil) != tt.wantErr {
+ t.Errorf("MergeValues() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !tt.wantErr && !reflect.DeepEqual(got, tt.expected) {
+ t.Errorf("MergeValues() = %v, want %v", got, tt.expected)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/cmd/completion.go b/helm/pkg/cmd/completion.go
new file mode 100644
index 000000000..6f6dbd25d
--- /dev/null
+++ b/helm/pkg/cmd/completion.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const completionDesc = `
+Generate autocompletion scripts for Helm for the specified shell.
+`
+const bashCompDesc = `
+Generate the autocompletion script for Helm for the bash shell.
+
+To load completions in your current shell session:
+
+ source <(helm completion bash)
+
+To load completions for every new session, execute once:
+- Linux:
+
+ helm completion bash > /etc/bash_completion.d/helm
+
+- MacOS:
+
+ helm completion bash > /usr/local/etc/bash_completion.d/helm
+`
+
+const zshCompDesc = `
+Generate the autocompletion script for Helm for the zsh shell.
+
+To load completions in your current shell session:
+
+ source <(helm completion zsh)
+
+To load completions for every new session, execute once:
+
+ helm completion zsh > "${fpath[1]}/_helm"
+`
+
+const fishCompDesc = `
+Generate the autocompletion script for Helm for the fish shell.
+
+To load completions in your current shell session:
+
+ helm completion fish | source
+
+To load completions for every new session, execute once:
+
+ helm completion fish > ~/.config/fish/completions/helm.fish
+
+You will need to start a new shell for this setup to take effect.
+`
+
+const powershellCompDesc = `
+Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+PS C:\> helm completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+`
+
+const (
+ noDescFlagName = "no-descriptions"
+ noDescFlagText = "disable completion descriptions"
+)
+
+var disableCompDescriptions bool
+
+func newCompletionCmd(out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "completion",
+ Short: "generate autocompletion scripts for the specified shell",
+ Long: completionDesc,
+ Args: require.NoArgs,
+ }
+
+ bash := &cobra.Command{
+ Use: "bash",
+ Short: "generate autocompletion script for bash",
+ Long: bashCompDesc,
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ return runCompletionBash(out, cmd)
+ },
+ }
+ bash.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)
+
+ zsh := &cobra.Command{
+ Use: "zsh",
+ Short: "generate autocompletion script for zsh",
+ Long: zshCompDesc,
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ return runCompletionZsh(out, cmd)
+ },
+ }
+ zsh.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)
+
+ fish := &cobra.Command{
+ Use: "fish",
+ Short: "generate autocompletion script for fish",
+ Long: fishCompDesc,
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ return runCompletionFish(out, cmd)
+ },
+ }
+ fish.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)
+
+ powershell := &cobra.Command{
+ Use: "powershell",
+ Short: "generate autocompletion script for powershell",
+ Long: powershellCompDesc,
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ return runCompletionPowershell(out, cmd)
+ },
+ }
+ powershell.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)
+
+ cmd.AddCommand(bash, zsh, fish, powershell)
+
+ return cmd
+}
+
+func runCompletionBash(out io.Writer, cmd *cobra.Command) error {
+ err := cmd.Root().GenBashCompletionV2(out, !disableCompDescriptions)
+
+ // In case the user renamed the helm binary (e.g., to be able to run
+ // both helm2 and helm3), we hook the new binary name to the completion function
+ if binary := filepath.Base(os.Args[0]); binary != "helm" {
+ renamedBinaryHook := `
+# Hook the command used to generate the completion script
+# to the helm completion function to handle the case where
+# the user renamed the helm binary
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_helm %[1]s
+else
+ complete -o default -o nospace -F __start_helm %[1]s
+fi
+`
+ fmt.Fprintf(out, renamedBinaryHook, binary)
+ }
+
+ return err
+}
+
+func runCompletionZsh(out io.Writer, cmd *cobra.Command) error {
+ var err error
+ if disableCompDescriptions {
+ err = cmd.Root().GenZshCompletionNoDesc(out)
+ } else {
+ err = cmd.Root().GenZshCompletion(out)
+ }
+
+ // In case the user renamed the helm binary (e.g., to be able to run
+ // both helm2 and helm3), we hook the new binary name to the completion function
+ if binary := filepath.Base(os.Args[0]); binary != "helm" {
+ renamedBinaryHook := `
+# Hook the command used to generate the completion script
+# to the helm completion function to handle the case where
+# the user renamed the helm binary
+compdef _helm %[1]s
+`
+ fmt.Fprintf(out, renamedBinaryHook, binary)
+ }
+
+ // Cobra doesn't source zsh completion file, explicitly doing it here
+ fmt.Fprintf(out, "compdef _helm helm")
+
+ return err
+}
+
+func runCompletionFish(out io.Writer, cmd *cobra.Command) error {
+ return cmd.Root().GenFishCompletion(out, !disableCompDescriptions)
+}
+
+func runCompletionPowershell(out io.Writer, cmd *cobra.Command) error {
+ if disableCompDescriptions {
+ return cmd.Root().GenPowerShellCompletion(out)
+ }
+ return cmd.Root().GenPowerShellCompletionWithDesc(out)
+}
+
+// noMoreArgsCompFunc deactivates file completion when doing argument shell completion.
+// It also provides some ActiveHelp to indicate no more arguments are accepted.
+func noMoreArgsCompFunc(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return noMoreArgsComp()
+}
+
+// noMoreArgsComp deactivates file completion when doing argument shell completion.
+// It also provides some ActiveHelp to indicate no more arguments are accepted.
+func noMoreArgsComp() ([]string, cobra.ShellCompDirective) {
+ activeHelpMsg := "This command does not take any more arguments (but may accept flags)."
+ return cobra.AppendActiveHelp(nil, activeHelpMsg), cobra.ShellCompDirectiveNoFileComp
+}
diff --git a/helm/pkg/cmd/completion_test.go b/helm/pkg/cmd/completion_test.go
new file mode 100644
index 000000000..81c1ee2ad
--- /dev/null
+++ b/helm/pkg/cmd/completion_test.go
@@ -0,0 +1,96 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// Check if file completion should be performed according to parameter 'shouldBePerformed'
+func checkFileCompletion(t *testing.T, cmdName string, shouldBePerformed bool) {
+ t.Helper()
+ storage := storageFixture()
+ storage.Create(&release.Release{
+ Name: "myrelease",
+ Info: &release.Info{Status: common.StatusDeployed},
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "Myrelease-Chart",
+ Version: "1.2.3",
+ },
+ },
+ Version: 1,
+ })
+
+ testcmd := fmt.Sprintf("__complete %s ''", cmdName)
+ _, out, err := executeActionCommandC(storage, testcmd)
+ if err != nil {
+ t.Errorf("unexpected error, %s", err)
+ }
+ if !strings.Contains(out, "ShellCompDirectiveNoFileComp") != shouldBePerformed {
+ if shouldBePerformed {
+ t.Errorf("Unexpected directive ShellCompDirectiveNoFileComp when completing '%s'", cmdName)
+ } else {
+
+ t.Errorf("Did not receive directive ShellCompDirectiveNoFileComp when completing '%s'", cmdName)
+ }
+ t.Log(out)
+ }
+}
+
+func TestCompletionFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "completion", false)
+ checkFileCompletion(t, "completion bash", false)
+ checkFileCompletion(t, "completion zsh", false)
+ checkFileCompletion(t, "completion fish", false)
+}
+
+func checkReleaseCompletion(t *testing.T, cmdName string, multiReleasesAllowed bool) {
+ t.Helper()
+ multiReleaseTestGolden := "output/empty_nofile_comp.txt"
+ if multiReleasesAllowed {
+ multiReleaseTestGolden = "output/release_list_repeat_comp.txt"
+ }
+ tests := []cmdTestCase{{
+ name: "completion for uninstall",
+ cmd: fmt.Sprintf("__complete %s ''", cmdName),
+ golden: "output/release_list_comp.txt",
+ rels: []*release.Release{
+ release.Mock(&release.MockReleaseOptions{Name: "athos"}),
+ release.Mock(&release.MockReleaseOptions{Name: "porthos"}),
+ release.Mock(&release.MockReleaseOptions{Name: "aramis"}),
+ },
+ }, {
+ name: "completion for uninstall repetition",
+ cmd: fmt.Sprintf("__complete %s porthos ''", cmdName),
+ golden: multiReleaseTestGolden,
+ rels: []*release.Release{
+ release.Mock(&release.MockReleaseOptions{Name: "athos"}),
+ release.Mock(&release.MockReleaseOptions{Name: "porthos"}),
+ release.Mock(&release.MockReleaseOptions{Name: "aramis"}),
+ },
+ }}
+ for _, test := range tests {
+ runTestCmd(t, []cmdTestCase{test})
+ }
+}
diff --git a/helm/pkg/cmd/create.go b/helm/pkg/cmd/create.go
new file mode 100644
index 000000000..435c8ca82
--- /dev/null
+++ b/helm/pkg/cmd/create.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+const createDesc = `
+This command creates a chart directory along with the common files and
+directories used in a chart.
+
+For example, 'helm create foo' will create a directory structure that looks
+something like this:
+
+ foo/
+ ├── .helmignore # Contains patterns to ignore when packaging Helm charts.
+ ├── Chart.yaml # Information about your chart
+ ├── values.yaml # The default values for your templates
+ ├── charts/ # Charts that this chart depends on
+ └── templates/ # The template files
+ └── tests/ # The test files
+
+'helm create' takes a path for an argument. If directories in the given path
+do not exist, Helm will attempt to create them as it goes. If the given
+destination exists and there are files in that directory, conflicting files
+will be overwritten, but other files will be left alone.
+`
+
+type createOptions struct {
+ starter string // --starter
+ name string
+ starterDir string
+}
+
+func newCreateCmd(out io.Writer) *cobra.Command {
+ o := &createOptions{}
+
+ cmd := &cobra.Command{
+ Use: "create NAME",
+ Short: "create a new chart with the given name",
+ Long: createDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ // Allow file completion when completing the argument for the name
+ // which could be a path
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ // No more completions, so disable file completion
+ return noMoreArgsComp()
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.name = args[0]
+ o.starterDir = helmpath.DataPath("starters")
+ return o.run(out)
+ },
+ }
+
+ cmd.Flags().StringVarP(&o.starter, "starter", "p", "", "the name or absolute path to Helm starter scaffold")
+ return cmd
+}
+
+func (o *createOptions) run(out io.Writer) error {
+ fmt.Fprintf(out, "Creating %s\n", o.name)
+
+ chartname := filepath.Base(o.name)
+ cfile := &chart.Metadata{
+ Name: chartname,
+ Description: "A Helm chart for Kubernetes",
+ Type: "application",
+ Version: "0.1.0",
+ AppVersion: "0.1.0",
+ APIVersion: chart.APIVersionV2,
+ }
+
+ if o.starter != "" {
+ // Create from the starter
+ lstarter := filepath.Join(o.starterDir, o.starter)
+ // If path is absolute, we don't want to prefix it with helm starters folder
+ if filepath.IsAbs(o.starter) {
+ lstarter = o.starter
+ }
+ return chartutil.CreateFrom(cfile, filepath.Dir(o.name), lstarter)
+ }
+
+ chartutil.Stderr = out
+ _, err := chartutil.Create(chartname, filepath.Dir(o.name))
+ return err
+}
diff --git a/helm/pkg/cmd/create_test.go b/helm/pkg/cmd/create_test.go
new file mode 100644
index 000000000..90ed90eff
--- /dev/null
+++ b/helm/pkg/cmd/create_test.go
@@ -0,0 +1,192 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+func TestCreateCmd(t *testing.T) {
+ t.Chdir(t.TempDir())
+ ensure.HelmHome(t)
+ cname := "testchart"
+
+ // Run a create
+ if _, _, err := executeActionCommand("create " + cname); err != nil {
+ t.Fatalf("Failed to run create: %s", err)
+ }
+
+ // Test that the chart is there
+ if fi, err := os.Stat(cname); err != nil {
+ t.Fatalf("no chart directory: %s", err)
+ } else if !fi.IsDir() {
+ t.Fatalf("chart is not directory")
+ }
+
+ c, err := loader.LoadDir(cname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c.Name() != cname {
+ t.Errorf("Expected %q name, got %q", cname, c.Name())
+ }
+ if c.Metadata.APIVersion != chart.APIVersionV2 {
+ t.Errorf("Wrong API version: %q", c.Metadata.APIVersion)
+ }
+}
+
+func TestCreateStarterCmd(t *testing.T) {
+ t.Chdir(t.TempDir())
+ ensure.HelmHome(t)
+ cname := "testchart"
+ defer resetEnv()()
+ // Create a starter.
+ starterchart := helmpath.DataPath("starters")
+ os.MkdirAll(starterchart, 0o755)
+ if dest, err := chartutil.Create("starterchart", starterchart); err != nil {
+ t.Fatalf("Could not create chart: %s", err)
+ } else {
+ t.Logf("Created %s", dest)
+ }
+ tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl")
+ if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil {
+ t.Fatalf("Could not write template: %s", err)
+ }
+
+ // Run a create
+ if _, _, err := executeActionCommand(fmt.Sprintf("create --starter=starterchart %s", cname)); err != nil {
+ t.Errorf("Failed to run create: %s", err)
+ return
+ }
+
+ // Test that the chart is there
+ if fi, err := os.Stat(cname); err != nil {
+ t.Fatalf("no chart directory: %s", err)
+ } else if !fi.IsDir() {
+ t.Fatalf("chart is not directory")
+ }
+
+ c, err := loader.LoadDir(cname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c.Name() != cname {
+ t.Errorf("Expected %q name, got %q", cname, c.Name())
+ }
+ if c.Metadata.APIVersion != chart.APIVersionV2 {
+ t.Errorf("Wrong API version: %q", c.Metadata.APIVersion)
+ }
+
+ expectedNumberOfTemplates := 10
+ if l := len(c.Templates); l != expectedNumberOfTemplates {
+ t.Errorf("Expected %d templates, got %d", expectedNumberOfTemplates, l)
+ }
+
+ found := false
+ for _, tpl := range c.Templates {
+ if tpl.Name == "templates/foo.tpl" {
+ found = true
+ if data := string(tpl.Data); data != "test" {
+ t.Errorf("Expected template 'test', got %q", data)
+ }
+ }
+ }
+ if !found {
+ t.Error("Did not find foo.tpl")
+ }
+}
+
+func TestCreateStarterAbsoluteCmd(t *testing.T) {
+ t.Chdir(t.TempDir())
+ defer resetEnv()()
+ ensure.HelmHome(t)
+ cname := "testchart"
+
+ // Create a starter.
+ starterchart := helmpath.DataPath("starters")
+ os.MkdirAll(starterchart, 0o755)
+ if dest, err := chartutil.Create("starterchart", starterchart); err != nil {
+ t.Fatalf("Could not create chart: %s", err)
+ } else {
+ t.Logf("Created %s", dest)
+ }
+ tplpath := filepath.Join(starterchart, "starterchart", "templates", "foo.tpl")
+ if err := os.WriteFile(tplpath, []byte("test"), 0o644); err != nil {
+ t.Fatalf("Could not write template: %s", err)
+ }
+
+ starterChartPath := filepath.Join(starterchart, "starterchart")
+
+ // Run a create
+ if _, _, err := executeActionCommand(fmt.Sprintf("create --starter=%s %s", starterChartPath, cname)); err != nil {
+ t.Errorf("Failed to run create: %s", err)
+ return
+ }
+
+ // Test that the chart is there
+ if fi, err := os.Stat(cname); err != nil {
+ t.Fatalf("no chart directory: %s", err)
+ } else if !fi.IsDir() {
+ t.Fatalf("chart is not directory")
+ }
+
+ c, err := loader.LoadDir(cname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c.Name() != cname {
+ t.Errorf("Expected %q name, got %q", cname, c.Name())
+ }
+ if c.Metadata.APIVersion != chart.APIVersionV2 {
+ t.Errorf("Wrong API version: %q", c.Metadata.APIVersion)
+ }
+
+ expectedNumberOfTemplates := 10
+ if l := len(c.Templates); l != expectedNumberOfTemplates {
+ t.Errorf("Expected %d templates, got %d", expectedNumberOfTemplates, l)
+ }
+
+ found := false
+ for _, tpl := range c.Templates {
+ if tpl.Name == "templates/foo.tpl" {
+ found = true
+ if data := string(tpl.Data); data != "test" {
+ t.Errorf("Expected template 'test', got %q", data)
+ }
+ }
+ }
+ if !found {
+ t.Error("Did not find foo.tpl")
+ }
+}
+
+func TestCreateFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "create", true)
+ checkFileCompletion(t, "create myname", false)
+}
diff --git a/helm/pkg/cmd/dependency.go b/helm/pkg/cmd/dependency.go
new file mode 100644
index 000000000..5978c902a
--- /dev/null
+++ b/helm/pkg/cmd/dependency.go
@@ -0,0 +1,136 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const dependencyDesc = `
+Manage the dependencies of a chart.
+
+Helm charts store their dependencies in 'charts/'. For chart developers, it is
+often easier to manage dependencies in 'Chart.yaml' which declares all
+dependencies.
+
+The dependency commands operate on that file, making it easy to synchronize
+between the desired dependencies and the actual dependencies stored in the
+'charts/' directory.
+
+For example, this Chart.yaml declares two dependencies:
+
+ # Chart.yaml
+ dependencies:
+ - name: nginx
+ version: "1.2.3"
+ repository: "https://example.com/charts"
+ - name: memcached
+ version: "3.2.1"
+ repository: "https://another.example.com/charts"
+
+
+The 'name' should be the name of a chart, where that name must match the name
+in that chart's 'Chart.yaml' file.
+
+The 'version' field should contain a semantic version or version range.
+
+The 'repository' URL should point to a Chart Repository. Helm expects that by
+appending '/index.yaml' to the URL, it should be able to retrieve the chart
+repository's index. Note: 'repository' can be an alias. The alias must start
+with 'alias:' or '@'.
+
+Starting from 2.2.0, repository can be defined as the path to the directory of
+the dependency charts stored locally. The path should start with a prefix of
+"file://". For example,
+
+ # Chart.yaml
+ dependencies:
+ - name: nginx
+ version: "1.2.3"
+ repository: "file://../dependency_chart/nginx"
+
+If the dependency chart is retrieved locally, it is not required to have the
+repository added to helm by "helm add repo". Version matching is also supported
+for this case.
+`
+
+const dependencyListDesc = `
+List all of the dependencies declared in a chart.
+
+This can take chart archives and chart directories as input. It will not alter
+the contents of a chart.
+
+This will produce an error if the chart cannot be loaded.
+`
+
+func newDependencyCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "dependency update|build|list",
+ Aliases: []string{"dep", "dependencies"},
+ Short: "manage a chart's dependencies",
+ Long: dependencyDesc,
+ Args: require.NoArgs,
+ }
+
+ cmd.AddCommand(newDependencyListCmd(out))
+ cmd.AddCommand(newDependencyUpdateCmd(cfg, out))
+ cmd.AddCommand(newDependencyBuildCmd(out))
+
+ return cmd
+}
+
+func newDependencyListCmd(out io.Writer) *cobra.Command {
+ client := action.NewDependency()
+ cmd := &cobra.Command{
+ Use: "list CHART",
+ Aliases: []string{"ls"},
+ Short: "list the dependencies for the given chart",
+ Long: dependencyListDesc,
+ Args: require.MaximumNArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ chartpath := "."
+ if len(args) > 0 {
+ chartpath = filepath.Clean(args[0])
+ }
+ return client.List(chartpath, out)
+ },
+ }
+
+ f := cmd.Flags()
+
+ f.UintVar(&client.ColumnWidth, "max-col-width", 80, "maximum column width for output table")
+ return cmd
+}
+
+func addDependencySubcommandFlags(f *pflag.FlagSet, client *action.Dependency) {
+ f.BoolVar(&client.Verify, "verify", false, "verify the packages against signatures")
+ f.StringVar(&client.Keyring, "keyring", defaultKeyring(), "keyring containing public keys")
+ f.BoolVar(&client.SkipRefresh, "skip-refresh", false, "do not refresh the local repository cache")
+ f.StringVar(&client.Username, "username", "", "chart repository username where to locate the requested chart")
+ f.StringVar(&client.Password, "password", "", "chart repository password where to locate the requested chart")
+ f.StringVar(&client.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
+ f.StringVar(&client.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
+ f.BoolVar(&client.InsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
+ f.BoolVar(&client.PlainHTTP, "plain-http", false, "use insecure HTTP connections for the chart download")
+ f.StringVar(&client.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+}
diff --git a/helm/pkg/cmd/dependency_build.go b/helm/pkg/cmd/dependency_build.go
new file mode 100644
index 000000000..7e5c731b7
--- /dev/null
+++ b/helm/pkg/cmd/dependency_build.go
@@ -0,0 +1,98 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+ "k8s.io/client-go/util/homedir"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+)
+
+const dependencyBuildDesc = `
+Build out the charts/ directory from the Chart.lock file.
+
+Build is used to reconstruct a chart's dependencies to the state specified in
+the lock file. This will not re-negotiate dependencies, as 'helm dependency update'
+does.
+
+If no lock file is found, 'helm dependency build' will mirror the behavior
+of 'helm dependency update'.
+`
+
+func newDependencyBuildCmd(out io.Writer) *cobra.Command {
+ client := action.NewDependency()
+
+ cmd := &cobra.Command{
+ Use: "build CHART",
+ Short: "rebuild the charts/ directory based on the Chart.lock file",
+ Long: dependencyBuildDesc,
+ Args: require.MaximumNArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ chartpath := "."
+ if len(args) > 0 {
+ chartpath = filepath.Clean(args[0])
+ }
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+
+ man := &downloader.Manager{
+ Out: out,
+ ChartPath: chartpath,
+ Keyring: client.Keyring,
+ SkipUpdate: client.SkipRefresh,
+ Getters: getter.All(settings),
+ RegistryClient: registryClient,
+ RepositoryConfig: settings.RepositoryConfig,
+ RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
+ Debug: settings.Debug,
+ }
+ if client.Verify {
+ man.Verify = downloader.VerifyIfPossible
+ }
+ err = man.Build()
+ if e, ok := err.(downloader.ErrRepoNotFound); ok {
+ return fmt.Errorf("%s. Please add the missing repos via 'helm repo add'", e.Error())
+ }
+ return err
+ },
+ }
+
+ f := cmd.Flags()
+ addDependencySubcommandFlags(f, client)
+
+ return cmd
+}
+
+// defaultKeyring returns the expanded path to the default keyring.
+func defaultKeyring() string {
+ if v, ok := os.LookupEnv("GNUPGHOME"); ok {
+ return filepath.Join(v, "pubring.gpg")
+ }
+ return filepath.Join(homedir.HomeDir(), ".gnupg", "pubring.gpg")
+}
diff --git a/helm/pkg/cmd/dependency_build_test.go b/helm/pkg/cmd/dependency_build_test.go
new file mode 100644
index 000000000..a3473301d
--- /dev/null
+++ b/helm/pkg/cmd/dependency_build_test.go
@@ -0,0 +1,164 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/provenance"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestDependencyBuildCmd(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz"),
+ )
+ defer srv.Stop()
+
+ rootDir := srv.Root()
+ srv.LinkIndices()
+
+ ociSrv, err := repotest.NewOCIServer(t, srv.Root())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ociChartName := "oci-depending-chart"
+ c := createTestingMetadataForOCI(ociChartName, ociSrv.RegistryURL)
+ if _, err := chartutil.Save(c, ociSrv.Dir); err != nil {
+ t.Fatal(err)
+ }
+ ociSrv.Run(t, repotest.WithDependingChart(c))
+
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+
+ chartname := "depbuild"
+ createTestingChart(t, rootDir, chartname, srv.URL())
+ repoFile := filepath.Join(rootDir, "repositories.yaml")
+
+ cmd := fmt.Sprintf("dependency build '%s' --repository-config %s --repository-cache %s --plain-http", filepath.Join(rootDir, chartname), repoFile, rootDir)
+ _, out, err := executeActionCommand(cmd)
+
+ // In the first pass, we basically want the same results as an update.
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+
+ if !strings.Contains(out, `update from the "test" chart repository`) {
+ t.Errorf("Repo did not get updated\n%s", out)
+ }
+
+ // Make sure the actual file got downloaded.
+ expect := filepath.Join(rootDir, chartname, "charts/reqtest-0.1.0.tgz")
+ if _, err := os.Stat(expect); err != nil {
+ t.Fatal(err)
+ }
+
+ // In the second pass, we want to remove the chart's request dependency,
+ // then see if it restores from the lock.
+ lockfile := filepath.Join(rootDir, chartname, "Chart.lock")
+ if _, err := os.Stat(lockfile); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.RemoveAll(expect); err != nil {
+ t.Fatal(err)
+ }
+
+ _, out, err = executeActionCommand(cmd)
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+
+ // Now repeat the test that the dependency exists.
+ if _, err := os.Stat(expect); err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure that build is also fetching the correct version.
+ hash, err := provenance.DigestFile(expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ i, err := repo.LoadIndexFile(filepath.Join(rootDir, "index.yaml"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ reqver := i.Entries["reqtest"][0]
+ if h := reqver.Digest; h != hash {
+ t.Errorf("Failed hash match: expected %s, got %s", hash, h)
+ }
+ if v := reqver.Version; v != "0.1.0" {
+ t.Errorf("mismatched versions. Expected %q, got %q", "0.1.0", v)
+ }
+
+ skipRefreshCmd := fmt.Sprintf("dependency build '%s' --skip-refresh --repository-config %s --repository-cache %s --plain-http", filepath.Join(rootDir, chartname), repoFile, rootDir)
+ _, out, err = executeActionCommand(skipRefreshCmd)
+
+ // In this pass, we check --skip-refresh option becomes effective.
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+
+ if strings.Contains(out, `update from the "test" chart repository`) {
+ t.Errorf("Repo did get updated\n%s", out)
+ }
+
+ // OCI dependencies
+ if err := chartutil.SaveDir(c, dir()); err != nil {
+ t.Fatal(err)
+ }
+ cmd = fmt.Sprintf("dependency build '%s' --repository-config %s --repository-cache %s --registry-config %s/config.json --plain-http",
+ dir(ociChartName),
+ dir("repositories.yaml"),
+ dir(),
+ dir())
+ _, out, err = executeActionCommand(cmd)
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+ expect = dir(ociChartName, "charts/oci-dependent-chart-0.1.0.tgz")
+ if _, err := os.Stat(expect); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDependencyBuildCmdWithHelmV2Hash(t *testing.T) {
+ chartName := "testdata/testcharts/issue-7233"
+
+ cmd := fmt.Sprintf("dependency build '%s'", chartName)
+ _, out, err := executeActionCommand(cmd)
+
+ // Want to make sure the build can verify Helm v2 hash
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+}
diff --git a/helm/pkg/cmd/dependency_test.go b/helm/pkg/cmd/dependency_test.go
new file mode 100644
index 000000000..d6bcebf1b
--- /dev/null
+++ b/helm/pkg/cmd/dependency_test.go
@@ -0,0 +1,57 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestDependencyListCmd(t *testing.T) {
+ noSuchChart := cmdTestCase{
+ name: "No such chart",
+ cmd: "dependency list /no/such/chart",
+ golden: "output/dependency-list-no-chart-linux.txt",
+ wantError: true,
+ }
+
+ noDependencies := cmdTestCase{
+ name: "No dependencies",
+ cmd: "dependency list testdata/testcharts/alpine",
+ golden: "output/dependency-list-no-requirements-linux.txt",
+ }
+
+ if runtime.GOOS == "windows" {
+ noSuchChart.golden = "output/dependency-list-no-chart-windows.txt"
+ noDependencies.golden = "output/dependency-list-no-requirements-windows.txt"
+ }
+
+ tests := []cmdTestCase{noSuchChart,
+ noDependencies, {
+ name: "Dependencies in chart dir",
+ cmd: "dependency list testdata/testcharts/reqtest",
+ golden: "output/dependency-list.txt",
+ }, {
+ name: "Dependencies in chart archive",
+ cmd: "dependency list testdata/testcharts/reqtest-0.1.0.tgz",
+ golden: "output/dependency-list-archive.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestDependencyFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "dependency", false)
+}
diff --git a/helm/pkg/cmd/dependency_update.go b/helm/pkg/cmd/dependency_update.go
new file mode 100644
index 000000000..7f805c37b
--- /dev/null
+++ b/helm/pkg/cmd/dependency_update.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+)
+
+const dependencyUpDesc = `
+Update the on-disk dependencies to mirror Chart.yaml.
+
+This command verifies that the required charts, as expressed in 'Chart.yaml',
+are present in 'charts/' and are at an acceptable version. It will pull down
+the latest charts that satisfy the dependencies, and clean up old dependencies.
+
+On successful update, this will generate a lock file that can be used to
+rebuild the dependencies to an exact version.
+
+Dependencies are not required to be represented in 'Chart.yaml'. For that
+reason, an update command will not remove charts unless they are (a) present
+in the Chart.yaml file, but (b) at the wrong version.
+`
+
+// newDependencyUpdateCmd creates a new dependency update command.
+func newDependencyUpdateCmd(_ *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewDependency()
+
+ cmd := &cobra.Command{
+ Use: "update CHART",
+ Aliases: []string{"up"},
+ Short: "update charts/ based on the contents of Chart.yaml",
+ Long: dependencyUpDesc,
+ Args: require.MaximumNArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ chartpath := "."
+ if len(args) > 0 {
+ chartpath = filepath.Clean(args[0])
+ }
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+
+ man := &downloader.Manager{
+ Out: out,
+ ChartPath: chartpath,
+ Keyring: client.Keyring,
+ SkipUpdate: client.SkipRefresh,
+ Getters: getter.All(settings),
+ RegistryClient: registryClient,
+ RepositoryConfig: settings.RepositoryConfig,
+ RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
+ Debug: settings.Debug,
+ }
+ if client.Verify {
+ man.Verify = downloader.VerifyAlways
+ }
+ return man.Update()
+ },
+ }
+
+ f := cmd.Flags()
+ addDependencySubcommandFlags(f, client)
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/dependency_update_test.go b/helm/pkg/cmd/dependency_update_test.go
new file mode 100644
index 000000000..3eaa51df1
--- /dev/null
+++ b/helm/pkg/cmd/dependency_update_test.go
@@ -0,0 +1,312 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/provenance"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestDependencyUpdateCmd(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz"),
+ )
+ defer srv.Stop()
+ t.Logf("Listening on directory %s", srv.Root())
+
+ ociSrv, err := repotest.NewOCIServer(t, srv.Root())
+ if err != nil {
+ t.Fatal(err)
+ }
+ contentCache := t.TempDir()
+
+ ociChartName := "oci-depending-chart"
+ c := createTestingMetadataForOCI(ociChartName, ociSrv.RegistryURL)
+ if _, err := chartutil.Save(c, ociSrv.Dir); err != nil {
+ t.Fatal(err)
+ }
+ ociSrv.Run(t, repotest.WithDependingChart(c))
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+
+ chartname := "depup"
+ ch := createTestingMetadata(chartname, srv.URL())
+ md := ch.Metadata
+ if err := chartutil.SaveDir(ch, dir()); err != nil {
+ t.Fatal(err)
+ }
+
+ _, out, err := executeActionCommand(
+ fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache),
+ )
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+
+ // This is written directly to stdout, so we have to capture as is.
+ if !strings.Contains(out, `update from the "test" chart repository`) {
+ t.Errorf("Repo did not get updated\n%s", out)
+ }
+
+ // Make sure the actual file got downloaded.
+ expect := dir(chartname, "charts/reqtest-0.1.0.tgz")
+ if _, err := os.Stat(expect); err != nil {
+ t.Fatal(err)
+ }
+
+ hash, err := provenance.DigestFile(expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ i, err := repo.LoadIndexFile(dir(helmpath.CacheIndexFile("test")))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ reqver := i.Entries["reqtest"][0]
+ if h := reqver.Digest; h != hash {
+ t.Errorf("Failed hash match: expected %s, got %s", hash, h)
+ }
+
+ // Now change the dependencies and update. This verifies that on update,
+ // old dependencies are cleansed and new dependencies are added.
+ md.Dependencies = []*chart.Dependency{
+ {Name: "reqtest", Version: "0.1.0", Repository: srv.URL()},
+ {Name: "compressedchart", Version: "0.3.0", Repository: srv.URL()},
+ }
+ if err := chartutil.SaveChartfile(dir(chartname, "Chart.yaml"), md); err != nil {
+ t.Fatal(err)
+ }
+
+ _, out, err = executeActionCommand(fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache))
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+
+ // In this second run, we should see compressedchart-0.3.0.tgz, and not
+ // the 0.1.0 version.
+ expect = dir(chartname, "charts/compressedchart-0.3.0.tgz")
+ if _, err := os.Stat(expect); err != nil {
+ t.Fatalf("Expected %q: %s", expect, err)
+ }
+ unexpected := dir(chartname, "charts/compressedchart-0.1.0.tgz")
+ if _, err := os.Stat(unexpected); err == nil {
+ t.Fatalf("Unexpected %q", unexpected)
+ }
+
+ // test for OCI charts
+ if err := chartutil.SaveDir(c, dir()); err != nil {
+ t.Fatal(err)
+ }
+ cmd := fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --registry-config %s/config.json --content-cache %s --plain-http",
+ dir(ociChartName),
+ dir("repositories.yaml"),
+ dir(),
+ dir(),
+ contentCache)
+ _, out, err = executeActionCommand(cmd)
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+ expect = dir(ociChartName, "charts/oci-dependent-chart-0.1.0.tgz")
+ if _, err := os.Stat(expect); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDependencyUpdateCmd_DoNotDeleteOldChartsOnError(t *testing.T) {
+ defer resetEnv()()
+ ensure.HelmHome(t)
+
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz"),
+ )
+ defer srv.Stop()
+ t.Logf("Listening on directory %s", srv.Root())
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ chartname := "depupdelete"
+
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+ createTestingChart(t, dir(), chartname, srv.URL())
+
+ _, output, err := executeActionCommand(fmt.Sprintf("dependency update %s --repository-config %s --repository-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir()))
+ if err != nil {
+ t.Logf("Output: %s", output)
+ t.Fatal(err)
+ }
+
+ // Chart repo is down
+ srv.Stop()
+ contentCache := t.TempDir()
+
+ _, output, err = executeActionCommand(fmt.Sprintf("dependency update %s --repository-config %s --repository-cache %s --content-cache %s --plain-http", dir(chartname), dir("repositories.yaml"), dir(), contentCache))
+ if err == nil {
+ t.Logf("Output: %s", output)
+ t.Fatal("Expected error, got nil")
+ }
+
+ // Make sure charts dir still has dependencies
+ files, err := os.ReadDir(filepath.Join(dir(chartname), "charts"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ dependencies := []string{"compressedchart-0.1.0.tgz", "reqtest-0.1.0.tgz"}
+
+ if len(dependencies) != len(files) {
+ t.Fatalf("Expected %d chart dependencies, got %d", len(dependencies), len(files))
+ }
+ for index, file := range files {
+ if dependencies[index] != file.Name() {
+ t.Fatalf("Chart dependency %s not matching %s", dependencies[index], file.Name())
+ }
+ }
+
+ // Make sure tmpcharts-x is deleted
+ tmpPath := filepath.Join(dir(chartname), fmt.Sprintf("tmpcharts-%d", os.Getpid()))
+ if _, err := os.Stat(tmpPath); !errors.Is(err, fs.ErrNotExist) {
+ t.Fatalf("tmpcharts dir still exists")
+ }
+}
+
+func TestDependencyUpdateCmd_WithRepoThatWasNotAdded(t *testing.T) {
+ srv := setupMockRepoServer(t)
+ srvForUnmanagedRepo := setupMockRepoServer(t)
+ defer srv.Stop()
+ defer srvForUnmanagedRepo.Stop()
+
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+
+ chartname := "depup"
+ ch := createTestingMetadata(chartname, srv.URL())
+ chartDependency := &chart.Dependency{
+ Name: "signtest",
+ Version: "0.1.0",
+ Repository: srvForUnmanagedRepo.URL(),
+ }
+ ch.Metadata.Dependencies = append(ch.Metadata.Dependencies, chartDependency)
+
+ if err := chartutil.SaveDir(ch, dir()); err != nil {
+ t.Fatal(err)
+ }
+
+ contentCache := t.TempDir()
+
+ _, out, err := executeActionCommand(
+ fmt.Sprintf("dependency update '%s' --repository-config %s --repository-cache %s --content-cache %s", dir(chartname),
+ dir("repositories.yaml"), dir(), contentCache),
+ )
+
+ if err != nil {
+ t.Logf("Output: %s", out)
+ t.Fatal(err)
+ }
+
+ // This is written directly to stdout, so we have to capture as is
+ if !strings.Contains(out, `Getting updates for unmanaged Helm repositories...`) {
+ t.Errorf("No ‘unmanaged’ Helm repo used in test chartdependency or it doesn’t cause the creation "+
+ "of an ‘ad hoc’ repo index cache file\n%s", out)
+ }
+}
+
+func setupMockRepoServer(t *testing.T) *repotest.Server {
+ t.Helper()
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz"),
+ )
+
+ t.Logf("Listening on directory %s", srv.Root())
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ return srv
+}
+
+// createTestingMetadata creates a basic chart that depends on reqtest-0.1.0
+//
+// The baseURL can be used to point to a particular repository server.
+func createTestingMetadata(name, baseURL string) *chart.Chart {
+ return &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV2,
+ Name: name,
+ Version: "1.2.3",
+ Dependencies: []*chart.Dependency{
+ {Name: "reqtest", Version: "0.1.0", Repository: baseURL},
+ {Name: "compressedchart", Version: "0.1.0", Repository: baseURL},
+ },
+ },
+ }
+}
+
+func createTestingMetadataForOCI(name, registryURL string) *chart.Chart {
+ return &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV2,
+ Name: name,
+ Version: "1.2.3",
+ Dependencies: []*chart.Dependency{
+ {Name: "oci-dependent-chart", Version: "0.1.0", Repository: fmt.Sprintf("oci://%s/u/ocitestuser", registryURL)},
+ },
+ },
+ }
+}
+
+// createTestingChart creates a basic chart that depends on reqtest-0.1.0
+//
+// The baseURL can be used to point to a particular repository server.
+func createTestingChart(t *testing.T, dest, name, baseURL string) {
+ t.Helper()
+ cfile := createTestingMetadata(name, baseURL)
+ if err := chartutil.SaveDir(cfile, dest); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/helm/pkg/cmd/docs.go b/helm/pkg/cmd/docs.go
new file mode 100644
index 000000000..7fae60743
--- /dev/null
+++ b/helm/pkg/cmd/docs.go
@@ -0,0 +1,103 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const docsDesc = `
+Generate documentation files for Helm.
+
+This command can generate documentation for Helm in the following formats:
+
+- Markdown
+- Man pages
+
+It can also generate bash autocompletions.
+`
+
+type docsOptions struct {
+ dest string
+ docTypeString string
+ topCmd *cobra.Command
+ generateHeaders bool
+}
+
+func newDocsCmd(out io.Writer) *cobra.Command {
+ o := &docsOptions{}
+
+ cmd := &cobra.Command{
+ Use: "docs",
+ Short: "generate documentation as markdown or man pages",
+ Long: docsDesc,
+ Hidden: true,
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ o.topCmd = cmd.Root()
+ return o.run(out)
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVar(&o.dest, "dir", "./", "directory to which documentation is written")
+ f.StringVar(&o.docTypeString, "type", "markdown", "the type of documentation to generate (markdown, man, bash)")
+ f.BoolVar(&o.generateHeaders, "generate-headers", false, "generate standard headers for markdown files")
+
+ cmd.RegisterFlagCompletionFunc("type", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"bash", "man", "markdown"}, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ return cmd
+}
+
+func (o *docsOptions) run(_ io.Writer) error {
+ switch o.docTypeString {
+ case "markdown", "mdown", "md":
+ if o.generateHeaders {
+ standardLinks := func(s string) string { return s }
+
+ hdrFunc := func(filename string) string {
+ base := filepath.Base(filename)
+ name := strings.TrimSuffix(base, path.Ext(base))
+ title := cases.Title(language.Und, cases.NoLower).String(strings.ReplaceAll(name, "_", " "))
+ return fmt.Sprintf("---\ntitle: \"%s\"\n---\n\n", title)
+ }
+
+ return doc.GenMarkdownTreeCustom(o.topCmd, o.dest, hdrFunc, standardLinks)
+ }
+ return doc.GenMarkdownTree(o.topCmd, o.dest)
+ case "man":
+ manHdr := &doc.GenManHeader{Title: "HELM", Section: "1"}
+ return doc.GenManTree(o.topCmd, manHdr, o.dest)
+ case "bash":
+ return o.topCmd.GenBashCompletionFile(filepath.Join(o.dest, "completions.bash"))
+ default:
+ return fmt.Errorf("unknown doc type %q. Try 'markdown' or 'man'", o.docTypeString)
+ }
+}
diff --git a/helm/pkg/cmd/docs_test.go b/helm/pkg/cmd/docs_test.go
new file mode 100644
index 000000000..4a8a8c687
--- /dev/null
+++ b/helm/pkg/cmd/docs_test.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestDocsTypeFlagCompletion(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "completion for docs --type",
+ cmd: "__complete docs --type ''",
+ golden: "output/docs-type-comp.txt",
+ }, {
+ name: "completion for docs --type, no filter",
+ cmd: "__complete docs --type mar",
+ golden: "output/docs-type-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestDocsFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "docs", false)
+}
diff --git a/helm/pkg/cmd/env.go b/helm/pkg/cmd/env.go
new file mode 100644
index 000000000..8da201031
--- /dev/null
+++ b/helm/pkg/cmd/env.go
@@ -0,0 +1,76 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+var envHelp = `
+Env prints out all the environment information in use by Helm.
+`
+
+func newEnvCmd(out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "env",
+ Short: "helm client environment information",
+ Long: envHelp,
+ Args: require.MaximumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ keys := getSortedEnvVarKeys()
+ return keys, cobra.ShellCompDirectiveNoFileComp
+ }
+
+ return noMoreArgsComp()
+ },
+ Run: func(_ *cobra.Command, args []string) {
+ envVars := settings.EnvVars()
+
+ if len(args) == 0 {
+ // Sort the variables by alphabetical order.
+ // This allows for a constant output across calls to 'helm env'.
+ keys := getSortedEnvVarKeys()
+
+ for _, k := range keys {
+ fmt.Fprintf(out, "%s=\"%s\"\n", k, envVars[k])
+ }
+ } else {
+ fmt.Fprintf(out, "%s\n", envVars[args[0]])
+ }
+ },
+ }
+ return cmd
+}
+
+func getSortedEnvVarKeys() []string {
+ envVars := settings.EnvVars()
+
+ var keys []string
+ for k := range envVars {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ return keys
+}
diff --git a/helm/pkg/cmd/env_test.go b/helm/pkg/cmd/env_test.go
new file mode 100644
index 000000000..c5d7af1b7
--- /dev/null
+++ b/helm/pkg/cmd/env_test.go
@@ -0,0 +1,35 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestEnv(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "completion for env",
+ cmd: "__complete env ''",
+ golden: "output/env-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestEnvFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "env", false)
+ checkFileCompletion(t, "env HELM_BIN", false)
+}
diff --git a/helm/pkg/cmd/flags.go b/helm/pkg/cmd/flags.go
new file mode 100644
index 000000000..6d9d117f8
--- /dev/null
+++ b/helm/pkg/cmd/flags.go
@@ -0,0 +1,306 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "log/slog"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "k8s.io/klog/v2"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cli/values"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/kube"
+ "helm.sh/helm/v4/pkg/postrenderer"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+const (
+ outputFlag = "output"
+ postRenderFlag = "post-renderer"
+ postRenderArgsFlag = "post-renderer-args"
+)
+
+func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) {
+ f.StringSliceVarP(&v.ValueFiles, "values", "f", []string{}, "specify values in a YAML file or a URL (can specify multiple)")
+ f.StringArrayVar(&v.Values, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
+ f.StringArrayVar(&v.StringValues, "set-string", []string{}, "set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")
+ f.StringArrayVar(&v.FileValues, "set-file", []string{}, "set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)")
+ f.StringArrayVar(&v.JSONValues, "set-json", []string{}, "set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2 or using json format: {\"key1\": jsonval1, \"key2\": \"jsonval2\"})")
+ f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line")
+}
+
+func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) {
+ cmd.Flags().Var(
+ newWaitValue(kube.HookOnlyStrategy, wait),
+ "wait",
+ "wait until resources are ready (up to --timeout). Use '--wait' alone for 'watcher' strategy, or specify one of: 'watcher', 'hookOnly', 'legacy'. Default when flag is omitted: 'hookOnly'.",
+ )
+ cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy)
+}
+
+type waitValue kube.WaitStrategy
+
+func newWaitValue(defaultValue kube.WaitStrategy, ws *kube.WaitStrategy) *waitValue {
+ *ws = defaultValue
+ return (*waitValue)(ws)
+}
+
+func (ws *waitValue) String() string {
+ if ws == nil {
+ return ""
+ }
+ return string(*ws)
+}
+
+func (ws *waitValue) Set(s string) error {
+ switch s {
+ case string(kube.StatusWatcherStrategy), string(kube.LegacyStrategy), string(kube.HookOnlyStrategy):
+ *ws = waitValue(s)
+ return nil
+ case "true":
+ slog.Warn("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
+ *ws = waitValue(kube.StatusWatcherStrategy)
+ return nil
+ case "false":
+ slog.Warn("--wait=false is deprecated (boolean value) and can be replaced with --wait=hookOnly")
+ *ws = waitValue(kube.HookOnlyStrategy)
+ return nil
+ default:
+ return fmt.Errorf("invalid wait input %q. Valid inputs are %s, %s, and %s", s, kube.StatusWatcherStrategy, kube.HookOnlyStrategy, kube.LegacyStrategy)
+ }
+}
+
+func (ws *waitValue) Type() string {
+ return "WaitStrategy"
+}
+
+func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) {
+ f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
+ f.BoolVar(&c.Verify, "verify", false, "verify the package before using it")
+ f.StringVar(&c.Keyring, "keyring", defaultKeyring(), "location of public keys used for verification")
+ f.StringVar(&c.RepoURL, "repo", "", "chart repository url where to locate the requested chart")
+ f.StringVar(&c.Username, "username", "", "chart repository username where to locate the requested chart")
+ f.StringVar(&c.Password, "password", "", "chart repository password where to locate the requested chart")
+ f.StringVar(&c.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
+ f.StringVar(&c.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
+ f.BoolVar(&c.InsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
+ f.BoolVar(&c.PlainHTTP, "plain-http", false, "use insecure HTTP connections for the chart download")
+ f.StringVar(&c.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ f.BoolVar(&c.PassCredentialsAll, "pass-credentials", false, "pass credentials to all domains")
+}
+
+// bindOutputFlag will add the output flag to the given command and bind the
+// value to the given format pointer
+func bindOutputFlag(cmd *cobra.Command, varRef *output.Format) {
+ cmd.Flags().VarP(newOutputValue(output.Table, varRef), outputFlag, "o",
+ fmt.Sprintf("prints the output in the specified format. Allowed values: %s", strings.Join(output.Formats(), ", ")))
+
+ err := cmd.RegisterFlagCompletionFunc(outputFlag, func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ var formatNames []string
+ for format, desc := range output.FormatsWithDesc() {
+ formatNames = append(formatNames, fmt.Sprintf("%s\t%s", format, desc))
+ }
+
+ // Sort the results to get a deterministic order for the tests
+ sort.Strings(formatNames)
+ return formatNames, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+type outputValue output.Format
+
+func newOutputValue(defaultValue output.Format, p *output.Format) *outputValue {
+ *p = defaultValue
+ return (*outputValue)(p)
+}
+
+func (o *outputValue) String() string {
+ // It is much cleaner looking (and technically less allocations) to just
+ // convert to a string rather than type asserting to the underlying
+ // output.Format
+ return string(*o)
+}
+
+func (o *outputValue) Type() string {
+ return "format"
+}
+
+func (o *outputValue) Set(s string) error {
+ outfmt, err := output.ParseFormat(s)
+ if err != nil {
+ return err
+ }
+ *o = outputValue(outfmt)
+ return nil
+}
+
+// TODO there is probably a better way to pass cobra settings than as a param
+func bindPostRenderFlag(cmd *cobra.Command, varRef *postrenderer.PostRenderer, settings *cli.EnvSettings) {
+ p := &postRendererOptions{varRef, "", []string{}, settings}
+ cmd.Flags().Var(&postRendererString{p}, postRenderFlag, "the name of a postrenderer type plugin to be used for post rendering. If it exists, the plugin will be used")
+ cmd.Flags().Var(&postRendererArgsSlice{p}, postRenderArgsFlag, "an argument to the post-renderer (can specify multiple)")
+}
+
+type postRendererOptions struct {
+ renderer *postrenderer.PostRenderer
+ pluginName string
+ args []string
+ settings *cli.EnvSettings
+}
+
+type postRendererString struct {
+ options *postRendererOptions
+}
+
+func (p *postRendererString) String() string {
+ return p.options.pluginName
+}
+
+func (p *postRendererString) Type() string {
+ return "postRendererString"
+}
+
+func (p *postRendererString) Set(val string) error {
+ if val == "" {
+ return nil
+ }
+ if p.options.pluginName != "" {
+ return fmt.Errorf("cannot specify --post-renderer flag more than once")
+ }
+ p.options.pluginName = val
+ pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...)
+ if err != nil {
+ return err
+ }
+ *p.options.renderer = pr
+ return nil
+}
+
+type postRendererArgsSlice struct {
+ options *postRendererOptions
+}
+
+func (p *postRendererArgsSlice) String() string {
+ return "[" + strings.Join(p.options.args, ",") + "]"
+}
+
+func (p *postRendererArgsSlice) Type() string {
+ return "postRendererArgsSlice"
+}
+
+func (p *postRendererArgsSlice) Set(val string) error {
+
+ // a post-renderer defined by a user may accept empty arguments
+ p.options.args = append(p.options.args, val)
+
+ if p.options.pluginName == "" {
+ return nil
+ }
+ // overwrite if already create PostRenderer by `post-renderer` flags
+ pr, err := postrenderer.NewPostRendererPlugin(p.options.settings, p.options.pluginName, p.options.args...)
+ if err != nil {
+ return err
+ }
+ *p.options.renderer = pr
+ return nil
+}
+
+func (p *postRendererArgsSlice) Append(val string) error {
+ p.options.args = append(p.options.args, val)
+ return nil
+}
+
+func (p *postRendererArgsSlice) Replace(val []string) error {
+ p.options.args = val
+ return nil
+}
+
+func (p *postRendererArgsSlice) GetSlice() []string {
+ return p.options.args
+}
+
+func compVersionFlag(chartRef string, _ string) ([]string, cobra.ShellCompDirective) {
+ chartInfo := strings.Split(chartRef, "/")
+ if len(chartInfo) != 2 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+
+ repoName := chartInfo[0]
+ chartName := chartInfo[1]
+
+ path := filepath.Join(settings.RepositoryCache, helmpath.CacheIndexFile(repoName))
+
+ var versions []string
+ if indexFile, err := repo.LoadIndexFile(path); err == nil {
+ for _, details := range indexFile.Entries[chartName] {
+ appVersion := details.AppVersion
+ appVersionDesc := ""
+ if appVersion != "" {
+ appVersionDesc = fmt.Sprintf("App: %s, ", appVersion)
+ }
+ created := details.Created.Format("January 2, 2006")
+ createdDesc := ""
+ if created != "" {
+ createdDesc = fmt.Sprintf("Created: %s ", created)
+ }
+ deprecated := ""
+ if details.Deprecated {
+ deprecated = "(deprecated)"
+ }
+ versions = append(versions, fmt.Sprintf("%s\t%s%s%s", details.Version, appVersionDesc, createdDesc, deprecated))
+ }
+ }
+
+ return versions, cobra.ShellCompDirectiveNoFileComp
+}
+
+// addKlogFlags adds flags from k8s.io/klog
+// marks the flags as hidden to avoid polluting the help text
+func addKlogFlags(fs *pflag.FlagSet) {
+ local := flag.NewFlagSet("klog", flag.ExitOnError)
+ klog.InitFlags(local)
+ local.VisitAll(func(fl *flag.Flag) {
+ fl.Name = normalize(fl.Name)
+ if fs.Lookup(fl.Name) != nil {
+ return
+ }
+ newflag := pflag.PFlagFromGoFlag(fl)
+ newflag.Hidden = true
+ fs.AddFlag(newflag)
+ })
+}
+
+// normalize replaces underscores with hyphens
+func normalize(s string) string {
+ return strings.ReplaceAll(s, "_", "-")
+}
diff --git a/helm/pkg/cmd/flags_test.go b/helm/pkg/cmd/flags_test.go
new file mode 100644
index 000000000..614970252
--- /dev/null
+++ b/helm/pkg/cmd/flags_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/action"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func outputFlagCompletionTest(t *testing.T, cmdName string) {
+ t.Helper()
+ releasesMockWithStatus := func(info *release.Info, hooks ...*release.Hook) []*release.Release {
+ info.LastDeployed = time.Unix(1452902400, 0).UTC()
+ return []*release.Release{{
+ Name: "athos",
+ Namespace: "default",
+ Info: info,
+ Chart: &chart.Chart{},
+ Hooks: hooks,
+ }, {
+ Name: "porthos",
+ Namespace: "default",
+ Info: info,
+ Chart: &chart.Chart{},
+ Hooks: hooks,
+ }, {
+ Name: "aramis",
+ Namespace: "default",
+ Info: info,
+ Chart: &chart.Chart{},
+ Hooks: hooks,
+ }, {
+ Name: "dartagnan",
+ Namespace: "gascony",
+ Info: info,
+ Chart: &chart.Chart{},
+ Hooks: hooks,
+ }}
+ }
+
+ tests := []cmdTestCase{{
+ name: "completion for output flag long and before arg",
+ cmd: fmt.Sprintf("__complete %s --output ''", cmdName),
+ golden: "output/output-comp.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ }),
+ }, {
+ name: "completion for output flag long and after arg",
+ cmd: fmt.Sprintf("__complete %s aramis --output ''", cmdName),
+ golden: "output/output-comp.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ }),
+ }, {
+ name: "completion for output flag short and before arg",
+ cmd: fmt.Sprintf("__complete %s -o ''", cmdName),
+ golden: "output/output-comp.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ }),
+ }, {
+ name: "completion for output flag short and after arg",
+ cmd: fmt.Sprintf("__complete %s aramis -o ''", cmdName),
+ golden: "output/output-comp.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ }),
+ }, {
+ name: "completion for output flag, no filter",
+ cmd: fmt.Sprintf("__complete %s --output jso", cmdName),
+ golden: "output/output-comp.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ }),
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestPostRendererFlagSetOnce(t *testing.T) {
+ cfg := action.Configuration{}
+ client := action.NewInstall(&cfg)
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
+ str := postRendererString{
+ options: &postRendererOptions{
+ renderer: &client.PostRenderer,
+ settings: settings,
+ },
+ }
+ // Set the plugin name once
+ err := str.Set("postrenderer-v1")
+ require.NoError(t, err)
+
+ // Set the plugin name again to the same value is not ok
+ err = str.Set("postrenderer-v1")
+ require.Error(t, err)
+
+ // Set the plugin name again to a different value is not ok
+ err = str.Set("cat")
+ require.Error(t, err)
+}
diff --git a/helm/pkg/cmd/get.go b/helm/pkg/cmd/get.go
new file mode 100644
index 000000000..1e672beea
--- /dev/null
+++ b/helm/pkg/cmd/get.go
@@ -0,0 +1,55 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+var getHelp = `
+This command consists of multiple subcommands which can be used to
+get extended information about the release, including:
+
+- The values used to generate the release
+- The generated manifest file
+- The notes provided by the chart of the release
+- The hooks associated with the release
+- The metadata of the release
+`
+
+func newGetCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "get",
+ Short: "download extended information of a named release",
+ Long: getHelp,
+ Args: require.NoArgs,
+ }
+
+ cmd.AddCommand(newGetAllCmd(cfg, out))
+ cmd.AddCommand(newGetValuesCmd(cfg, out))
+ cmd.AddCommand(newGetManifestCmd(cfg, out))
+ cmd.AddCommand(newGetHooksCmd(cfg, out))
+ cmd.AddCommand(newGetNotesCmd(cfg, out))
+ cmd.AddCommand(newGetMetadataCmd(cfg, out))
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/get_all.go b/helm/pkg/cmd/get_all.go
new file mode 100644
index 000000000..32744796c
--- /dev/null
+++ b/helm/pkg/cmd/get_all.go
@@ -0,0 +1,86 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+ "log"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+var getAllHelp = `
+This command prints a human readable collection of information about the
+notes, hooks, supplied values, and generated manifest file of the given release.
+`
+
+func newGetAllCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ var template string
+ client := action.NewGet(cfg)
+
+ cmd := &cobra.Command{
+ Use: "all RELEASE_NAME",
+ Short: "download all information for a named release",
+ Long: getAllHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ res, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ if template != "" {
+ data := map[string]interface{}{
+ "Release": res,
+ }
+ return tpl(template, data, out)
+ }
+ return output.Table.Write(out, &statusPrinter{
+ release: res,
+ debug: true,
+ showMetadata: true,
+ hideNotes: false,
+ noColor: settings.ShouldDisableColor(),
+ })
+ },
+ }
+
+ f := cmd.Flags()
+ f.IntVar(&client.Version, "revision", 0, "get the named release with revision")
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ f.StringVar(&template, "template", "", "go template for formatting the output, eg: {{.Release.Name}}")
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/get_all_test.go b/helm/pkg/cmd/get_all_test.go
new file mode 100644
index 000000000..80bb7d332
--- /dev/null
+++ b/helm/pkg/cmd/get_all_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestGetCmd(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "get all with a release",
+ cmd: "get all thomas-guide",
+ golden: "output/get-release.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ }, {
+ name: "get all with a formatted release",
+ cmd: "get all elevated-turkey --template {{.Release.Chart.Metadata.Version}}",
+ golden: "output/get-release-template.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "elevated-turkey"})},
+ }, {
+ name: "get all requires release name arg",
+ cmd: "get all",
+ golden: "output/get-all-no-args.txt",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestGetAllCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "get all", false)
+}
+
+func TestGetAllRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "get all")
+}
+
+func TestGetAllFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get all", false)
+ checkFileCompletion(t, "get all myrelease", false)
+}
diff --git a/helm/pkg/cmd/get_hooks.go b/helm/pkg/cmd/get_hooks.go
new file mode 100644
index 000000000..d344307cb
--- /dev/null
+++ b/helm/pkg/cmd/get_hooks.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/release"
+)
+
+const getHooksHelp = `
+This command downloads hooks for a given release.
+
+Hooks are formatted in YAML and separated by the YAML '---\n' separator.
+`
+
+func newGetHooksCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewGet(cfg)
+
+ cmd := &cobra.Command{
+ Use: "hooks RELEASE_NAME",
+ Short: "download all hooks for a named release",
+ Long: getHooksHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ res, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ rac, err := release.NewAccessor(res)
+ if err != nil {
+ return err
+ }
+ for _, hook := range rac.Hooks() {
+ hac, err := release.NewHookAccessor(hook)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(out, "---\n# Source: %s\n%s\n", hac.Path(), hac.Manifest())
+ }
+ return nil
+ },
+ }
+
+ cmd.Flags().IntVar(&client.Version, "revision", 0, "get the named release with revision")
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/get_hooks_test.go b/helm/pkg/cmd/get_hooks_test.go
new file mode 100644
index 000000000..3be1d8500
--- /dev/null
+++ b/helm/pkg/cmd/get_hooks_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestGetHooks(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "get hooks with release",
+ cmd: "get hooks aeneas",
+ golden: "output/get-hooks.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})},
+ }, {
+ name: "get hooks without args",
+ cmd: "get hooks",
+ golden: "output/get-hooks-no-args.txt",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestGetHooksCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "get hooks", false)
+}
+
+func TestGetHooksRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "get hooks")
+}
+
+func TestGetHooksFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get hooks", false)
+ checkFileCompletion(t, "get hooks myrelease", false)
+}
diff --git a/helm/pkg/cmd/get_manifest.go b/helm/pkg/cmd/get_manifest.go
new file mode 100644
index 000000000..253b011c1
--- /dev/null
+++ b/helm/pkg/cmd/get_manifest.go
@@ -0,0 +1,80 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/release"
+)
+
+var getManifestHelp = `
+This command fetches the generated manifest for a given release.
+
+A manifest is a YAML-encoded representation of the Kubernetes resources that
+were generated from this release's chart(s). If a chart is dependent on other
+charts, those resources will also be included in the manifest.
+`
+
+func newGetManifestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewGet(cfg)
+
+ cmd := &cobra.Command{
+ Use: "manifest RELEASE_NAME",
+ Short: "download the manifest for a named release",
+ Long: getManifestHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ res, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ rac, err := release.NewAccessor(res)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintln(out, rac.Manifest())
+ return nil
+ },
+ }
+
+ cmd.Flags().IntVar(&client.Version, "revision", 0, "get the named release with revision")
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/get_manifest_test.go b/helm/pkg/cmd/get_manifest_test.go
new file mode 100644
index 000000000..cfb5215bf
--- /dev/null
+++ b/helm/pkg/cmd/get_manifest_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestGetManifest(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "get manifest with release",
+ cmd: "get manifest juno",
+ golden: "output/get-manifest.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "juno"})},
+ }, {
+ name: "get manifest without args",
+ cmd: "get manifest",
+ golden: "output/get-manifest-no-args.txt",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestGetManifestCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "get manifest", false)
+}
+
+func TestGetManifestRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "get manifest")
+}
+
+func TestGetManifestFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get manifest", false)
+ checkFileCompletion(t, "get manifest myrelease", false)
+}
diff --git a/helm/pkg/cmd/get_metadata.go b/helm/pkg/cmd/get_metadata.go
new file mode 100644
index 000000000..eb90b6e44
--- /dev/null
+++ b/helm/pkg/cmd/get_metadata.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/spf13/cobra"
+ k8sLabels "k8s.io/apimachinery/pkg/labels"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+type metadataWriter struct {
+ metadata *action.Metadata
+}
+
+func newGetMetadataCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ var outfmt output.Format
+ client := action.NewGetMetadata(cfg)
+
+ cmd := &cobra.Command{
+ Use: "metadata RELEASE_NAME",
+ Short: "This command fetches metadata for a given release",
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ releaseMetadata, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ return outfmt.Write(out, &metadataWriter{releaseMetadata})
+ },
+ }
+
+ f := cmd.Flags()
+ f.IntVar(&client.Version, "revision", 0, "specify release revision")
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ bindOutputFlag(cmd, &outfmt)
+
+ return cmd
+}
+
+func (w metadataWriter) WriteTable(out io.Writer) error {
+
+ formatApplyMethod := func(applyMethod string) string {
+ switch applyMethod {
+ case "":
+ return "client-side apply (defaulted)"
+ case string(release.ApplyMethodClientSideApply):
+ return "client-side apply"
+ case string(release.ApplyMethodServerSideApply):
+ return "server-side apply"
+ default:
+ return fmt.Sprintf("unknown (%q)", applyMethod)
+ }
+ }
+
+ _, _ = fmt.Fprintf(out, "NAME: %v\n", w.metadata.Name)
+ _, _ = fmt.Fprintf(out, "CHART: %v\n", w.metadata.Chart)
+ _, _ = fmt.Fprintf(out, "VERSION: %v\n", w.metadata.Version)
+ _, _ = fmt.Fprintf(out, "APP_VERSION: %v\n", w.metadata.AppVersion)
+ _, _ = fmt.Fprintf(out, "ANNOTATIONS: %v\n", k8sLabels.Set(w.metadata.Annotations).String())
+ _, _ = fmt.Fprintf(out, "LABELS: %v\n", k8sLabels.Set(w.metadata.Labels).String())
+ _, _ = fmt.Fprintf(out, "DEPENDENCIES: %v\n", w.metadata.FormattedDepNames())
+ _, _ = fmt.Fprintf(out, "NAMESPACE: %v\n", w.metadata.Namespace)
+ _, _ = fmt.Fprintf(out, "REVISION: %v\n", w.metadata.Revision)
+ _, _ = fmt.Fprintf(out, "STATUS: %v\n", w.metadata.Status)
+ _, _ = fmt.Fprintf(out, "DEPLOYED_AT: %v\n", w.metadata.DeployedAt)
+ _, _ = fmt.Fprintf(out, "APPLY_METHOD: %v\n", formatApplyMethod(w.metadata.ApplyMethod))
+
+ return nil
+}
+
+func (w metadataWriter) WriteJSON(out io.Writer) error {
+ return output.EncodeJSON(out, w.metadata)
+}
+
+func (w metadataWriter) WriteYAML(out io.Writer) error {
+ return output.EncodeYAML(out, w.metadata)
+}
diff --git a/helm/pkg/cmd/get_metadata_test.go b/helm/pkg/cmd/get_metadata_test.go
new file mode 100644
index 000000000..59fc3b82c
--- /dev/null
+++ b/helm/pkg/cmd/get_metadata_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestGetMetadataCmd(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "get metadata with a release",
+ cmd: "get metadata thomas-guide",
+ golden: "output/get-metadata.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
+ }, {
+ name: "get metadata requires release name arg",
+ cmd: "get metadata",
+ golden: "output/get-metadata-args.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
+ wantError: true,
+ }, {
+ name: "get metadata to json",
+ cmd: "get metadata thomas-guide --output json",
+ golden: "output/get-metadata.json",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
+ }, {
+ name: "get metadata to yaml",
+ cmd: "get metadata thomas-guide --output yaml",
+ golden: "output/get-metadata.yaml",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide", Labels: map[string]string{"key1": "value1"}})},
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestGetMetadataCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "get metadata", false)
+}
+
+func TestGetMetadataRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "get metadata")
+}
+
+func TestGetMetadataOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "get metadata")
+}
+
+func TestGetMetadataFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get metadata", false)
+ checkFileCompletion(t, "get metadata myrelease", false)
+}
diff --git a/helm/pkg/cmd/get_notes.go b/helm/pkg/cmd/get_notes.go
new file mode 100644
index 000000000..46fbeeaf5
--- /dev/null
+++ b/helm/pkg/cmd/get_notes.go
@@ -0,0 +1,79 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/release"
+)
+
+var getNotesHelp = `
+This command shows notes provided by the chart of a named release.
+`
+
+func newGetNotesCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewGet(cfg)
+
+ cmd := &cobra.Command{
+ Use: "notes RELEASE_NAME",
+ Short: "download the notes for a named release",
+ Long: getNotesHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ res, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ rac, err := release.NewAccessor(res)
+ if err != nil {
+ return err
+ }
+ if len(rac.Notes()) > 0 {
+ fmt.Fprintf(out, "NOTES:\n%s\n", rac.Notes())
+ }
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.IntVar(&client.Version, "revision", 0, "get the named release with revision")
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/get_notes_test.go b/helm/pkg/cmd/get_notes_test.go
new file mode 100644
index 000000000..b451dfa05
--- /dev/null
+++ b/helm/pkg/cmd/get_notes_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestGetNotesCmd(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "get notes of a deployed release",
+ cmd: "get notes the-limerick",
+ golden: "output/get-notes.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "the-limerick"})},
+ }, {
+ name: "get notes without args",
+ cmd: "get notes",
+ golden: "output/get-notes-no-args.txt",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestGetNotesCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "get notes", false)
+}
+
+func TestGetNotesRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "get notes")
+}
+
+func TestGetNotesFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get notes", false)
+ checkFileCompletion(t, "get notes myrelease", false)
+}
diff --git a/helm/pkg/cmd/get_test.go b/helm/pkg/cmd/get_test.go
new file mode 100644
index 000000000..cf81e4df7
--- /dev/null
+++ b/helm/pkg/cmd/get_test.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestGetFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get", false)
+}
diff --git a/helm/pkg/cmd/get_values.go b/helm/pkg/cmd/get_values.go
new file mode 100644
index 000000000..02b195551
--- /dev/null
+++ b/helm/pkg/cmd/get_values.go
@@ -0,0 +1,98 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+var getValuesHelp = `
+This command downloads a values file for a given release.
+`
+
+type valuesWriter struct {
+ vals map[string]interface{}
+ allValues bool
+}
+
+func newGetValuesCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ var outfmt output.Format
+ client := action.NewGetValues(cfg)
+
+ cmd := &cobra.Command{
+ Use: "values RELEASE_NAME",
+ Short: "download the values file for a named release",
+ Long: getValuesHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ vals, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ return outfmt.Write(out, &valuesWriter{vals, client.AllValues})
+ },
+ }
+
+ f := cmd.Flags()
+ f.IntVar(&client.Version, "revision", 0, "get the named release with revision")
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ f.BoolVarP(&client.AllValues, "all", "a", false, "dump all (computed) values")
+ bindOutputFlag(cmd, &outfmt)
+
+ return cmd
+}
+
+func (v valuesWriter) WriteTable(out io.Writer) error {
+ if v.allValues {
+ fmt.Fprintln(out, "COMPUTED VALUES:")
+ } else {
+ fmt.Fprintln(out, "USER-SUPPLIED VALUES:")
+ }
+ return output.EncodeYAML(out, v.vals)
+}
+
+func (v valuesWriter) WriteJSON(out io.Writer) error {
+ return output.EncodeJSON(out, v.vals)
+}
+
+func (v valuesWriter) WriteYAML(out io.Writer) error {
+ return output.EncodeYAML(out, v.vals)
+}
diff --git a/helm/pkg/cmd/get_values_test.go b/helm/pkg/cmd/get_values_test.go
new file mode 100644
index 000000000..7bbe109f6
--- /dev/null
+++ b/helm/pkg/cmd/get_values_test.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestGetValuesCmd(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "get values with a release",
+ cmd: "get values thomas-guide",
+ golden: "output/get-values.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ }, {
+ name: "get values requires release name arg",
+ cmd: "get values",
+ golden: "output/get-values-args.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ wantError: true,
+ }, {
+ name: "get values thomas-guide (all)",
+ cmd: "get values thomas-guide --all",
+ golden: "output/get-values-all.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ }, {
+ name: "get values to json",
+ cmd: "get values thomas-guide --output json",
+ golden: "output/values.json",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ }, {
+ name: "get values to yaml",
+ cmd: "get values thomas-guide --output yaml",
+ golden: "output/values.yaml",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "thomas-guide"})},
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestGetValuesCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "get values", false)
+}
+
+func TestGetValuesRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "get values")
+}
+
+func TestGetValuesOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "get values")
+}
+
+func TestGetValuesFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "get values", false)
+ checkFileCompletion(t, "get values myrelease", false)
+}
diff --git a/helm/pkg/cmd/helpers.go b/helm/pkg/cmd/helpers.go
new file mode 100644
index 000000000..e555dd18b
--- /dev/null
+++ b/helm/pkg/cmd/helpers.go
@@ -0,0 +1,83 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "log/slog"
+ "strconv"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+)
+
+func addDryRunFlag(cmd *cobra.Command) {
+ // --dry-run options with expected outcome:
+ // - Not set means no dry run and server is contacted.
+ // - Set with no value, a value of client, or a value of true and the server is not contacted
+ // - Set with a value of false, none, or false and the server is contacted
+ // The true/false part is meant to reflect some legacy behavior while none is equal to "".
+ f := cmd.Flags()
+ f.String(
+ "dry-run",
+ "none",
+ `simulates the operation without persisting changes. Must be one of: "none" (default), "client", or "server". '--dry-run=none' executes the operation normally and persists changes (no simulation). '--dry-run=client' simulates the operation client-side only and avoids cluster connections. '--dry-run=server' simulates the operation on the server, requiring cluster connectivity.`)
+ f.Lookup("dry-run").NoOptDefVal = "unset"
+}
+
+// Determine the `action.DryRunStrategy` given -dry-run=` flag (or absence of)
+// Legacy usage of the flag: boolean values, and `--dry-run` (without value) are supported, and log warnings emitted
+func cmdGetDryRunFlagStrategy(cmd *cobra.Command, isTemplate bool) (action.DryRunStrategy, error) {
+
+ f := cmd.Flag("dry-run")
+ v := f.Value.String()
+
+ switch v {
+ case f.NoOptDefVal:
+ slog.Warn(`--dry-run is deprecated and should be replaced with '--dry-run=client'`)
+ return action.DryRunClient, nil
+ case string(action.DryRunClient):
+ return action.DryRunClient, nil
+ case string(action.DryRunServer):
+ return action.DryRunServer, nil
+ case string(action.DryRunNone):
+ if isTemplate {
+ // Special case hack for `helm template`, which is always a dry run
+ return action.DryRunNone, fmt.Errorf(`invalid dry-run value (%q). Must be "server" or "client"`, v)
+ }
+ return action.DryRunNone, nil
+ }
+
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return action.DryRunNone, fmt.Errorf(`invalid dry-run value (%q). Must be "none", "server", or "client"`, v)
+ }
+
+ if isTemplate && !b {
+ // Special case for `helm template`, which is always a dry run
+ return action.DryRunNone, fmt.Errorf(`invalid dry-run value (%q). Must be "server" or "client"`, v)
+ }
+
+ result := action.DryRunNone
+ if b {
+ result = action.DryRunClient
+ }
+ slog.Warn(fmt.Sprintf(`boolean '--dry-run=%v' flag is deprecated and must be replaced with '--dry-run=%s'`, v, result))
+
+ return result, nil
+}
diff --git a/helm/pkg/cmd/helpers_test.go b/helm/pkg/cmd/helpers_test.go
new file mode 100644
index 000000000..08065499e
--- /dev/null
+++ b/helm/pkg/cmd/helpers_test.go
@@ -0,0 +1,309 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "log/slog"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ shellwords "github.com/mattn/go-shellwords"
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/test"
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/cli"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+func testTimestamper() time.Time { return time.Unix(242085845, 0).UTC() }
+
+func init() {
+ action.Timestamper = testTimestamper
+}
+
+func runTestCmd(t *testing.T, tests []cmdTestCase) {
+ t.Helper()
+ for _, tt := range tests {
+ for i := 0; i <= tt.repeat; i++ {
+ t.Run(tt.name, func(t *testing.T) {
+ defer resetEnv()()
+
+ storage := storageFixture()
+ for _, rel := range tt.rels {
+ if err := storage.Create(rel); err != nil {
+ t.Fatal(err)
+ }
+ }
+ t.Logf("running cmd (attempt %d): %s", i+1, tt.cmd)
+ _, out, err := executeActionCommandC(storage, tt.cmd)
+ if tt.wantError && err == nil {
+ t.Errorf("expected error, got success with the following output:\n%s", out)
+ }
+ if !tt.wantError && err != nil {
+ t.Errorf("expected no error, got: '%v'", err)
+ }
+ if tt.golden != "" {
+ test.AssertGoldenString(t, out, tt.golden)
+ }
+ })
+ }
+ }
+}
+
+func storageFixture() *storage.Storage {
+ return storage.Init(driver.NewMemory())
+}
+
+func executeActionCommandC(store *storage.Storage, cmd string) (*cobra.Command, string, error) {
+ return executeActionCommandStdinC(store, nil, cmd)
+}
+
+func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) (*cobra.Command, string, error) {
+ args, err := shellwords.Parse(cmd)
+ if err != nil {
+ return nil, "", err
+ }
+
+ buf := new(bytes.Buffer)
+
+ actionConfig := &action.Configuration{
+ Releases: store,
+ KubeClient: &kubefake.PrintingKubeClient{Out: io.Discard},
+ Capabilities: common.DefaultCapabilities,
+ }
+
+ root, err := newRootCmdWithConfig(actionConfig, buf, args, SetupLogging)
+ if err != nil {
+ return nil, "", err
+ }
+
+ root.SetOut(buf)
+ root.SetErr(buf)
+ root.SetArgs(args)
+
+ oldStdin := os.Stdin
+ defer func() {
+ os.Stdin = oldStdin
+ }()
+
+ if in != nil {
+ root.SetIn(in)
+ os.Stdin = in
+ }
+
+ if mem, ok := store.Driver.(*driver.Memory); ok {
+ mem.SetNamespace(settings.Namespace())
+ }
+ c, err := root.ExecuteC()
+
+ result := buf.String()
+
+ return c, result, err
+}
+
+// cmdTestCase describes a test case that works with releases.
+type cmdTestCase struct {
+ name string
+ cmd string
+ golden string
+ wantError bool
+ // Rels are the available releases at the start of the test.
+ rels []*release.Release
+ // Number of repeats (in case a feature was previously flaky and the test checks
+ // it's now stably producing identical results). 0 means test is run exactly once.
+ repeat int
+}
+
+func executeActionCommand(cmd string) (*cobra.Command, string, error) {
+ return executeActionCommandC(storageFixture(), cmd)
+}
+
+func resetEnv() func() {
+ origEnv := os.Environ()
+ return func() {
+ os.Clearenv()
+ for _, pair := range origEnv {
+ kv := strings.SplitN(pair, "=", 2)
+ os.Setenv(kv[0], kv[1])
+ }
+ settings = cli.New()
+ }
+}
+
+func TestCmdGetDryRunFlagStrategy(t *testing.T) {
+
+ type testCaseExpectedLog struct {
+ Level string
+ Msg string
+ }
+ testCases := map[string]struct {
+ DryRunFlagArg string
+ IsTemplate bool
+ ExpectedStrategy action.DryRunStrategy
+ ExpectedError bool
+ ExpectedLog *testCaseExpectedLog
+ }{
+ "unset_value": {
+ DryRunFlagArg: "--dry-run",
+ ExpectedStrategy: action.DryRunClient,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `--dry-run is deprecated and should be replaced with '--dry-run=client'`,
+ },
+ },
+ "unset_special": {
+ DryRunFlagArg: "--dry-run=unset", // Special value that matches cmd.Flags("dry-run").NoOptDefVal
+ ExpectedStrategy: action.DryRunClient,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `--dry-run is deprecated and should be replaced with '--dry-run=client'`,
+ },
+ },
+ "none": {
+ DryRunFlagArg: "--dry-run=none",
+ ExpectedStrategy: action.DryRunNone,
+ },
+ "client": {
+ DryRunFlagArg: "--dry-run=client",
+ ExpectedStrategy: action.DryRunClient,
+ },
+ "server": {
+ DryRunFlagArg: "--dry-run=server",
+ ExpectedStrategy: action.DryRunServer,
+ },
+ "bool_false": {
+ DryRunFlagArg: "--dry-run=false",
+ ExpectedStrategy: action.DryRunNone,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `boolean '--dry-run=false' flag is deprecated and must be replaced with '--dry-run=none'`,
+ },
+ },
+ "bool_true": {
+ DryRunFlagArg: "--dry-run=true",
+ ExpectedStrategy: action.DryRunClient,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `boolean '--dry-run=true' flag is deprecated and must be replaced with '--dry-run=client'`,
+ },
+ },
+ "bool_0": {
+ DryRunFlagArg: "--dry-run=0",
+ ExpectedStrategy: action.DryRunNone,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `boolean '--dry-run=0' flag is deprecated and must be replaced with '--dry-run=none'`,
+ },
+ },
+ "bool_1": {
+ DryRunFlagArg: "--dry-run=1",
+ ExpectedStrategy: action.DryRunClient,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `boolean '--dry-run=1' flag is deprecated and must be replaced with '--dry-run=client'`,
+ },
+ },
+ "invalid": {
+ DryRunFlagArg: "--dry-run=invalid",
+ ExpectedError: true,
+ },
+ "template_unset_value": {
+ DryRunFlagArg: "--dry-run",
+ IsTemplate: true,
+ ExpectedStrategy: action.DryRunClient,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `--dry-run is deprecated and should be replaced with '--dry-run=client'`,
+ },
+ },
+ "template_bool_false": {
+ DryRunFlagArg: "--dry-run=false",
+ IsTemplate: true,
+ ExpectedError: true,
+ },
+ "template_bool_template_true": {
+ DryRunFlagArg: "--dry-run=true",
+ IsTemplate: true,
+ ExpectedStrategy: action.DryRunClient,
+ ExpectedLog: &testCaseExpectedLog{
+ Level: "WARN",
+ Msg: `boolean '--dry-run=true' flag is deprecated and must be replaced with '--dry-run=client'`,
+ },
+ },
+ "template_none": {
+ DryRunFlagArg: "--dry-run=none",
+ IsTemplate: true,
+ ExpectedError: true,
+ },
+ "template_client": {
+ DryRunFlagArg: "--dry-run=client",
+ IsTemplate: true,
+ ExpectedStrategy: action.DryRunClient,
+ },
+ "template_server": {
+ DryRunFlagArg: "--dry-run=server",
+ IsTemplate: true,
+ ExpectedStrategy: action.DryRunServer,
+ },
+ }
+
+ for name, tc := range testCases {
+
+ logBuf := new(bytes.Buffer)
+ logger := slog.New(slog.NewJSONHandler(logBuf, nil))
+ slog.SetDefault(logger)
+
+ cmd := &cobra.Command{
+ Use: "helm",
+ }
+ addDryRunFlag(cmd)
+ cmd.Flags().Parse([]string{"helm", tc.DryRunFlagArg})
+
+ t.Run(name, func(t *testing.T) {
+ dryRunStrategy, err := cmdGetDryRunFlagStrategy(cmd, tc.IsTemplate)
+ if tc.ExpectedError {
+ assert.Error(t, err)
+ } else {
+ assert.Nil(t, err)
+ assert.Equal(t, tc.ExpectedStrategy, dryRunStrategy)
+ }
+
+ if tc.ExpectedLog != nil {
+ logResult := map[string]string{}
+ err = json.Unmarshal(logBuf.Bytes(), &logResult)
+ require.Nil(t, err)
+
+ assert.Equal(t, tc.ExpectedLog.Level, logResult["level"])
+ assert.Equal(t, tc.ExpectedLog.Msg, logResult["msg"])
+ } else {
+ assert.Equal(t, 0, logBuf.Len())
+ }
+ })
+ }
+}
diff --git a/helm/pkg/cmd/history.go b/helm/pkg/cmd/history.go
new file mode 100644
index 000000000..b294a9da7
--- /dev/null
+++ b/helm/pkg/cmd/history.go
@@ -0,0 +1,270 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/gosuri/uitable"
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+)
+
+var historyHelp = `
+History prints historical revisions for a given release.
+
+A default maximum of 256 revisions will be returned. Setting '--max'
+configures the maximum length of the revision list returned.
+
+The historical release set is printed as a formatted table, e.g:
+
+ $ helm history angry-bird
+ REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
+ 1 Mon Oct 3 10:15:13 2016 superseded alpine-0.1.0 1.0 Initial install
+ 2 Mon Oct 3 10:15:13 2016 superseded alpine-0.1.0 1.0 Upgraded successfully
+ 3 Mon Oct 3 10:15:13 2016 superseded alpine-0.1.0 1.0 Rolled back to 2
+ 4 Mon Oct 3 10:15:13 2016 deployed alpine-0.1.0 1.0 Upgraded successfully
+`
+
+func newHistoryCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewHistory(cfg)
+ var outfmt output.Format
+
+ cmd := &cobra.Command{
+ Use: "history RELEASE_NAME",
+ Long: historyHelp,
+ Short: "fetch release history",
+ Aliases: []string{"hist"},
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ history, err := getHistory(client, args[0])
+ if err != nil {
+ return err
+ }
+
+ return outfmt.Write(out, history)
+ },
+ }
+
+ f := cmd.Flags()
+ f.IntVar(&client.Max, "max", 256, "maximum number of revision to include in history")
+ bindOutputFlag(cmd, &outfmt)
+
+ return cmd
+}
+
+type releaseInfo struct {
+ Revision int `json:"revision"`
+ Updated time.Time `json:"updated,omitzero"`
+ Status string `json:"status"`
+ Chart string `json:"chart"`
+ AppVersion string `json:"app_version"`
+ Description string `json:"description"`
+}
+
+// releaseInfoJSON is used for custom JSON marshaling/unmarshaling
+type releaseInfoJSON struct {
+ Revision int `json:"revision"`
+ Updated *time.Time `json:"updated,omitempty"`
+ Status string `json:"status"`
+ Chart string `json:"chart"`
+ AppVersion string `json:"app_version"`
+ Description string `json:"description"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// It handles empty string time fields by treating them as zero values.
+func (r *releaseInfo) UnmarshalJSON(data []byte) error {
+ // First try to unmarshal into a map to handle empty string time fields
+ var raw map[string]interface{}
+ if err := json.Unmarshal(data, &raw); err != nil {
+ return err
+ }
+
+ // Replace empty string time fields with nil
+ if val, ok := raw["updated"]; ok {
+ if str, ok := val.(string); ok && str == "" {
+ raw["updated"] = nil
+ }
+ }
+
+ // Re-marshal with cleaned data
+ cleaned, err := json.Marshal(raw)
+ if err != nil {
+ return err
+ }
+
+ // Unmarshal into temporary struct with pointer time field
+ var tmp releaseInfoJSON
+ if err := json.Unmarshal(cleaned, &tmp); err != nil {
+ return err
+ }
+
+ // Copy values to releaseInfo struct
+ r.Revision = tmp.Revision
+ if tmp.Updated != nil {
+ r.Updated = *tmp.Updated
+ }
+ r.Status = tmp.Status
+ r.Chart = tmp.Chart
+ r.AppVersion = tmp.AppVersion
+ r.Description = tmp.Description
+
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// It omits zero-value time fields from the JSON output.
+func (r releaseInfo) MarshalJSON() ([]byte, error) {
+ tmp := releaseInfoJSON{
+ Revision: r.Revision,
+ Status: r.Status,
+ Chart: r.Chart,
+ AppVersion: r.AppVersion,
+ Description: r.Description,
+ }
+
+ if !r.Updated.IsZero() {
+ tmp.Updated = &r.Updated
+ }
+
+ return json.Marshal(tmp)
+}
+
+type releaseHistory []releaseInfo
+
+func (r releaseHistory) WriteJSON(out io.Writer) error {
+ return output.EncodeJSON(out, r)
+}
+
+func (r releaseHistory) WriteYAML(out io.Writer) error {
+ return output.EncodeYAML(out, r)
+}
+
+func (r releaseHistory) WriteTable(out io.Writer) error {
+ tbl := uitable.New()
+ tbl.AddRow("REVISION", "UPDATED", "STATUS", "CHART", "APP VERSION", "DESCRIPTION")
+ for _, item := range r {
+ tbl.AddRow(item.Revision, item.Updated.Format(time.ANSIC), item.Status, item.Chart, item.AppVersion, item.Description)
+ }
+ return output.EncodeTable(out, tbl)
+}
+
+func getHistory(client *action.History, name string) (releaseHistory, error) {
+ histi, err := client.Run(name)
+ if err != nil {
+ return nil, err
+ }
+ hist, err := releaseListToV1List(histi)
+ if err != nil {
+ return nil, err
+ }
+
+ releaseutil.Reverse(hist, releaseutil.SortByRevision)
+
+ var rels []*release.Release
+ for i := 0; i < min(len(hist), client.Max); i++ {
+ rels = append(rels, hist[i])
+ }
+
+ if len(rels) == 0 {
+ return releaseHistory{}, nil
+ }
+
+ releaseHistory := getReleaseHistory(rels)
+
+ return releaseHistory, nil
+}
+
+func getReleaseHistory(rls []*release.Release) (history releaseHistory) {
+ for i := len(rls) - 1; i >= 0; i-- {
+ r := rls[i]
+ c := formatChartName(r.Chart)
+ s := r.Info.Status.String()
+ v := r.Version
+ d := r.Info.Description
+ a := formatAppVersion(r.Chart)
+
+ rInfo := releaseInfo{
+ Revision: v,
+ Status: s,
+ Chart: c,
+ AppVersion: a,
+ Description: d,
+ }
+ if !r.Info.LastDeployed.IsZero() {
+ rInfo.Updated = r.Info.LastDeployed
+
+ }
+ history = append(history, rInfo)
+ }
+
+ return history
+}
+
+func formatChartName(c *chart.Chart) string {
+ if c == nil || c.Metadata == nil {
+ // This is an edge case that has happened in prod, though we don't
+ // know how: https://github.com/helm/helm/issues/1347
+ return "MISSING"
+ }
+ return fmt.Sprintf("%s-%s", c.Name(), c.Metadata.Version)
+}
+
+func formatAppVersion(c *chart.Chart) string {
+ if c == nil || c.Metadata == nil {
+ // This is an edge case that has happened in prod, though we don't
+ // know how: https://github.com/helm/helm/issues/1347
+ return "MISSING"
+ }
+ return c.AppVersion()
+}
+
+func compListRevisions(_ string, cfg *action.Configuration, releaseName string) ([]string, cobra.ShellCompDirective) {
+ client := action.NewHistory(cfg)
+
+ var revisions []string
+ if histi, err := client.Run(releaseName); err == nil {
+ hist, err := releaseListToV1List(histi)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+ for _, version := range hist {
+ appVersion := fmt.Sprintf("App: %s", version.Chart.Metadata.AppVersion)
+ chartDesc := fmt.Sprintf("Chart: %s-%s", version.Chart.Metadata.Name, version.Chart.Metadata.Version)
+ revisions = append(revisions, fmt.Sprintf("%s\t%s, %s", strconv.Itoa(version.Version), appVersion, chartDesc))
+ }
+ return revisions, cobra.ShellCompDirectiveNoFileComp
+ }
+ return nil, cobra.ShellCompDirectiveError
+}
diff --git a/helm/pkg/cmd/history_test.go b/helm/pkg/cmd/history_test.go
new file mode 100644
index 000000000..d8adc2d19
--- /dev/null
+++ b/helm/pkg/cmd/history_test.go
@@ -0,0 +1,333 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestHistoryCmd(t *testing.T) {
+ mk := func(name string, vers int, status common.Status) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{
+ Name: name,
+ Version: vers,
+ Status: status,
+ })
+ }
+
+ tests := []cmdTestCase{{
+ name: "get history for release",
+ cmd: "history angry-bird",
+ rels: []*release.Release{
+ mk("angry-bird", 4, common.StatusDeployed),
+ mk("angry-bird", 3, common.StatusSuperseded),
+ mk("angry-bird", 2, common.StatusSuperseded),
+ mk("angry-bird", 1, common.StatusSuperseded),
+ },
+ golden: "output/history.txt",
+ }, {
+ name: "get history with max limit set",
+ cmd: "history angry-bird --max 2",
+ rels: []*release.Release{
+ mk("angry-bird", 4, common.StatusDeployed),
+ mk("angry-bird", 3, common.StatusSuperseded),
+ },
+ golden: "output/history-limit.txt",
+ }, {
+ name: "get history with yaml output format",
+ cmd: "history angry-bird --output yaml",
+ rels: []*release.Release{
+ mk("angry-bird", 4, common.StatusDeployed),
+ mk("angry-bird", 3, common.StatusSuperseded),
+ },
+ golden: "output/history.yaml",
+ }, {
+ name: "get history with json output format",
+ cmd: "history angry-bird --output json",
+ rels: []*release.Release{
+ mk("angry-bird", 4, common.StatusDeployed),
+ mk("angry-bird", 3, common.StatusSuperseded),
+ },
+ golden: "output/history.json",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestHistoryOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "history")
+}
+
+func revisionFlagCompletionTest(t *testing.T, cmdName string) {
+ t.Helper()
+ mk := func(name string, vers int, status common.Status) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{
+ Name: name,
+ Version: vers,
+ Status: status,
+ })
+ }
+
+ releases := []*release.Release{
+ mk("musketeers", 11, common.StatusDeployed),
+ mk("musketeers", 10, common.StatusSuperseded),
+ mk("musketeers", 9, common.StatusSuperseded),
+ mk("musketeers", 8, common.StatusSuperseded),
+ }
+
+ tests := []cmdTestCase{{
+ name: "completion for revision flag",
+ cmd: fmt.Sprintf("__complete %s musketeers --revision ''", cmdName),
+ rels: releases,
+ golden: "output/revision-comp.txt",
+ }, {
+ name: "completion for revision flag, no filter",
+ cmd: fmt.Sprintf("__complete %s musketeers --revision 1", cmdName),
+ rels: releases,
+ golden: "output/revision-comp.txt",
+ }, {
+ name: "completion for revision flag with too few args",
+ cmd: fmt.Sprintf("__complete %s --revision ''", cmdName),
+ rels: releases,
+ golden: "output/revision-wrong-args-comp.txt",
+ }, {
+ name: "completion for revision flag with too many args",
+ cmd: fmt.Sprintf("__complete %s three musketeers --revision ''", cmdName),
+ rels: releases,
+ golden: "output/revision-wrong-args-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestHistoryCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "history", false)
+}
+
+func TestHistoryFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "history", false)
+ checkFileCompletion(t, "history myrelease", false)
+}
+
+func TestReleaseInfoMarshalJSON(t *testing.T) {
+ updated := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ info releaseInfo
+ expected string
+ }{
+ {
+ name: "all fields populated",
+ info: releaseInfo{
+ Revision: 1,
+ Updated: updated,
+ Status: "deployed",
+ Chart: "mychart-1.0.0",
+ AppVersion: "1.0.0",
+ Description: "Initial install",
+ },
+ expected: `{"revision":1,"updated":"2025-10-08T12:00:00Z","status":"deployed","chart":"mychart-1.0.0","app_version":"1.0.0","description":"Initial install"}`,
+ },
+ {
+ name: "without updated time",
+ info: releaseInfo{
+ Revision: 2,
+ Status: "superseded",
+ Chart: "mychart-1.0.1",
+ AppVersion: "1.0.1",
+ Description: "Upgraded",
+ },
+ expected: `{"revision":2,"status":"superseded","chart":"mychart-1.0.1","app_version":"1.0.1","description":"Upgraded"}`,
+ },
+ {
+ name: "with zero revision",
+ info: releaseInfo{
+ Revision: 0,
+ Updated: updated,
+ Status: "failed",
+ Chart: "mychart-1.0.0",
+ AppVersion: "1.0.0",
+ Description: "Install failed",
+ },
+ expected: `{"revision":0,"updated":"2025-10-08T12:00:00Z","status":"failed","chart":"mychart-1.0.0","app_version":"1.0.0","description":"Install failed"}`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ data, err := json.Marshal(&tt.info)
+ require.NoError(t, err)
+ assert.JSONEq(t, tt.expected, string(data))
+ })
+ }
+}
+
+func TestReleaseInfoUnmarshalJSON(t *testing.T) {
+ updated := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ input string
+ expected releaseInfo
+ wantErr bool
+ }{
+ {
+ name: "all fields populated",
+ input: `{"revision":1,"updated":"2025-10-08T12:00:00Z","status":"deployed","chart":"mychart-1.0.0","app_version":"1.0.0","description":"Initial install"}`,
+ expected: releaseInfo{
+ Revision: 1,
+ Updated: updated,
+ Status: "deployed",
+ Chart: "mychart-1.0.0",
+ AppVersion: "1.0.0",
+ Description: "Initial install",
+ },
+ },
+ {
+ name: "empty string updated field",
+ input: `{"revision":2,"updated":"","status":"superseded","chart":"mychart-1.0.1","app_version":"1.0.1","description":"Upgraded"}`,
+ expected: releaseInfo{
+ Revision: 2,
+ Status: "superseded",
+ Chart: "mychart-1.0.1",
+ AppVersion: "1.0.1",
+ Description: "Upgraded",
+ },
+ },
+ {
+ name: "missing updated field",
+ input: `{"revision":3,"status":"deployed","chart":"mychart-1.0.2","app_version":"1.0.2","description":"Upgraded"}`,
+ expected: releaseInfo{
+ Revision: 3,
+ Status: "deployed",
+ Chart: "mychart-1.0.2",
+ AppVersion: "1.0.2",
+ Description: "Upgraded",
+ },
+ },
+ {
+ name: "null updated field",
+ input: `{"revision":4,"updated":null,"status":"failed","chart":"mychart-1.0.3","app_version":"1.0.3","description":"Failed"}`,
+ expected: releaseInfo{
+ Revision: 4,
+ Status: "failed",
+ Chart: "mychart-1.0.3",
+ AppVersion: "1.0.3",
+ Description: "Failed",
+ },
+ },
+ {
+ name: "invalid time format",
+ input: `{"revision":5,"updated":"invalid-time","status":"deployed","chart":"mychart-1.0.4","app_version":"1.0.4","description":"Test"}`,
+ wantErr: true,
+ },
+ {
+ name: "zero revision",
+ input: `{"revision":0,"updated":"2025-10-08T12:00:00Z","status":"pending-install","chart":"mychart-1.0.0","app_version":"1.0.0","description":"Installing"}`,
+ expected: releaseInfo{
+ Revision: 0,
+ Updated: updated,
+ Status: "pending-install",
+ Chart: "mychart-1.0.0",
+ AppVersion: "1.0.0",
+ Description: "Installing",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var info releaseInfo
+ err := json.Unmarshal([]byte(tt.input), &info)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ assert.Equal(t, tt.expected.Revision, info.Revision)
+ assert.Equal(t, tt.expected.Updated.Unix(), info.Updated.Unix())
+ assert.Equal(t, tt.expected.Status, info.Status)
+ assert.Equal(t, tt.expected.Chart, info.Chart)
+ assert.Equal(t, tt.expected.AppVersion, info.AppVersion)
+ assert.Equal(t, tt.expected.Description, info.Description)
+ })
+ }
+}
+
+func TestReleaseInfoRoundTrip(t *testing.T) {
+ updated := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+
+ original := releaseInfo{
+ Revision: 1,
+ Updated: updated,
+ Status: "deployed",
+ Chart: "mychart-1.0.0",
+ AppVersion: "1.0.0",
+ Description: "Initial install",
+ }
+
+ data, err := json.Marshal(&original)
+ require.NoError(t, err)
+
+ var decoded releaseInfo
+ err = json.Unmarshal(data, &decoded)
+ require.NoError(t, err)
+
+ assert.Equal(t, original.Revision, decoded.Revision)
+ assert.Equal(t, original.Updated.Unix(), decoded.Updated.Unix())
+ assert.Equal(t, original.Status, decoded.Status)
+ assert.Equal(t, original.Chart, decoded.Chart)
+ assert.Equal(t, original.AppVersion, decoded.AppVersion)
+ assert.Equal(t, original.Description, decoded.Description)
+}
+
+func TestReleaseInfoEmptyStringRoundTrip(t *testing.T) {
+ // This test specifically verifies that empty string time fields
+ // are handled correctly during parsing
+ input := `{"revision":1,"updated":"","status":"deployed","chart":"mychart-1.0.0","app_version":"1.0.0","description":"Test"}`
+
+ var info releaseInfo
+ err := json.Unmarshal([]byte(input), &info)
+ require.NoError(t, err)
+
+ // Verify time field is zero value
+ assert.True(t, info.Updated.IsZero())
+ assert.Equal(t, 1, info.Revision)
+ assert.Equal(t, "deployed", info.Status)
+
+ // Marshal back and verify empty time field is omitted
+ data, err := json.Marshal(&info)
+ require.NoError(t, err)
+
+ var result map[string]interface{}
+ err = json.Unmarshal(data, &result)
+ require.NoError(t, err)
+
+ // Zero time value should be omitted
+ assert.NotContains(t, result, "updated")
+ assert.Equal(t, float64(1), result["revision"])
+ assert.Equal(t, "deployed", result["status"])
+ assert.Equal(t, "mychart-1.0.0", result["chart"])
+}
diff --git a/helm/pkg/cmd/install.go b/helm/pkg/cmd/install.go
new file mode 100644
index 000000000..d36cd9e34
--- /dev/null
+++ b/helm/pkg/cmd/install.go
@@ -0,0 +1,357 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "log/slog"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/loader"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cli/values"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+const installDesc = `
+This command installs a chart archive.
+
+The install argument must be a chart reference, a path to a packaged chart,
+a path to an unpacked chart directory or a URL.
+
+To override values in a chart, use either the '--values' flag and pass in a file
+or use the '--set' flag and pass configuration from the command line, to force
+a string value use '--set-string'. You can use '--set-file' to set individual
+values from a file when the value itself is too long for the command line
+or is dynamically generated. You can also use '--set-json' to set json values
+(scalars/objects/arrays) from the command line. Additionally, you can use '--set-json' and passing json object as a string.
+
+ $ helm install -f myvalues.yaml myredis ./redis
+
+or
+
+ $ helm install --set name=prod myredis ./redis
+
+or
+
+ $ helm install --set-string long_int=1234567890 myredis ./redis
+
+or
+
+ $ helm install --set-file my_script=dothings.sh myredis ./redis
+
+or
+
+ $ helm install --set-json 'master.sidecars=[{"name":"sidecar","image":"myImage","imagePullPolicy":"Always","ports":[{"name":"portname","containerPort":1234}]}]' myredis ./redis
+
+or
+
+ $ helm install --set-json '{"master":{"sidecars":[{"name":"sidecar","image":"myImage","imagePullPolicy":"Always","ports":[{"name":"portname","containerPort":1234}]}]}}' myredis ./redis
+
+You can specify the '--values'/'-f' flag multiple times. The priority will be given to the
+last (right-most) file specified. For example, if both myvalues.yaml and override.yaml
+contained a key called 'Test', the value set in override.yaml would take precedence:
+
+ $ helm install -f myvalues.yaml -f override.yaml myredis ./redis
+
+You can specify the '--set' flag multiple times. The priority will be given to the
+last (right-most) set specified. For example, if both 'bar' and 'newbar' values are
+set for a key called 'foo', the 'newbar' value would take precedence:
+
+ $ helm install --set foo=bar --set foo=newbar myredis ./redis
+
+Similarly, in the following example 'foo' is set to '["four"]':
+
+ $ helm install --set-json='foo=["one", "two", "three"]' --set-json='foo=["four"]' myredis ./redis
+
+And in the following example, 'foo' is set to '{"key1":"value1","key2":"bar"}':
+
+ $ helm install --set-json='foo={"key1":"value1","key2":"value2"}' --set-json='foo.key2="bar"' myredis ./redis
+
+To check the generated manifests of a release without installing the chart,
+the --debug and --dry-run flags can be combined.
+
+The --dry-run flag will output all generated chart manifests, including Secrets
+which can contain sensitive values. To hide Kubernetes Secrets use the
+--hide-secret flag. Please carefully consider how and when these flags are used.
+
+If --verify is set, the chart MUST have a provenance file, and the provenance
+file MUST pass all verification steps.
+
+There are six different ways you can express the chart you want to install:
+
+1. By chart reference: helm install mymaria example/mariadb
+2. By path to a packaged chart: helm install mynginx ./nginx-1.2.3.tgz
+3. By path to an unpacked chart directory: helm install mynginx ./nginx
+4. By absolute URL: helm install mynginx https://example.com/charts/nginx-1.2.3.tgz
+5. By chart reference and repo url: helm install --repo https://example.com/charts/ mynginx nginx
+6. By OCI registries: helm install mynginx --version 1.2.3 oci://example.com/charts/nginx
+
+CHART REFERENCES
+
+A chart reference is a convenient way of referencing a chart in a chart repository.
+
+When you use a chart reference with a repo prefix ('example/mariadb'), Helm will look in the local
+configuration for a chart repository named 'example', and will then look for a
+chart in that repository whose name is 'mariadb'. It will install the latest stable version of that chart
+until you specify '--devel' flag to also include development version (alpha, beta, and release candidate releases), or
+supply a version number with the '--version' flag.
+
+To see the list of chart repositories, use 'helm repo list'. To search for
+charts in a repository, use 'helm search'.
+`
+
+func newInstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewInstall(cfg)
+ valueOpts := &values.Options{}
+ var outfmt output.Format
+
+ cmd := &cobra.Command{
+ Use: "install [NAME] [CHART]",
+ Short: "install a chart",
+ Long: installDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compInstall(args, toComplete, client)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+ client.SetRegistryClient(registryClient)
+
+ dryRunStrategy, err := cmdGetDryRunFlagStrategy(cmd, false)
+ if err != nil {
+ return err
+ }
+ client.DryRunStrategy = dryRunStrategy
+
+ rel, err := runInstall(args, client, valueOpts, out)
+ if err != nil {
+ return fmt.Errorf("INSTALLATION FAILED: %w", err)
+ }
+
+ return outfmt.Write(out, &statusPrinter{
+ release: rel,
+ debug: settings.Debug,
+ showMetadata: false,
+ hideNotes: client.HideNotes,
+ noColor: settings.ShouldDisableColor(),
+ })
+ },
+ }
+
+ f := cmd.Flags()
+ addInstallFlags(cmd, f, client, valueOpts)
+ // hide-secret is not available in all places the install flags are used so
+ // it is added separately
+ f.BoolVar(&client.HideSecret, "hide-secret", false, "hide Kubernetes Secrets when also using the --dry-run flag")
+ addDryRunFlag(cmd)
+ bindOutputFlag(cmd, &outfmt)
+ bindPostRenderFlag(cmd, &client.PostRenderer, settings)
+
+ return cmd
+}
+
+func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Install, valueOpts *values.Options) {
+ f.BoolVar(&client.CreateNamespace, "create-namespace", false, "create the release namespace if not present")
+ f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement")
+ f.BoolVar(&client.ForceReplace, "force", false, "deprecated")
+ f.MarkDeprecated("force", "use --force-replace instead")
+ f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts")
+ f.BoolVar(&client.ServerSideApply, "server-side", true, "object updates run in the server instead of the client")
+ f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install")
+ f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production")
+ f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
+ f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
+ f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)")
+ f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release")
+ f.StringVar(&client.Description, "description", "", "add a custom description")
+ f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
+ f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
+ f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
+ f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback (uninstall) the installation upon failure. The --wait flag will be default to \"watcher\" if --rollback-on-failure is set")
+ f.MarkDeprecated("atomic", "use --rollback-on-failure instead")
+ f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")
+ f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
+ f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation")
+ f.StringToStringVarP(&client.Labels, "labels", "l", nil, "Labels that would be added to release metadata. Should be divided by comma.")
+ f.BoolVar(&client.EnableDNS, "enable-dns", false, "enable DNS lookups when rendering templates")
+ f.BoolVar(&client.HideNotes, "hide-notes", false, "if set, do not show notes in install output. Does not affect presence in chart metadata")
+ f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources")
+ addValueOptionsFlags(f, valueOpts)
+ addChartPathOptionsFlags(f, &client.ChartPathOptions)
+ AddWaitFlag(cmd, &client.WaitStrategy)
+ cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts")
+ cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts")
+
+ err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ requiredArgs := 2
+ if client.GenerateName {
+ requiredArgs = 1
+ }
+ if len(args) != requiredArgs {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[requiredArgs-1], toComplete)
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func runInstall(args []string, client *action.Install, valueOpts *values.Options, out io.Writer) (*release.Release, error) {
+ slog.Debug("Original chart version", "version", client.Version)
+ if client.Version == "" && client.Devel {
+ slog.Debug("setting version to >0.0.0-0")
+ client.Version = ">0.0.0-0"
+ }
+
+ name, chartRef, err := client.NameAndChart(args)
+ if err != nil {
+ return nil, err
+ }
+ client.ReleaseName = name
+
+ cp, err := client.LocateChart(chartRef, settings)
+ if err != nil {
+ return nil, err
+ }
+
+ slog.Debug("Chart path", "path", cp)
+
+ p := getter.All(settings)
+ vals, err := valueOpts.MergeValues(p)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check chart dependencies to make sure all are present in /charts
+ chartRequested, err := loader.Load(cp)
+ if err != nil {
+ return nil, err
+ }
+
+ ac, err := chart.NewAccessor(chartRequested)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkIfInstallable(ac); err != nil {
+ return nil, err
+ }
+
+ if ac.Deprecated() {
+ slog.Warn("this chart is deprecated")
+ }
+
+ if req := ac.MetaDependencies(); len(req) > 0 {
+ // If CheckDependencies returns an error, we have unfulfilled dependencies.
+ // As of Helm 2.4.0, this is treated as a stopping condition:
+ // https://github.com/helm/helm/issues/2209
+ if err := action.CheckDependencies(chartRequested, req); err != nil {
+ if client.DependencyUpdate {
+ man := &downloader.Manager{
+ Out: out,
+ ChartPath: cp,
+ Keyring: client.Keyring,
+ SkipUpdate: false,
+ Getters: p,
+ RepositoryConfig: settings.RepositoryConfig,
+ RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
+ Debug: settings.Debug,
+ RegistryClient: client.GetRegistryClient(),
+ }
+ if err := man.Update(); err != nil {
+ return nil, err
+ }
+ // Reload the chart with the updated Chart.lock file.
+ if chartRequested, err = loader.Load(cp); err != nil {
+ return nil, fmt.Errorf("failed reloading chart after repo update: %w", err)
+ }
+ } else {
+ return nil, fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err)
+ }
+ }
+ }
+
+ client.Namespace = settings.Namespace()
+
+ // Create context and prepare the handle of SIGTERM
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+
+ // Set up channel on which to send signal notifications.
+ // We must use a buffered channel or risk missing the signal
+ // if we're not ready to receive when the signal is sent.
+ cSignal := make(chan os.Signal, 2)
+ signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ <-cSignal
+ fmt.Fprintf(out, "Release %s has been cancelled.\n", args[0])
+ cancel()
+ }()
+
+ ri, err := client.RunWithContext(ctx, chartRequested, vals)
+ rel, rerr := releaserToV1Release(ri)
+ if rerr != nil {
+ return nil, rerr
+ }
+ return rel, err
+}
+
+// checkIfInstallable validates if a chart can be installed
+//
+// Application chart type is only installable
+func checkIfInstallable(ch chart.Accessor) error {
+ meta := ch.MetadataAsMap()
+
+ switch meta["Type"] {
+ case "", "application":
+ return nil
+ }
+ return fmt.Errorf("%s charts are not installable", meta["Type"])
+}
+
+// Provide dynamic auto-completion for the install and template commands
+func compInstall(args []string, toComplete string, client *action.Install) ([]string, cobra.ShellCompDirective) {
+ requiredArgs := 1
+ if client.GenerateName {
+ requiredArgs = 0
+ }
+ if len(args) == requiredArgs {
+ return compListCharts(toComplete, true)
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+}
diff --git a/helm/pkg/cmd/install_test.go b/helm/pkg/cmd/install_test.go
new file mode 100644
index 000000000..f0f12e4f7
--- /dev/null
+++ b/helm/pkg/cmd/install_test.go
@@ -0,0 +1,325 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestInstall(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"),
+ repotest.WithMiddleware(repotest.BasicAuthMiddleware(t)),
+ )
+ defer srv.Stop()
+
+ srv2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.FileServer(http.Dir(srv.Root())).ServeHTTP(w, r)
+ }))
+ defer srv2.Close()
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ repoFile := filepath.Join(srv.Root(), "repositories.yaml")
+
+ tests := []cmdTestCase{
+ // Install, base case
+ {
+ name: "basic install",
+ cmd: "install aeneas testdata/testcharts/empty --namespace default",
+ golden: "output/install.txt",
+ },
+
+ // Install, values from cli
+ {
+ name: "install with values",
+ cmd: "install virgil testdata/testcharts/alpine --set test.Name=bar",
+ golden: "output/install-with-values.txt",
+ },
+ // Install, values from cli via multiple --set
+ {
+ name: "install with multiple values",
+ cmd: "install virgil testdata/testcharts/alpine --set test.Color=yellow --set test.Name=banana",
+ golden: "output/install-with-multiple-values.txt",
+ },
+ // Install, values from yaml
+ {
+ name: "install with values file",
+ cmd: "install virgil testdata/testcharts/alpine -f testdata/testcharts/alpine/extra_values.yaml",
+ golden: "output/install-with-values-file.txt",
+ },
+ // Install, no hooks
+ {
+ name: "install without hooks",
+ cmd: "install aeneas testdata/testcharts/alpine --no-hooks --set test.Name=hello",
+ golden: "output/install-no-hooks.txt",
+ },
+ // Install, values from multiple yaml
+ {
+ name: "install with values",
+ cmd: "install virgil testdata/testcharts/alpine -f testdata/testcharts/alpine/extra_values.yaml -f testdata/testcharts/alpine/more_values.yaml",
+ golden: "output/install-with-multiple-values-files.txt",
+ },
+ // Install, no charts
+ {
+ name: "install with no chart specified",
+ cmd: "install",
+ golden: "output/install-no-args.txt",
+ wantError: true,
+ },
+ // Install, reuse name
+ {
+ name: "install and replace release",
+ cmd: "install aeneas testdata/testcharts/empty --replace",
+ golden: "output/install-and-replace.txt",
+ },
+ // Install, take ownership
+ {
+ name: "install and replace release",
+ cmd: "install aeneas-take-ownership testdata/testcharts/empty --take-ownership",
+ golden: "output/install-and-take-ownership.txt",
+ },
+ // Install, with timeout
+ {
+ name: "install with a timeout",
+ cmd: "install foobar testdata/testcharts/empty --timeout 120s",
+ golden: "output/install-with-timeout.txt",
+ },
+ // Install, with wait
+ {
+ name: "install with a wait",
+ cmd: "install apollo testdata/testcharts/empty --wait",
+ golden: "output/install-with-wait.txt",
+ },
+ // Install, with wait-for-jobs
+ {
+ name: "install with wait-for-jobs",
+ cmd: "install apollo testdata/testcharts/empty --wait --wait-for-jobs",
+ golden: "output/install-with-wait-for-jobs.txt",
+ },
+ // Install, using the name-template
+ {
+ name: "install with name-template",
+ cmd: "install testdata/testcharts/empty --name-template '{{ \"foobar\"}}'",
+ golden: "output/install-name-template.txt",
+ },
+ // Install, perform chart verification along the way.
+ {
+ name: "install with verification, missing provenance",
+ cmd: "install bogus testdata/testcharts/compressedchart-0.1.0.tgz --verify --keyring testdata/helm-test-key.pub",
+ wantError: true,
+ },
+ {
+ name: "install with verification, directory instead of file",
+ cmd: "install bogus testdata/testcharts/signtest --verify --keyring testdata/helm-test-key.pub",
+ wantError: true,
+ },
+ {
+ name: "install with verification, valid",
+ cmd: "install signtest testdata/testcharts/signtest-0.1.0.tgz --verify --keyring testdata/helm-test-key.pub",
+ },
+ // Install, chart with missing dependencies in /charts
+ {
+ name: "install chart with missing dependencies",
+ cmd: "install nodeps testdata/testcharts/chart-missing-deps",
+ wantError: true,
+ },
+ // Install chart with update-dependency
+ {
+ name: "install chart with missing dependencies",
+ cmd: "install --dependency-update updeps testdata/testcharts/chart-with-subchart-update",
+ golden: "output/chart-with-subchart-update.txt",
+ },
+ // Install, chart with bad dependencies in Chart.yaml in /charts
+ {
+ name: "install chart with bad dependencies in Chart.yaml",
+ cmd: "install badreq testdata/testcharts/chart-bad-requirements",
+ wantError: true,
+ },
+ // Install, chart with library chart dependency
+ {
+ name: "install chart with library chart dependency",
+ cmd: "install withlibchartp testdata/testcharts/chart-with-lib-dep",
+ },
+ // Install, library chart
+ {
+ name: "install library chart",
+ cmd: "install libchart testdata/testcharts/lib-chart",
+ wantError: true,
+ golden: "output/install-lib-chart.txt",
+ },
+ // Install, chart with bad type
+ {
+ name: "install chart with bad type",
+ cmd: "install badtype testdata/testcharts/chart-bad-type",
+ wantError: true,
+ golden: "output/install-chart-bad-type.txt",
+ },
+ // Install, values from yaml, schematized
+ {
+ name: "install with schema file",
+ cmd: "install schema testdata/testcharts/chart-with-schema",
+ golden: "output/schema.txt",
+ },
+ // Install, values from yaml, schematized with errors
+ {
+ name: "install with schema file, with errors",
+ cmd: "install schema testdata/testcharts/chart-with-schema-negative",
+ wantError: true,
+ golden: "output/schema-negative.txt",
+ },
+ // Install, values from yaml, extra values from yaml, schematized with errors
+ {
+ name: "install with schema file, extra values from yaml, with errors",
+ cmd: "install schema testdata/testcharts/chart-with-schema -f testdata/testcharts/chart-with-schema/extra-values.yaml",
+ wantError: true,
+ golden: "output/schema-negative.txt",
+ },
+ // Install, values from yaml, extra values from cli, schematized with errors
+ {
+ name: "install with schema file, extra values from cli, with errors",
+ cmd: "install schema testdata/testcharts/chart-with-schema --set age=-5",
+ wantError: true,
+ golden: "output/schema-negative-cli.txt",
+ },
+ // Install with subchart, values from yaml, schematized with errors
+ {
+ name: "install with schema file and schematized subchart, with errors",
+ cmd: "install schema testdata/testcharts/chart-with-schema-and-subchart",
+ wantError: true,
+ golden: "output/subchart-schema-negative.txt",
+ },
+ // Install with subchart, values from yaml, extra values from cli, schematized with errors
+ {
+ name: "install with schema file and schematized subchart, extra values from cli",
+ cmd: "install schema testdata/testcharts/chart-with-schema-and-subchart --set lastname=doe --set subchart-with-schema.age=25",
+ golden: "output/subchart-schema-cli.txt",
+ },
+ // Install with subchart, values from yaml, extra values from cli, schematized with errors
+ {
+ name: "install with schema file and schematized subchart, extra values from cli, with errors",
+ cmd: "install schema testdata/testcharts/chart-with-schema-and-subchart --set lastname=doe --set subchart-with-schema.age=-25",
+ wantError: true,
+ golden: "output/subchart-schema-cli-negative.txt",
+ },
+ // Install, values from yaml, schematized with errors but skip schema validation, expect success
+ {
+ name: "install with schema file and schematized subchart, extra values from cli, skip schema validation",
+ cmd: "install schema testdata/testcharts/chart-with-schema-and-subchart --set lastname=doe --set subchart-with-schema.age=-25 --skip-schema-validation",
+ golden: "output/schema.txt",
+ },
+ // Install deprecated chart
+ {
+ name: "install with warning about deprecated chart",
+ cmd: "install aeneas testdata/testcharts/deprecated --namespace default",
+ golden: "output/deprecated-chart.txt",
+ },
+ // Install chart with only crds
+ {
+ name: "install chart with only crds",
+ cmd: "install crd-test testdata/testcharts/chart-with-only-crds --namespace default",
+ },
+ // Verify the user/pass works
+ {
+ name: "basic install with credentials",
+ cmd: "install aeneas reqtest --namespace default --repo " + srv.URL() + " --username username --password password",
+ golden: "output/install.txt",
+ },
+ {
+ name: "basic install with credentials",
+ cmd: "install aeneas reqtest --namespace default --repo " + srv2.URL + " --username username --password password --pass-credentials",
+ golden: "output/install.txt",
+ },
+ {
+ name: "basic install with credentials and no repo",
+ cmd: fmt.Sprintf("install aeneas test/reqtest --username username --password password --repository-config %s --repository-cache %s", repoFile, srv.Root()),
+ golden: "output/install.txt",
+ },
+ {
+ name: "dry-run displaying secret",
+ cmd: "install secrets testdata/testcharts/chart-with-secret --dry-run",
+ golden: "output/install-dry-run-with-secret.txt",
+ },
+ {
+ name: "dry-run hiding secret",
+ cmd: "install secrets testdata/testcharts/chart-with-secret --dry-run --hide-secret",
+ golden: "output/install-dry-run-with-secret-hidden.txt",
+ },
+ {
+ name: "hide-secret error without dry-run",
+ cmd: "install secrets testdata/testcharts/chart-with-secret --hide-secret",
+ wantError: true,
+ golden: "output/install-hide-secret.txt",
+ },
+ }
+
+ runTestCmd(t, tests)
+}
+
+func TestInstallOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "install")
+}
+
+func TestInstallVersionCompletion(t *testing.T) {
+ repoFile := "testdata/helmhome/helm/repositories.yaml"
+ repoCache := "testdata/helmhome/helm/repository"
+
+ repoSetup := fmt.Sprintf("--repository-config %s --repository-cache %s", repoFile, repoCache)
+
+ tests := []cmdTestCase{{
+ name: "completion for install version flag with release name",
+ cmd: fmt.Sprintf("%s __complete install releasename testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for install version flag with generate-name",
+ cmd: fmt.Sprintf("%s __complete install --generate-name testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for install version flag, no filter",
+ cmd: fmt.Sprintf("%s __complete install releasename testing/alpine --version 0.3", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for install version flag too few args",
+ cmd: fmt.Sprintf("%s __complete install testing/alpine --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for install version flag too many args",
+ cmd: fmt.Sprintf("%s __complete install releasename testing/alpine badarg --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for install version flag invalid chart",
+ cmd: fmt.Sprintf("%s __complete install releasename invalid/invalid --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestInstallFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "install", false)
+ checkFileCompletion(t, "install --generate-name", true)
+ checkFileCompletion(t, "install myname", true)
+ checkFileCompletion(t, "install myname mychart", false)
+}
diff --git a/helm/pkg/cmd/lint.go b/helm/pkg/cmd/lint.go
new file mode 100644
index 000000000..ccc53ddd0
--- /dev/null
+++ b/helm/pkg/cmd/lint.go
@@ -0,0 +1,155 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/v2/lint/support"
+ "helm.sh/helm/v4/pkg/cli/values"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/getter"
+)
+
+var longLintHelp = `
+This command takes a path to a chart and runs a series of tests to verify that
+the chart is well-formed.
+
+If the linter encounters things that will cause the chart to fail installation,
+it will emit [ERROR] messages. If it encounters issues that break with convention
+or recommendation, it will emit [WARNING] messages.
+`
+
+func newLintCmd(out io.Writer) *cobra.Command {
+ client := action.NewLint()
+ valueOpts := &values.Options{}
+ var kubeVersion string
+
+ cmd := &cobra.Command{
+ Use: "lint PATH",
+ Short: "examine a chart for possible issues",
+ Long: longLintHelp,
+ Args: require.MinimumNArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ paths := args
+
+ if kubeVersion != "" {
+ parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion)
+ if err != nil {
+ return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err)
+ }
+ client.KubeVersion = parsedKubeVersion
+ }
+
+ if client.WithSubcharts {
+ for _, p := range paths {
+ filepath.Walk(filepath.Join(p, "charts"), func(path string, info os.FileInfo, _ error) error {
+ if info != nil {
+ if info.Name() == "Chart.yaml" {
+ paths = append(paths, filepath.Dir(path))
+ } else if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") {
+ paths = append(paths, path)
+ }
+ }
+ return nil
+ })
+ }
+ }
+
+ client.Namespace = settings.Namespace()
+ vals, err := valueOpts.MergeValues(getter.All(settings))
+ if err != nil {
+ return err
+ }
+
+ var message strings.Builder
+ failed := 0
+ errorsOrWarnings := 0
+
+ for _, path := range paths {
+ result := client.Run([]string{path}, vals)
+
+ // If there is no errors/warnings and quiet flag is set
+ // go to the next chart
+ hasWarningsOrErrors := action.HasWarningsOrErrors(result)
+ if hasWarningsOrErrors {
+ errorsOrWarnings++
+ }
+ if client.Quiet && !hasWarningsOrErrors {
+ continue
+ }
+
+ fmt.Fprintf(&message, "==> Linting %s\n", path)
+
+ // All the Errors that are generated by a chart
+ // that failed a lint will be included in the
+ // results.Messages so we only need to print
+ // the Errors if there are no Messages.
+ if len(result.Messages) == 0 {
+ for _, err := range result.Errors {
+ fmt.Fprintf(&message, "Error %s\n", err)
+ }
+ }
+
+ for _, msg := range result.Messages {
+ if !client.Quiet || msg.Severity > support.InfoSev {
+ fmt.Fprintf(&message, "%s\n", msg)
+ }
+ }
+
+ if len(result.Errors) != 0 {
+ failed++
+ }
+
+ // Adding extra new line here to break up the
+ // results, stops this from being a big wall of
+ // text and makes it easier to follow.
+ fmt.Fprint(&message, "\n")
+ }
+
+ fmt.Fprint(out, message.String())
+
+ summary := fmt.Sprintf("%d chart(s) linted, %d chart(s) failed", len(paths), failed)
+ if failed > 0 {
+ return errors.New(summary)
+ }
+ if !client.Quiet || errorsOrWarnings > 0 {
+ fmt.Fprintln(out, summary)
+ }
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&client.Strict, "strict", false, "fail on lint warnings")
+ f.BoolVar(&client.WithSubcharts, "with-subcharts", false, "lint dependent charts")
+ f.BoolVar(&client.Quiet, "quiet", false, "print only warnings and errors")
+ f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation")
+ f.StringVar(&kubeVersion, "kube-version", "", "Kubernetes version used for capabilities and deprecation checks")
+ addValueOptionsFlags(f, valueOpts)
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/lint_test.go b/helm/pkg/cmd/lint_test.go
new file mode 100644
index 000000000..f825e36e2
--- /dev/null
+++ b/helm/pkg/cmd/lint_test.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestLintCmdWithSubchartsFlag(t *testing.T) {
+ testChart := "testdata/testcharts/chart-with-bad-subcharts"
+ tests := []cmdTestCase{{
+ name: "lint good chart with bad subcharts",
+ cmd: fmt.Sprintf("lint %s", testChart),
+ golden: "output/lint-chart-with-bad-subcharts.txt",
+ wantError: true,
+ }, {
+ name: "lint good chart with bad subcharts using --with-subcharts flag",
+ cmd: fmt.Sprintf("lint --with-subcharts %s", testChart),
+ golden: "output/lint-chart-with-bad-subcharts-with-subcharts.txt",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestLintCmdWithQuietFlag(t *testing.T) {
+ testChart1 := "testdata/testcharts/alpine"
+ testChart2 := "testdata/testcharts/chart-bad-requirements"
+ tests := []cmdTestCase{{
+ name: "lint good chart using --quiet flag",
+ cmd: fmt.Sprintf("lint --quiet %s", testChart1),
+ golden: "output/lint-quiet.txt",
+ }, {
+ name: "lint two charts, one with error using --quiet flag",
+ cmd: fmt.Sprintf("lint --quiet %s %s", testChart1, testChart2),
+ golden: "output/lint-quiet-with-error.txt",
+ wantError: true,
+ }, {
+ name: "lint chart with warning using --quiet flag",
+ cmd: "lint --quiet testdata/testcharts/chart-with-only-crds",
+ golden: "output/lint-quiet-with-warning.txt",
+ }, {
+ name: "lint non-existent chart using --quiet flag",
+ cmd: "lint --quiet thischartdoesntexist/",
+ golden: "",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+
+}
+
+func TestLintCmdWithKubeVersionFlag(t *testing.T) {
+ testChart := "testdata/testcharts/chart-with-deprecated-api"
+ tests := []cmdTestCase{{
+ name: "lint chart with deprecated api version using kube version flag",
+ cmd: fmt.Sprintf("lint --kube-version 1.22.0 %s", testChart),
+ golden: "output/lint-chart-with-deprecated-api.txt",
+ wantError: false,
+ }, {
+ name: "lint chart with deprecated api version using kube version and strict flag",
+ cmd: fmt.Sprintf("lint --kube-version 1.22.0 --strict %s", testChart),
+ golden: "output/lint-chart-with-deprecated-api-strict.txt",
+ wantError: true,
+ }, {
+ // the test builds will use the kubeVersionMinorTesting const in capabilities.go
+ // which is "20"
+ name: "lint chart with deprecated api version without kube version",
+ cmd: fmt.Sprintf("lint %s", testChart),
+ golden: "output/lint-chart-with-deprecated-api-old-k8s.txt",
+ wantError: false,
+ }, {
+ name: "lint chart with deprecated api version with older kube version",
+ cmd: fmt.Sprintf("lint --kube-version 1.21.0 --strict %s", testChart),
+ golden: "output/lint-chart-with-deprecated-api-old-k8s.txt",
+ wantError: false,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestLintCmdRequiresArgs(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "lint without arguments should fail",
+ cmd: "lint",
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestLintFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "lint", true)
+ checkFileCompletion(t, "lint mypath", true) // Multiple paths can be given
+}
diff --git a/helm/pkg/cmd/list.go b/helm/pkg/cmd/list.go
new file mode 100644
index 000000000..3c15a0954
--- /dev/null
+++ b/helm/pkg/cmd/list.go
@@ -0,0 +1,289 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "strconv"
+
+ "github.com/gosuri/uitable"
+ "github.com/spf13/cobra"
+
+ coloroutput "helm.sh/helm/v4/internal/cli/output"
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var listHelp = `
+This command lists all of the releases for a specified namespace (uses current namespace context if namespace not specified).
+
+By default, it lists all releases in any status. Individual status filters like '--deployed', '--failed',
+'--pending', '--uninstalled', '--superseded', and '--uninstalling' can be used
+to show only releases in specific states. Such flags can be combined:
+'--deployed --failed'.
+
+By default, items are sorted alphabetically. Use the '-d' flag to sort by
+release date.
+
+If the --filter flag is provided, it will be treated as a filter. Filters are
+regular expressions (Perl compatible) that are applied to the list of releases.
+Only items that match the filter will be returned.
+
+ $ helm list --filter 'ara[a-z]+'
+ NAME UPDATED CHART
+ maudlin-arachnid 2020-06-18 14:17:46.125134977 +0000 UTC alpine-0.1.0
+
+If no results are found, 'helm list' will exit 0, but with no output (or in
+the case of no '-q' flag, only headers).
+
+By default, up to 256 items may be returned. To limit this, use the '--max' flag.
+Setting '--max' to 0 will not return all results. Rather, it will return the
+server's default, which may be much higher than 256. Pairing the '--max'
+flag with the '--offset' flag allows you to page through results.
+`
+
+func newListCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewList(cfg)
+ var outfmt output.Format
+
+ cmd := &cobra.Command{
+ Use: "list",
+ Short: "list releases",
+ Long: listHelp,
+ Aliases: []string{"ls"},
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ if client.AllNamespaces {
+ if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER")); err != nil {
+ return err
+ }
+ }
+ client.SetStateMask()
+
+ resultsi, err := client.Run()
+ if err != nil {
+ return err
+ }
+ results, err := releaseListToV1List(resultsi)
+ if err != nil {
+ return err
+ }
+
+ if client.Short {
+ names := make([]string, 0, len(results))
+ for _, res := range results {
+ names = append(names, res.Name)
+ }
+
+ outputFlag := cmd.Flag("output")
+
+ switch outputFlag.Value.String() {
+ case "json":
+ output.EncodeJSON(out, names)
+ return nil
+ case "yaml":
+ output.EncodeYAML(out, names)
+ return nil
+ case "table":
+ for _, res := range results {
+ fmt.Fprintln(out, res.Name)
+ }
+ return nil
+ }
+ }
+
+ return outfmt.Write(out, newReleaseListWriter(results, client.TimeFormat, client.NoHeaders, settings.ShouldDisableColor()))
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVarP(&client.Short, "short", "q", false, "output short (quiet) listing format")
+ f.BoolVarP(&client.NoHeaders, "no-headers", "", false, "don't print headers when using the default output format")
+ f.StringVar(&client.TimeFormat, "time-format", "", `format time using golang time formatter. Example: --time-format "2006-01-02 15:04:05Z0700"`)
+ f.BoolVarP(&client.ByDate, "date", "d", false, "sort by release date")
+ f.BoolVarP(&client.SortReverse, "reverse", "r", false, "reverse the sort order")
+ f.BoolVar(&client.Uninstalled, "uninstalled", false, "show uninstalled releases (if 'helm uninstall --keep-history' was used)")
+ f.BoolVar(&client.Superseded, "superseded", false, "show superseded releases")
+ f.BoolVar(&client.Uninstalling, "uninstalling", false, "show releases that are currently being uninstalled")
+ f.BoolVar(&client.Deployed, "deployed", false, "show deployed releases")
+ f.BoolVar(&client.Failed, "failed", false, "show failed releases")
+ f.BoolVar(&client.Pending, "pending", false, "show pending releases")
+ f.BoolVarP(&client.AllNamespaces, "all-namespaces", "A", false, "list releases across all namespaces")
+ f.IntVarP(&client.Limit, "max", "m", 256, "maximum number of releases to fetch")
+ f.IntVar(&client.Offset, "offset", 0, "next release index in the list, used to offset from start value")
+ f.StringVarP(&client.Filter, "filter", "f", "", "a regular expression (Perl compatible). Any releases that match the expression will be included in the results")
+ f.StringVarP(&client.Selector, "selector", "l", "", "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Works only for secret(default) and configmap storage backends.")
+ bindOutputFlag(cmd, &outfmt)
+
+ return cmd
+}
+
+type releaseElement struct {
+ Name string `json:"name"`
+ Namespace string `json:"namespace"`
+ Revision string `json:"revision"`
+ Updated string `json:"updated"`
+ Status string `json:"status"`
+ Chart string `json:"chart"`
+ AppVersion string `json:"app_version"`
+}
+
+type releaseListWriter struct {
+ releases []releaseElement
+ noHeaders bool
+ noColor bool
+}
+
+func newReleaseListWriter(releases []*release.Release, timeFormat string, noHeaders bool, noColor bool) *releaseListWriter {
+ // Initialize the array so no results returns an empty array instead of null
+ elements := make([]releaseElement, 0, len(releases))
+ for _, r := range releases {
+ element := releaseElement{
+ Name: r.Name,
+ Namespace: r.Namespace,
+ Revision: strconv.Itoa(r.Version),
+ Status: r.Info.Status.String(),
+ Chart: formatChartName(r.Chart),
+ AppVersion: formatAppVersion(r.Chart),
+ }
+
+ t := "-"
+ if tspb := r.Info.LastDeployed; !tspb.IsZero() {
+ if timeFormat != "" {
+ t = tspb.Format(timeFormat)
+ } else {
+ t = tspb.String()
+ }
+ }
+ element.Updated = t
+
+ elements = append(elements, element)
+ }
+ return &releaseListWriter{elements, noHeaders, noColor}
+}
+
+func (w *releaseListWriter) WriteTable(out io.Writer) error {
+ table := uitable.New()
+ if !w.noHeaders {
+ table.AddRow(
+ coloroutput.ColorizeHeader("NAME", w.noColor),
+ coloroutput.ColorizeHeader("NAMESPACE", w.noColor),
+ coloroutput.ColorizeHeader("REVISION", w.noColor),
+ coloroutput.ColorizeHeader("UPDATED", w.noColor),
+ coloroutput.ColorizeHeader("STATUS", w.noColor),
+ coloroutput.ColorizeHeader("CHART", w.noColor),
+ coloroutput.ColorizeHeader("APP VERSION", w.noColor),
+ )
+ }
+ for _, r := range w.releases {
+ // Parse the status string back to a release.Status to use color
+ var status common.Status
+ switch r.Status {
+ case "deployed":
+ status = common.StatusDeployed
+ case "failed":
+ status = common.StatusFailed
+ case "pending-install":
+ status = common.StatusPendingInstall
+ case "pending-upgrade":
+ status = common.StatusPendingUpgrade
+ case "pending-rollback":
+ status = common.StatusPendingRollback
+ case "uninstalling":
+ status = common.StatusUninstalling
+ case "uninstalled":
+ status = common.StatusUninstalled
+ case "superseded":
+ status = common.StatusSuperseded
+ case "unknown":
+ status = common.StatusUnknown
+ default:
+ status = common.Status(r.Status)
+ }
+ table.AddRow(r.Name, coloroutput.ColorizeNamespace(r.Namespace, w.noColor), r.Revision, r.Updated, coloroutput.ColorizeStatus(status, w.noColor), r.Chart, r.AppVersion)
+ }
+ return output.EncodeTable(out, table)
+}
+
+func (w *releaseListWriter) WriteJSON(out io.Writer) error {
+ return output.EncodeJSON(out, w.releases)
+}
+
+func (w *releaseListWriter) WriteYAML(out io.Writer) error {
+ return output.EncodeYAML(out, w.releases)
+}
+
+// Returns all releases from 'releases', except those with names matching 'ignoredReleases'
+func filterReleases(releases []*release.Release, ignoredReleaseNames []string) []*release.Release {
+ // if ignoredReleaseNames is nil, just return releases
+ if ignoredReleaseNames == nil {
+ return releases
+ }
+
+ var filteredReleases []*release.Release
+ for _, rel := range releases {
+ found := slices.Contains(ignoredReleaseNames, rel.Name)
+ if !found {
+ filteredReleases = append(filteredReleases, rel)
+ }
+ }
+
+ return filteredReleases
+}
+
+// Provide dynamic auto-completion for release names
+func compListReleases(toComplete string, ignoredReleaseNames []string, cfg *action.Configuration) ([]string, cobra.ShellCompDirective) {
+ cobra.CompDebugln(fmt.Sprintf("compListReleases with toComplete %s", toComplete), settings.Debug)
+
+ client := action.NewList(cfg)
+ client.All = true
+ client.Limit = 0
+ // Do not filter so as to get the entire list of releases.
+ // This will allow zsh and fish to match completion choices
+ // on other criteria then prefix. For example:
+ // helm status ingress
+ // can match
+ // helm status nginx-ingress
+ //
+ // client.Filter = fmt.Sprintf("^%s", toComplete)
+
+ client.SetStateMask()
+ releasesi, err := client.Run()
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ releases, err := releaseListToV1List(releasesi)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ var choices []string
+ filteredReleases := filterReleases(releases, ignoredReleaseNames)
+ for _, rel := range filteredReleases {
+ choices = append(choices,
+ fmt.Sprintf("%s\t%s-%s -> %s", rel.Name, rel.Chart.Metadata.Name, rel.Chart.Metadata.Version, rel.Info.Status.String()))
+ }
+
+ return choices, cobra.ShellCompDirectiveNoFileComp
+}
diff --git a/helm/pkg/cmd/list_test.go b/helm/pkg/cmd/list_test.go
new file mode 100644
index 000000000..35153465a
--- /dev/null
+++ b/helm/pkg/cmd/list_test.go
@@ -0,0 +1,617 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestListCmd(t *testing.T) {
+ defaultNamespace := "default"
+
+ sampleTimeSeconds := int64(1452902400)
+ timestamp1 := time.Unix(sampleTimeSeconds+1, 0).UTC()
+ timestamp2 := time.Unix(sampleTimeSeconds+2, 0).UTC()
+ timestamp3 := time.Unix(sampleTimeSeconds+3, 0).UTC()
+ timestamp4 := time.Unix(sampleTimeSeconds+4, 0).UTC()
+ chartInfo := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "chickadee",
+ Version: "1.0.0",
+ AppVersion: "0.0.1",
+ },
+ }
+
+ releaseFixture := []*release.Release{
+ {
+ Name: "starlord",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusSuperseded,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "starlord",
+ Version: 2,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "groot",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusUninstalled,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "gamora",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusSuperseded,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "rocket",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp2,
+ Status: common.StatusFailed,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "drax",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusUninstalling,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "thanos",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusPendingInstall,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "hummingbird",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp3,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "iguana",
+ Version: 2,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp4,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "starlord",
+ Version: 2,
+ Namespace: "milano",
+ Info: &release.Info{
+ LastDeployed: timestamp1,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ tests := []cmdTestCase{{
+ name: "list releases",
+ cmd: "list",
+ golden: "output/list-all.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list without headers",
+ cmd: "list --no-headers",
+ golden: "output/list-all-no-headers.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases sorted by release date",
+ cmd: "list --date",
+ golden: "output/list-all-date.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list failed releases",
+ cmd: "list --failed",
+ golden: "output/list-failed.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list filtered releases",
+ cmd: "list --filter='.*'",
+ golden: "output/list-all.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases, limited to one release",
+ cmd: "list --max 1",
+ golden: "output/list-all-max.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases, offset by one",
+ cmd: "list --offset 1",
+ golden: "output/list-all-offset.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list pending releases",
+ cmd: "list --pending",
+ golden: "output/list-pending.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases in reverse order",
+ cmd: "list --reverse",
+ golden: "output/list-all-reverse.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases sorted by reversed release date",
+ cmd: "list --date --reverse",
+ golden: "output/list-all-date-reversed.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases in short output format",
+ cmd: "list --short",
+ golden: "output/list-all-short.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases in short output format",
+ cmd: "list --short --output yaml",
+ golden: "output/list-all-short-yaml.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases in short output format",
+ cmd: "list --short --output json",
+ golden: "output/list-all-short-json.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list deployed and failed releases only",
+ cmd: "list --deployed --failed",
+ golden: "output/list.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list superseded releases",
+ cmd: "list --superseded",
+ golden: "output/list-superseded.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list uninstalled releases",
+ cmd: "list --uninstalled",
+ golden: "output/list-uninstalled.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases currently uninstalling",
+ cmd: "list --uninstalling",
+ golden: "output/list-uninstalling.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases in another namespace",
+ cmd: "list -n milano",
+ golden: "output/list-namespace.txt",
+ rels: releaseFixture,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestListOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "list")
+}
+
+func TestListFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "list", false)
+}
+
+func TestListOutputFormats(t *testing.T) {
+ defaultNamespace := "default"
+ timestamp := time.Unix(1452902400, 0).UTC()
+ chartInfo := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "0.0.1",
+ },
+ }
+
+ releaseFixture := []*release.Release{
+ {
+ Name: "test-release",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ tests := []cmdTestCase{{
+ name: "list releases in json format",
+ cmd: "list --output json",
+ golden: "output/list-json.txt",
+ rels: releaseFixture,
+ }, {
+ name: "list releases in yaml format",
+ cmd: "list --output yaml",
+ golden: "output/list-yaml.txt",
+ rels: releaseFixture,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestReleaseListWriter(t *testing.T) {
+ timestamp := time.Unix(1452902400, 0).UTC()
+ chartInfo := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "0.0.1",
+ },
+ }
+
+ releases := []*release.Release{
+ {
+ Name: "test-release",
+ Version: 1,
+ Namespace: "default",
+ Info: &release.Info{
+ LastDeployed: timestamp,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ tests := []struct {
+ name string
+ releases []*release.Release
+ timeFormat string
+ noHeaders bool
+ noColor bool
+ }{
+ {
+ name: "empty releases list",
+ releases: []*release.Release{},
+ timeFormat: "",
+ noHeaders: false,
+ noColor: false,
+ },
+ {
+ name: "custom time format",
+ releases: releases,
+ timeFormat: "2006-01-02",
+ noHeaders: false,
+ noColor: false,
+ },
+ {
+ name: "no headers",
+ releases: releases,
+ timeFormat: "",
+ noHeaders: true,
+ noColor: false,
+ },
+ {
+ name: "no color",
+ releases: releases,
+ timeFormat: "",
+ noHeaders: false,
+ noColor: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ writer := newReleaseListWriter(tt.releases, tt.timeFormat, tt.noHeaders, tt.noColor)
+
+ if writer == nil {
+ t.Error("Expected writer to be non-nil")
+ } else {
+ if len(writer.releases) != len(tt.releases) {
+ t.Errorf("Expected %d releases, got %d", len(tt.releases), len(writer.releases))
+ }
+ }
+ })
+ }
+}
+
+func TestReleaseListWriterMethods(t *testing.T) {
+ timestamp := time.Unix(1452902400, 0).UTC()
+ zeroTimestamp := time.Time{}
+ chartInfo := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "0.0.1",
+ },
+ }
+
+ releases := []*release.Release{
+ {
+ Name: "test-release",
+ Version: 1,
+ Namespace: "default",
+ Info: &release.Info{
+ LastDeployed: timestamp,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ {
+ Name: "zero-time-release",
+ Version: 1,
+ Namespace: "default",
+ Info: &release.Info{
+ LastDeployed: zeroTimestamp,
+ Status: common.StatusFailed,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ tests := []struct {
+ name string
+ status common.Status
+ }{
+ {"deployed", common.StatusDeployed},
+ {"failed", common.StatusFailed},
+ {"pending-install", common.StatusPendingInstall},
+ {"pending-upgrade", common.StatusPendingUpgrade},
+ {"pending-rollback", common.StatusPendingRollback},
+ {"uninstalling", common.StatusUninstalling},
+ {"uninstalled", common.StatusUninstalled},
+ {"superseded", common.StatusSuperseded},
+ {"unknown", common.StatusUnknown},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ testReleases := []*release.Release{
+ {
+ Name: "test-release",
+ Version: 1,
+ Namespace: "default",
+ Info: &release.Info{
+ LastDeployed: timestamp,
+ Status: tt.status,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ writer := newReleaseListWriter(testReleases, "", false, false)
+
+ var buf []byte
+ out := &bytesWriter{buf: &buf}
+
+ err := writer.WriteJSON(out)
+ if err != nil {
+ t.Errorf("WriteJSON failed: %v", err)
+ }
+
+ err = writer.WriteYAML(out)
+ if err != nil {
+ t.Errorf("WriteYAML failed: %v", err)
+ }
+
+ err = writer.WriteTable(out)
+ if err != nil {
+ t.Errorf("WriteTable failed: %v", err)
+ }
+ })
+ }
+
+ writer := newReleaseListWriter(releases, "", false, false)
+
+ var buf []byte
+ out := &bytesWriter{buf: &buf}
+
+ err := writer.WriteJSON(out)
+ if err != nil {
+ t.Errorf("WriteJSON failed: %v", err)
+ }
+
+ err = writer.WriteYAML(out)
+ if err != nil {
+ t.Errorf("WriteYAML failed: %v", err)
+ }
+
+ err = writer.WriteTable(out)
+ if err != nil {
+ t.Errorf("WriteTable failed: %v", err)
+ }
+}
+
+func TestFilterReleases(t *testing.T) {
+ releases := []*release.Release{
+ {Name: "release1"},
+ {Name: "release2"},
+ {Name: "release3"},
+ }
+
+ tests := []struct {
+ name string
+ releases []*release.Release
+ ignoredReleaseNames []string
+ expectedCount int
+ }{
+ {
+ name: "nil ignored list",
+ releases: releases,
+ ignoredReleaseNames: nil,
+ expectedCount: 3,
+ },
+ {
+ name: "empty ignored list",
+ releases: releases,
+ ignoredReleaseNames: []string{},
+ expectedCount: 3,
+ },
+ {
+ name: "filter one release",
+ releases: releases,
+ ignoredReleaseNames: []string{"release1"},
+ expectedCount: 2,
+ },
+ {
+ name: "filter multiple releases",
+ releases: releases,
+ ignoredReleaseNames: []string{"release1", "release3"},
+ expectedCount: 1,
+ },
+ {
+ name: "filter non-existent release",
+ releases: releases,
+ ignoredReleaseNames: []string{"non-existent"},
+ expectedCount: 3,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := filterReleases(tt.releases, tt.ignoredReleaseNames)
+ if len(result) != tt.expectedCount {
+ t.Errorf("Expected %d releases, got %d", tt.expectedCount, len(result))
+ }
+ })
+ }
+}
+
+type bytesWriter struct {
+ buf *[]byte
+}
+
+func (b *bytesWriter) Write(p []byte) (n int, err error) {
+ *b.buf = append(*b.buf, p...)
+ return len(p), nil
+}
+
+func TestListCustomTimeFormat(t *testing.T) {
+ defaultNamespace := "default"
+ timestamp := time.Unix(1452902400, 0).UTC()
+ chartInfo := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "0.0.1",
+ },
+ }
+
+ releaseFixture := []*release.Release{
+ {
+ Name: "test-release",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp,
+ Status: common.StatusDeployed,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ tests := []cmdTestCase{{
+ name: "list releases with custom time format",
+ cmd: "list --time-format '2006-01-02 15:04:05'",
+ golden: "output/list-time-format.txt",
+ rels: releaseFixture,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestListStatusMapping(t *testing.T) {
+ defaultNamespace := "default"
+ timestamp := time.Unix(1452902400, 0).UTC()
+ chartInfo := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "test-chart",
+ Version: "1.0.0",
+ AppVersion: "0.0.1",
+ },
+ }
+
+ testCases := []struct {
+ name string
+ status common.Status
+ }{
+ {"deployed", common.StatusDeployed},
+ {"failed", common.StatusFailed},
+ {"pending-install", common.StatusPendingInstall},
+ {"pending-upgrade", common.StatusPendingUpgrade},
+ {"pending-rollback", common.StatusPendingRollback},
+ {"uninstalling", common.StatusUninstalling},
+ {"uninstalled", common.StatusUninstalled},
+ {"superseded", common.StatusSuperseded},
+ {"unknown", common.StatusUnknown},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ releaseFixture := []*release.Release{
+ {
+ Name: "test-release",
+ Version: 1,
+ Namespace: defaultNamespace,
+ Info: &release.Info{
+ LastDeployed: timestamp,
+ Status: tc.status,
+ },
+ Chart: chartInfo,
+ },
+ }
+
+ writer := newReleaseListWriter(releaseFixture, "", false, false)
+ if len(writer.releases) != 1 {
+ t.Errorf("Expected 1 release, got %d", len(writer.releases))
+ }
+
+ if writer.releases[0].Status != tc.status.String() {
+ t.Errorf("Expected status %s, got %s", tc.status.String(), writer.releases[0].Status)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/cmd/load_plugins.go b/helm/pkg/cmd/load_plugins.go
new file mode 100644
index 000000000..7bdbf597d
--- /dev/null
+++ b/helm/pkg/cmd/load_plugins.go
@@ -0,0 +1,402 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "slices"
+ "strconv"
+ "strings"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/internal/plugin"
+)
+
+// TODO: move pluginDynamicCompletionExecutable pkg/plugin/runtime_subprocess.go
+// any references to executables should be for [plugin.SubprocessPluginRuntime] only
+// this should also be for backwards compatibility in [plugin.Legacy] only
+//
+// TODO: for v1 make this configurable with a new CompletionCommand field for
+// [plugin.RuntimeConfigSubprocess]
+const (
+ pluginStaticCompletionFile = "completion.yaml"
+ pluginDynamicCompletionExecutable = "plugin.complete"
+)
+
+// loadCLIPlugins loads CLI plugins into the command list.
+//
+// This follows a different pattern than the other commands because it has
+// to inspect its environment and then add commands to the base command
+// as it finds them.
+func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) {
+ // If HELM_NO_PLUGINS is set to 1, do not load plugins.
+ if os.Getenv("HELM_NO_PLUGINS") == "1" {
+ return
+ }
+
+ dirs := filepath.SplitList(settings.PluginsDirectory)
+ descriptor := plugin.Descriptor{
+ Type: "cli/v1",
+ }
+ found, err := plugin.FindPlugins(dirs, descriptor)
+ if err != nil {
+ slog.Error("failed to load plugins", slog.String("error", err.Error()))
+ return
+ }
+
+ // Now we create commands for all of these.
+ for _, plug := range found {
+ var use, short, long string
+ var ignoreFlags bool
+ if cliConfig, ok := plug.Metadata().Config.(*schema.ConfigCLIV1); ok {
+ use = cliConfig.Usage
+ short = cliConfig.ShortHelp
+ long = cliConfig.LongHelp
+ ignoreFlags = cliConfig.IgnoreFlags
+ }
+
+ // Set defaults
+ if use == "" {
+ use = plug.Metadata().Name
+ }
+ if short == "" {
+ short = fmt.Sprintf("the %q plugin", plug.Metadata().Name)
+ }
+ // long has no default, empty is ok
+
+ c := &cobra.Command{
+ Use: use,
+ Short: short,
+ Long: long,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ u, err := processParent(cmd, args)
+ if err != nil {
+ return err
+ }
+
+ // For CLI plugin types runtime, set extra args and settings
+ extraArgs := []string{}
+ if !ignoreFlags {
+ extraArgs = u
+ }
+
+ // Prepare environment
+ env := os.Environ()
+ for k, v := range settings.EnvVars() {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ // Invoke plugin
+ input := &plugin.Input{
+ Message: schema.InputMessageCLIV1{
+ ExtraArgs: extraArgs,
+ },
+ Env: env,
+ Stdin: os.Stdin,
+ Stdout: out,
+ Stderr: os.Stderr,
+ }
+ _, err = plug.Invoke(context.Background(), input)
+ if execErr, ok := err.(*plugin.InvokeExecError); ok {
+ return CommandError{
+ error: execErr.Err,
+ ExitCode: execErr.ExitCode,
+ }
+ }
+ return err
+ },
+ // This passes all the flags to the subcommand.
+ DisableFlagParsing: true,
+ }
+
+ for _, cmd := range baseCmd.Commands() {
+ if cmd.Name() == c.Name() {
+ slog.Error("failed to load plugins: name conflicts", slog.String("name", c.Name()))
+ return
+ }
+ }
+
+ baseCmd.AddCommand(c)
+
+ // For completion, we try to load more details about the plugins so as to allow for command and
+ // flag completion of the plugin itself.
+ // We only do this when necessary (for the "completion" and "__complete" commands) to avoid the
+ // risk of a rogue plugin affecting Helm's normal behavior.
+ subCmd, _, err := baseCmd.Find(os.Args[1:])
+ if (err == nil &&
+ ((subCmd.HasParent() && subCmd.Parent().Name() == "completion") || subCmd.Name() == cobra.ShellCompRequestCmd)) ||
+ /* for the tests */ subCmd == baseCmd.Root() {
+ loadCompletionForPlugin(c, plug)
+ }
+ }
+}
+
+func processParent(cmd *cobra.Command, args []string) ([]string, error) {
+ k, u := manuallyProcessArgs(args)
+ if err := cmd.Parent().ParseFlags(k); err != nil {
+ return nil, err
+ }
+ return u, nil
+}
+
+// manuallyProcessArgs processes an arg array, removing special args.
+//
+// Returns two sets of args: known and unknown (in that order)
+func manuallyProcessArgs(args []string) ([]string, []string) {
+ known := []string{}
+ unknown := []string{}
+ kvargs := []string{"--kube-context", "--namespace", "-n", "--kubeconfig", "--kube-apiserver", "--kube-token", "--kube-as-user", "--kube-as-group", "--kube-ca-file", "--registry-config", "--repository-cache", "--repository-config", "--kube-insecure-skip-tls-verify", "--kube-tls-server-name"}
+ knownArg := func(a string) bool {
+ for _, pre := range kvargs {
+ if strings.HasPrefix(a, pre+"=") {
+ return true
+ }
+ }
+ return false
+ }
+
+ isKnown := func(v string) string {
+ if slices.Contains(kvargs, v) {
+ return v
+ }
+ return ""
+ }
+
+ for i := 0; i < len(args); i++ {
+ switch a := args[i]; a {
+ case "--debug":
+ known = append(known, a)
+ case isKnown(a):
+ known = append(known, a)
+ i++
+ if i < len(args) {
+ known = append(known, args[i])
+ }
+ default:
+ if knownArg(a) {
+ known = append(known, a)
+ continue
+ }
+ unknown = append(unknown, a)
+ }
+ }
+ return known, unknown
+}
+
+// pluginCommand represents the optional completion.yaml file of a plugin
+type pluginCommand struct {
+ Name string `json:"name"`
+ ValidArgs []string `json:"validArgs"`
+ Flags []string `json:"flags"`
+ Commands []pluginCommand `json:"commands"`
+}
+
+// loadCompletionForPlugin will load and parse any completion.yaml provided by the plugin
+// and add the dynamic completion hook to call the optional plugin.complete
+func loadCompletionForPlugin(pluginCmd *cobra.Command, plug plugin.Plugin) {
+ // Parse the yaml file providing the plugin's sub-commands and flags
+ cmds, err := loadFile(strings.Join(
+ []string{plug.Dir(), pluginStaticCompletionFile}, string(filepath.Separator)))
+
+ if err != nil {
+ // The file could be missing or invalid. No static completion for this plugin.
+ slog.Debug("plugin completion file loading", slog.String("error", err.Error()))
+ // Continue to setup dynamic completion.
+ cmds = &pluginCommand{}
+ }
+
+ // Preserve the Usage string specified for the plugin
+ cmds.Name = pluginCmd.Use
+
+ addPluginCommands(plug, pluginCmd, cmds)
+}
+
+// addPluginCommands is a recursive method that adds each different level
+// of sub-commands and flags for the plugins that have provided such information
+func addPluginCommands(plug plugin.Plugin, baseCmd *cobra.Command, cmds *pluginCommand) {
+ if cmds == nil {
+ return
+ }
+
+ if len(cmds.Name) == 0 {
+ slog.Debug("sub-command name field missing", slog.String("commandPath", baseCmd.CommandPath()))
+ return
+ }
+
+ baseCmd.Use = cmds.Name
+ baseCmd.ValidArgs = cmds.ValidArgs
+ // Setup the same dynamic completion for each plugin sub-command.
+ // This is because if dynamic completion is triggered, there is a single executable
+ // to call (plugin.complete), so every sub-commands calls it in the same fashion.
+ if cmds.Commands == nil {
+ // Only setup dynamic completion if there are no sub-commands. This avoids
+ // calling plugin.complete at every completion, which greatly simplifies
+ // development of plugin.complete for plugin developers.
+ baseCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return pluginDynamicComp(plug, cmd, args, toComplete)
+ }
+ }
+
+ // Create fake flags.
+ if len(cmds.Flags) > 0 {
+ // The flags can be created with any type, since we only need them for completion.
+ // pflag does not allow to create short flags without a corresponding long form
+ // so we look for all short flags and match them to any long flag. This will allow
+ // plugins to provide short flags without a long form.
+ // If there are more short-flags than long ones, we'll create an extra long flag with
+ // the same single letter as the short form.
+ shorts := []string{}
+ longs := []string{}
+ for _, flag := range cmds.Flags {
+ if len(flag) == 1 {
+ shorts = append(shorts, flag)
+ } else {
+ longs = append(longs, flag)
+ }
+ }
+
+ f := baseCmd.Flags()
+ if len(longs) >= len(shorts) {
+ for i := range longs {
+ if i < len(shorts) {
+ f.BoolP(longs[i], shorts[i], false, "")
+ } else {
+ f.Bool(longs[i], false, "")
+ }
+ }
+ } else {
+ for i := range shorts {
+ if i < len(longs) {
+ f.BoolP(longs[i], shorts[i], false, "")
+ } else {
+ // Create a long flag with the same name as the short flag.
+ // Not a perfect solution, but it's better than ignoring the extra short flags.
+ f.BoolP(shorts[i], shorts[i], false, "")
+ }
+ }
+ }
+ }
+
+ // Recursively add any sub-commands
+ for _, cmd := range cmds.Commands {
+ // Create a fake command so that completion can be done for the sub-commands of the plugin
+ subCmd := &cobra.Command{
+ // This prevents Cobra from removing the flags. We want to keep the flags to pass them
+ // to the dynamic completion script of the plugin.
+ DisableFlagParsing: true,
+ // A Run is required for it to be a valid command without subcommands
+ Run: func(_ *cobra.Command, _ []string) {},
+ }
+ baseCmd.AddCommand(subCmd)
+ addPluginCommands(plug, subCmd, &cmd)
+ }
+}
+
+// loadFile takes a yaml file at the given path, parses it and returns a pluginCommand object
+func loadFile(path string) (*pluginCommand, error) {
+ cmds := new(pluginCommand)
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return cmds, fmt.Errorf("file (%s) not provided by plugin. No plugin auto-completion possible", path)
+ }
+
+ err = yaml.Unmarshal(b, cmds)
+ return cmds, err
+}
+
+// pluginDynamicComp call the plugin.complete script of the plugin (if available)
+// to obtain the dynamic completion choices. It must pass all the flags and sub-commands
+// specified in the command-line to the plugin.complete executable (except helm's global flags)
+func pluginDynamicComp(plug plugin.Plugin, cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+
+ subprocessPlug, ok := plug.(*plugin.SubprocessPluginRuntime)
+ if !ok {
+ // Completion only supported for subprocess plugins (TODO: fix this)
+ cobra.CompDebugln(fmt.Sprintf("Unsupported plugin runtime: %q", plug.Metadata().Runtime), settings.Debug)
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ var ignoreFlags bool
+ if cliConfig, ok := subprocessPlug.Metadata().Config.(*schema.ConfigCLIV1); ok {
+ ignoreFlags = cliConfig.IgnoreFlags
+ }
+
+ u, err := processParent(cmd, args)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+
+ // We will call the dynamic completion script of the plugin
+ main := strings.Join([]string{plug.Dir(), pluginDynamicCompletionExecutable}, string(filepath.Separator))
+
+ // We must include all sub-commands passed on the command-line.
+ // To do that, we pass-in the entire CommandPath, except the first two elements
+ // which are 'helm' and 'pluginName'.
+ argv := strings.Split(cmd.CommandPath(), " ")[2:]
+ if !ignoreFlags {
+ argv = append(argv, u...)
+ argv = append(argv, toComplete)
+ }
+
+ cobra.CompDebugln(fmt.Sprintf("calling %s with args %v", main, argv), settings.Debug)
+ buf := new(bytes.Buffer)
+
+ // Prepare environment
+ env := os.Environ()
+ for k, v := range settings.EnvVars() {
+ env = append(env, fmt.Sprintf("%s=%s", k, v))
+ }
+
+ // For subprocess runtime, use InvokeWithEnv for dynamic completion
+ if err := subprocessPlug.InvokeWithEnv(main, argv, env, nil, buf, buf); err != nil {
+ // The dynamic completion file is optional for a plugin, so this error is ok.
+ cobra.CompDebugln(fmt.Sprintf("Unable to call %s: %v", main, err.Error()), settings.Debug)
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+
+ var completions []string
+ for comp := range strings.SplitSeq(buf.String(), "\n") {
+ // Remove any empty lines
+ if len(comp) > 0 {
+ completions = append(completions, comp)
+ }
+ }
+
+ // Check if the last line of output is of the form :, which
+ // indicates the BashCompletionDirective.
+ directive := cobra.ShellCompDirectiveDefault
+ if len(completions) > 0 {
+ lastLine := completions[len(completions)-1]
+ if len(lastLine) > 1 && lastLine[0] == ':' {
+ if strInt, err := strconv.Atoi(lastLine[1:]); err == nil {
+ directive = cobra.ShellCompDirective(strInt)
+ completions = completions[:len(completions)-1]
+ }
+ }
+ }
+
+ return completions, directive
+}
diff --git a/helm/pkg/cmd/package.go b/helm/pkg/cmd/package.go
new file mode 100644
index 000000000..96c0c47b2
--- /dev/null
+++ b/helm/pkg/cmd/package.go
@@ -0,0 +1,138 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli/values"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+)
+
+const packageDesc = `
+This command packages a chart into a versioned chart archive file. If a path
+is given, this will look at that path for a chart (which must contain a
+Chart.yaml file) and then package that directory.
+
+Versioned chart archives are used by Helm package repositories.
+
+To sign a chart, use the '--sign' flag. In most cases, you should also
+provide '--keyring path/to/secret/keys' and '--key keyname'.
+
+ $ helm package --sign ./mychart --key mykey --keyring ~/.gnupg/secring.gpg
+
+If '--keyring' is not specified, Helm usually defaults to the public keyring
+unless your environment is otherwise configured.
+`
+
+func newPackageCmd(out io.Writer) *cobra.Command {
+ client := action.NewPackage()
+ valueOpts := &values.Options{}
+
+ cmd := &cobra.Command{
+ Use: "package [CHART_PATH] [...]",
+ Short: "package a chart directory into a chart archive",
+ Long: packageDesc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if len(args) == 0 {
+ return fmt.Errorf("need at least one argument, the path to the chart")
+ }
+ if client.Sign {
+ if client.Key == "" {
+ return errors.New("--key is required for signing a package")
+ }
+ if client.Keyring == "" {
+ return errors.New("--keyring is required for signing a package")
+ }
+ }
+ client.RepositoryConfig = settings.RepositoryConfig
+ client.RepositoryCache = settings.RepositoryCache
+ p := getter.All(settings)
+ vals, err := valueOpts.MergeValues(p)
+ if err != nil {
+ return err
+ }
+
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+
+ for i := range args {
+ path, err := filepath.Abs(args[i])
+ if err != nil {
+ return err
+ }
+ if _, err := os.Stat(args[i]); err != nil {
+ return err
+ }
+
+ if client.DependencyUpdate {
+ downloadManager := &downloader.Manager{
+ Out: io.Discard,
+ ChartPath: path,
+ Keyring: client.Keyring,
+ Getters: p,
+ Debug: settings.Debug,
+ RegistryClient: registryClient,
+ RepositoryConfig: settings.RepositoryConfig,
+ RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
+ }
+
+ if err := downloadManager.Update(); err != nil {
+ return err
+ }
+ }
+ p, err := client.Run(path, vals)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(out, "Successfully packaged chart and saved it to: %s\n", p)
+ }
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&client.Sign, "sign", false, "use a PGP private key to sign this package")
+ f.StringVar(&client.Key, "key", "", "name of the key to use when signing. Used if --sign is true")
+ f.StringVar(&client.Keyring, "keyring", defaultKeyring(), "location of a public keyring")
+ f.StringVar(&client.PassphraseFile, "passphrase-file", "", `location of a file which contains the passphrase for the signing key. Use "-" in order to read from stdin.`)
+ f.StringVar(&client.Version, "version", "", "set the version on the chart to this semver version")
+ f.StringVar(&client.AppVersion, "app-version", "", "set the appVersion on the chart to this version")
+ f.StringVarP(&client.Destination, "destination", "d", ".", "location to write the chart.")
+ f.BoolVarP(&client.DependencyUpdate, "dependency-update", "u", false, `update dependencies from "Chart.yaml" to dir "charts/" before packaging`)
+ f.StringVar(&client.Username, "username", "", "chart repository username where to locate the requested chart")
+ f.StringVar(&client.Password, "password", "", "chart repository password where to locate the requested chart")
+ f.StringVar(&client.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
+ f.StringVar(&client.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
+ f.BoolVar(&client.InsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
+ f.BoolVar(&client.PlainHTTP, "plain-http", false, "use insecure HTTP connections for the chart download")
+ f.StringVar(&client.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/package_test.go b/helm/pkg/cmd/package_test.go
new file mode 100644
index 000000000..db4a2523a
--- /dev/null
+++ b/helm/pkg/cmd/package_test.go
@@ -0,0 +1,198 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+func TestPackage(t *testing.T) {
+ tests := []struct {
+ name string
+ flags map[string]string
+ args []string
+ expect string
+ hasfile string
+ err bool
+ }{
+ {
+ name: "package without chart path",
+ args: []string{},
+ flags: map[string]string{},
+ expect: "need at least one argument, the path to the chart",
+ err: true,
+ },
+ {
+ name: "package --sign, no --key",
+ args: []string{"testdata/testcharts/alpine"},
+ flags: map[string]string{"sign": "1"},
+ expect: "key is required for signing a package",
+ err: true,
+ },
+ {
+ name: "package --sign, no --keyring",
+ args: []string{"testdata/testcharts/alpine"},
+ flags: map[string]string{"sign": "1", "key": "nosuchkey", "keyring": ""},
+ expect: "keyring is required for signing a package",
+ err: true,
+ },
+ {
+ name: "package testdata/testcharts/alpine, no save",
+ args: []string{"testdata/testcharts/alpine"},
+ flags: map[string]string{"save": "0"},
+ expect: "",
+ hasfile: "alpine-0.1.0.tgz",
+ },
+ {
+ name: "package testdata/testcharts/alpine",
+ args: []string{"testdata/testcharts/alpine"},
+ expect: "",
+ hasfile: "alpine-0.1.0.tgz",
+ },
+ {
+ name: "package testdata/testcharts/issue1979",
+ args: []string{"testdata/testcharts/issue1979"},
+ expect: "",
+ hasfile: "alpine-0.1.0.tgz",
+ },
+ {
+ name: "package --destination toot",
+ args: []string{"testdata/testcharts/alpine"},
+ flags: map[string]string{"destination": "toot"},
+ expect: "",
+ hasfile: "toot/alpine-0.1.0.tgz",
+ },
+ {
+ name: "package --sign --key=KEY --keyring=KEYRING testdata/testcharts/alpine",
+ args: []string{"testdata/testcharts/alpine"},
+ flags: map[string]string{"sign": "1", "keyring": "testdata/helm-test-key.secret", "key": "helm-test"},
+ expect: "",
+ hasfile: "alpine-0.1.0.tgz",
+ },
+ {
+ name: "package testdata/testcharts/chart-missing-deps",
+ args: []string{"testdata/testcharts/chart-missing-deps"},
+ hasfile: "chart-missing-deps-0.1.0.tgz",
+ err: true,
+ },
+ {
+ name: "package testdata/testcharts/chart-bad-type",
+ args: []string{"testdata/testcharts/chart-bad-type"},
+ err: true,
+ },
+ }
+
+ origDir, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Chdir(t.TempDir())
+ ensure.HelmHome(t)
+
+ if err := os.MkdirAll("toot", 0o777); err != nil {
+ t.Fatal(err)
+ }
+
+ // This is an unfortunate byproduct of the tmpdir
+ if v, ok := tt.flags["keyring"]; ok && len(v) > 0 {
+ tt.flags["keyring"] = filepath.Join(origDir, v)
+ }
+
+ re := regexp.MustCompile(tt.expect)
+
+ adjustedArgs := make([]string, len(tt.args))
+ for i, f := range tt.args {
+ adjustedArgs[i] = filepath.Join(origDir, f)
+ }
+
+ cmd := []string{"package"}
+ if len(adjustedArgs) > 0 {
+ cmd = append(cmd, adjustedArgs...)
+ }
+ for k, v := range tt.flags {
+ if v != "0" {
+ cmd = append(cmd, fmt.Sprintf("--%s=%s", k, v))
+ }
+ }
+ _, _, err = executeActionCommand(strings.Join(cmd, " "))
+ if err != nil {
+ if tt.err && re.MatchString(err.Error()) {
+ return
+ }
+ t.Fatalf("%q: expected error %q, got %q", tt.name, tt.expect, err)
+ }
+
+ if len(tt.hasfile) > 0 {
+ if fi, err := os.Stat(tt.hasfile); err != nil {
+ t.Errorf("%q: expected file %q, got err %q", tt.name, tt.hasfile, err)
+ } else if fi.Size() == 0 {
+ t.Errorf("%q: file %q has zero bytes.", tt.name, tt.hasfile)
+ }
+ }
+
+ if v, ok := tt.flags["sign"]; ok && v == "1" {
+ if fi, err := os.Stat(tt.hasfile + ".prov"); err != nil {
+ t.Errorf("%q: expected provenance file", tt.name)
+ } else if fi.Size() == 0 {
+ t.Errorf("%q: provenance file is empty", tt.name)
+ }
+ }
+ })
+ }
+}
+
+func TestSetAppVersion(t *testing.T) {
+ var ch *chart.Chart
+ expectedAppVersion := "app-version-foo"
+ chartToPackage := "testdata/testcharts/alpine"
+ dir := t.TempDir()
+ cmd := fmt.Sprintf("package %s --destination=%s --app-version=%s", chartToPackage, dir, expectedAppVersion)
+ _, output, err := executeActionCommand(cmd)
+ if err != nil {
+ t.Logf("Output: %s", output)
+ t.Fatal(err)
+ }
+ chartPath := filepath.Join(dir, "alpine-0.1.0.tgz")
+ if fi, err := os.Stat(chartPath); err != nil {
+ t.Errorf("expected file %q, got err %q", chartPath, err)
+ } else if fi.Size() == 0 {
+ t.Errorf("file %q has zero bytes.", chartPath)
+ }
+ ch, err = loader.Load(chartPath)
+ if err != nil {
+ t.Fatalf("unexpected error loading packaged chart: %v", err)
+ }
+ if ch.Metadata.AppVersion != expectedAppVersion {
+ t.Errorf("expected app-version %q, found %q", expectedAppVersion, ch.Metadata.AppVersion)
+ }
+}
+
+func TestPackageFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "package", true)
+ checkFileCompletion(t, "package mypath", true) // Multiple paths can be given
+}
diff --git a/helm/pkg/cmd/plugin.go b/helm/pkg/cmd/plugin.go
new file mode 100644
index 000000000..ba904ef5f
--- /dev/null
+++ b/helm/pkg/cmd/plugin.go
@@ -0,0 +1,55 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+)
+
+const pluginHelp = `
+Manage client-side Helm plugins.
+`
+
+func newPluginCmd(out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "plugin",
+ Short: "install, list, or uninstall Helm plugins",
+ Long: pluginHelp,
+ }
+ cmd.AddCommand(
+ newPluginInstallCmd(out),
+ newPluginListCmd(out),
+ newPluginUninstallCmd(out),
+ newPluginUpdateCmd(out),
+ newPluginPackageCmd(out),
+ newPluginVerifyCmd(out),
+ )
+ return cmd
+}
+
+// runHook will execute a plugin hook.
+func runHook(p plugin.Plugin, event string) error {
+ pluginHook, ok := p.(plugin.PluginHook)
+ if ok {
+ return pluginHook.InvokeHook(event)
+ }
+
+ return nil
+}
diff --git a/helm/pkg/cmd/plugin_install.go b/helm/pkg/cmd/plugin_install.go
new file mode 100644
index 000000000..efa9b466c
--- /dev/null
+++ b/helm/pkg/cmd/plugin_install.go
@@ -0,0 +1,183 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log/slog"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/installer"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+type pluginInstallOptions struct {
+ source string
+ version string
+ // signing options
+ verify bool
+ keyring string
+ // OCI-specific options
+ certFile string
+ keyFile string
+ caFile string
+ insecureSkipTLSVerify bool
+ plainHTTP bool
+ password string
+ username string
+}
+
+const pluginInstallDesc = `
+This command allows you to install a plugin from a url to a VCS repo or a local path.
+
+By default, plugin signatures are verified before installation when installing from
+tarballs (.tgz or .tar.gz). This requires a corresponding .prov file to be available
+alongside the tarball.
+For local development, plugins installed from local directories are automatically
+treated as "local dev" and do not require signatures.
+Use --verify=false to skip signature verification for remote plugins.
+`
+
+func newPluginInstallCmd(out io.Writer) *cobra.Command {
+ o := &pluginInstallOptions{}
+ cmd := &cobra.Command{
+ Use: "install [options] ",
+ Short: "install a Helm plugin",
+ Long: pluginInstallDesc,
+ Aliases: []string{"add"},
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ // We do file completion, in case the plugin is local
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ // No more completion once the plugin path has been specified
+ return noMoreArgsComp()
+ },
+ PreRunE: func(_ *cobra.Command, args []string) error {
+ return o.complete(args)
+ },
+ RunE: func(_ *cobra.Command, _ []string) error {
+ return o.run(out)
+ },
+ }
+ cmd.Flags().StringVar(&o.version, "version", "", "specify a version constraint. If this is not specified, the latest version is installed")
+ cmd.Flags().BoolVar(&o.verify, "verify", true, "verify the plugin signature before installing")
+ cmd.Flags().StringVar(&o.keyring, "keyring", defaultKeyring(), "location of public keys used for verification")
+
+ // Add OCI-specific flags
+ cmd.Flags().StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file")
+ cmd.Flags().StringVar(&o.keyFile, "key-file", "", "identify registry client using this SSL key file")
+ cmd.Flags().StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ cmd.Flags().BoolVar(&o.insecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the plugin download")
+ cmd.Flags().BoolVar(&o.plainHTTP, "plain-http", false, "use insecure HTTP connections for the plugin download")
+ cmd.Flags().StringVar(&o.username, "username", "", "registry username")
+ cmd.Flags().StringVar(&o.password, "password", "", "registry password")
+ return cmd
+}
+
+func (o *pluginInstallOptions) complete(args []string) error {
+ o.source = args[0]
+ return nil
+}
+
+func (o *pluginInstallOptions) newInstallerForSource() (installer.Installer, error) {
+ // Check if source is an OCI registry reference
+ if strings.HasPrefix(o.source, fmt.Sprintf("%s://", registry.OCIScheme)) {
+ // Build getter options for OCI
+ options := []getter.Option{
+ getter.WithTLSClientConfig(o.certFile, o.keyFile, o.caFile),
+ getter.WithInsecureSkipVerifyTLS(o.insecureSkipTLSVerify),
+ getter.WithPlainHTTP(o.plainHTTP),
+ getter.WithBasicAuth(o.username, o.password),
+ }
+
+ return installer.NewOCIInstaller(o.source, options...)
+ }
+
+ // For non-OCI sources, use the original logic
+ return installer.NewForSource(o.source, o.version)
+}
+
+func (o *pluginInstallOptions) run(out io.Writer) error {
+ i, err := o.newInstallerForSource()
+ if err != nil {
+ return err
+ }
+
+ // Determine if we should verify based on installer type and flags
+ shouldVerify := o.verify
+
+ // Check if this is a local directory installation (for development)
+ if localInst, ok := i.(*installer.LocalInstaller); ok && !localInst.SupportsVerification() {
+ // Local directory installations are allowed without verification
+ shouldVerify = false
+ fmt.Fprintf(out, "Installing plugin from local directory (development mode)\n")
+ } else if shouldVerify {
+ // For remote installations, check if verification is supported
+ if verifier, ok := i.(installer.Verifier); !ok || !verifier.SupportsVerification() {
+ return fmt.Errorf("plugin source does not support verification. Use --verify=false to skip verification")
+ }
+ } else {
+ // User explicitly disabled verification
+ fmt.Fprintf(out, "WARNING: Skipping plugin signature verification\n")
+ }
+
+ // Set up installation options
+ opts := installer.Options{
+ Verify: shouldVerify,
+ Keyring: o.keyring,
+ }
+
+ // If verify is requested, show verification output
+ if shouldVerify {
+ fmt.Fprintf(out, "Verifying plugin signature...\n")
+ }
+
+ // Install the plugin with options
+ verifyResult, err := installer.InstallWithOptions(i, opts)
+ if err != nil {
+ return err
+ }
+
+ // If verification was successful, show the details
+ if verifyResult != nil {
+ for _, signer := range verifyResult.SignedBy {
+ fmt.Fprintf(out, "Signed by: %s\n", signer)
+ }
+ fmt.Fprintf(out, "Using Key With Fingerprint: %s\n", verifyResult.Fingerprint)
+ fmt.Fprintf(out, "Plugin Hash Verified: %s\n", verifyResult.FileHash)
+ }
+
+ slog.Debug("loading plugin", "path", i.Path())
+ p, err := plugin.LoadDir(i.Path())
+ if err != nil {
+ return fmt.Errorf("plugin is installed but unusable: %w", err)
+ }
+
+ if err := runHook(p, plugin.Install); err != nil {
+ return err
+ }
+
+ fmt.Fprintf(out, "Installed plugin: %s\n", p.Metadata().Name)
+ return nil
+}
diff --git a/helm/pkg/cmd/plugin_list.go b/helm/pkg/cmd/plugin_list.go
new file mode 100644
index 000000000..74e969e04
--- /dev/null
+++ b/helm/pkg/cmd/plugin_list.go
@@ -0,0 +1,117 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log/slog"
+ "path/filepath"
+ "slices"
+
+ "github.com/gosuri/uitable"
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/schema"
+)
+
+func newPluginListCmd(out io.Writer) *cobra.Command {
+ var pluginType string
+ cmd := &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "list installed Helm plugins",
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ slog.Debug("pluginDirs", "directory", settings.PluginsDirectory)
+ dirs := filepath.SplitList(settings.PluginsDirectory)
+ descriptor := plugin.Descriptor{
+ Type: pluginType,
+ }
+ plugins, err := plugin.FindPlugins(dirs, descriptor)
+ if err != nil {
+ return err
+ }
+
+ // Get signing info for all plugins
+ signingInfo := plugin.GetSigningInfoForPlugins(plugins)
+
+ table := uitable.New()
+ table.AddRow("NAME", "VERSION", "TYPE", "APIVERSION", "PROVENANCE", "SOURCE")
+ for _, p := range plugins {
+ m := p.Metadata()
+ sourceURL := m.SourceURL
+ if sourceURL == "" {
+ sourceURL = "unknown"
+ }
+ // Get signing status
+ signedStatus := "unknown"
+ if info, ok := signingInfo[m.Name]; ok {
+ signedStatus = info.Status
+ }
+ table.AddRow(m.Name, m.Version, m.Type, m.APIVersion, signedStatus, sourceURL)
+ }
+ fmt.Fprintln(out, table)
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVar(&pluginType, "type", "", "Plugin type")
+
+ return cmd
+}
+
+// Returns all plugins from plugins, except those with names matching ignoredPluginNames
+func filterPlugins(plugins []plugin.Plugin, ignoredPluginNames []string) []plugin.Plugin {
+ // if ignoredPluginNames is nil or empty, just return plugins
+ if len(ignoredPluginNames) == 0 {
+ return plugins
+ }
+
+ var filteredPlugins []plugin.Plugin
+ for _, plugin := range plugins {
+ found := slices.Contains(ignoredPluginNames, plugin.Metadata().Name)
+ if !found {
+ filteredPlugins = append(filteredPlugins, plugin)
+ }
+ }
+
+ return filteredPlugins
+}
+
+// Provide dynamic auto-completion for plugin names
+func compListPlugins(_ string, ignoredPluginNames []string) []string {
+ var pNames []string
+ dirs := filepath.SplitList(settings.PluginsDirectory)
+ descriptor := plugin.Descriptor{
+ Type: "cli/v1",
+ }
+ plugins, err := plugin.FindPlugins(dirs, descriptor)
+ if err == nil && len(plugins) > 0 {
+ filteredPlugins := filterPlugins(plugins, ignoredPluginNames)
+ for _, p := range filteredPlugins {
+ m := p.Metadata()
+ var shortHelp string
+ if config, ok := m.Config.(*schema.ConfigCLIV1); ok {
+ shortHelp = config.ShortHelp
+ }
+ pNames = append(pNames, fmt.Sprintf("%s\t%s", p.Metadata().Name, shortHelp))
+ }
+ }
+ return pNames
+}
diff --git a/helm/pkg/cmd/plugin_package.go b/helm/pkg/cmd/plugin_package.go
new file mode 100644
index 000000000..05f8bb5ad
--- /dev/null
+++ b/helm/pkg/cmd/plugin_package.go
@@ -0,0 +1,216 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+const pluginPackageDesc = `
+This command packages a Helm plugin directory into a tarball.
+
+By default, the command will generate a provenance file signed with a PGP key.
+This ensures the plugin can be verified after installation.
+
+Use --sign=false to skip signing (not recommended for distribution).
+`
+
+type pluginPackageOptions struct {
+ sign bool
+ keyring string
+ key string
+ passphraseFile string
+ pluginPath string
+ destination string
+}
+
+func newPluginPackageCmd(out io.Writer) *cobra.Command {
+ o := &pluginPackageOptions{}
+
+ cmd := &cobra.Command{
+ Use: "package [PATH]",
+ Short: "package a plugin directory into a plugin archive",
+ Long: pluginPackageDesc,
+ Args: require.ExactArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.pluginPath = args[0]
+ return o.run(out)
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&o.sign, "sign", true, "use a PGP private key to sign this plugin")
+ f.StringVar(&o.key, "key", "", "name of the key to use when signing. Used if --sign is true")
+ f.StringVar(&o.keyring, "keyring", defaultKeyring(), "location of a public keyring")
+ f.StringVar(&o.passphraseFile, "passphrase-file", "", "location of a file which contains the passphrase for the signing key. Use \"-\" to read from stdin.")
+ f.StringVarP(&o.destination, "destination", "d", ".", "location to write the plugin tarball.")
+
+ return cmd
+}
+
+func (o *pluginPackageOptions) run(out io.Writer) error {
+ // Check if the plugin path exists and is a directory
+ fi, err := os.Stat(o.pluginPath)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return fmt.Errorf("plugin package only supports directories, not tarballs")
+ }
+
+ // Load and validate plugin metadata
+ pluginMeta, err := plugin.LoadDir(o.pluginPath)
+ if err != nil {
+ return fmt.Errorf("invalid plugin directory: %w", err)
+ }
+
+ // Create destination directory if needed
+ if err := os.MkdirAll(o.destination, 0755); err != nil {
+ return err
+ }
+
+ // If signing is requested, prepare the signer first
+ var signer *provenance.Signatory
+ if o.sign {
+ // Load the signing key
+ signer, err = provenance.NewFromKeyring(o.keyring, o.key)
+ if err != nil {
+ return fmt.Errorf("error reading from keyring: %w", err)
+ }
+
+ // Get passphrase
+ passphraseFetcher := o.promptUser
+ if o.passphraseFile != "" {
+ passphraseFetcher, err = o.passphraseFileFetcher()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Decrypt the key
+ if err := signer.DecryptKey(passphraseFetcher); err != nil {
+ return err
+ }
+ } else {
+ // User explicitly disabled signing
+ fmt.Fprintf(out, "WARNING: Skipping plugin signing. This is not recommended for plugins intended for distribution.\n")
+ }
+
+ // Now create the tarball (only after signing prerequisites are met)
+ // Use plugin metadata for filename: PLUGIN_NAME-SEMVER.tgz
+ metadata := pluginMeta.Metadata()
+ filename := fmt.Sprintf("%s-%s.tgz", metadata.Name, metadata.Version)
+ tarballPath := filepath.Join(o.destination, filename)
+
+ tarFile, err := os.Create(tarballPath)
+ if err != nil {
+ return fmt.Errorf("failed to create tarball: %w", err)
+ }
+ defer tarFile.Close()
+
+ if err := plugin.CreatePluginTarball(o.pluginPath, metadata.Name, tarFile); err != nil {
+ os.Remove(tarballPath)
+ return fmt.Errorf("failed to create plugin tarball: %w", err)
+ }
+ tarFile.Close() // Ensure file is closed before signing
+
+ // If signing was requested, sign the tarball
+ if o.sign {
+ // Read the tarball data
+ tarballData, err := os.ReadFile(tarballPath)
+ if err != nil {
+ os.Remove(tarballPath)
+ return fmt.Errorf("failed to read tarball for signing: %w", err)
+ }
+
+ // Sign the plugin tarball data
+ sig, err := plugin.SignPlugin(tarballData, filepath.Base(tarballPath), signer)
+ if err != nil {
+ os.Remove(tarballPath)
+ return fmt.Errorf("failed to sign plugin: %w", err)
+ }
+
+ // Write the signature
+ provFile := tarballPath + ".prov"
+ if err := os.WriteFile(provFile, []byte(sig), 0644); err != nil {
+ os.Remove(tarballPath)
+ return err
+ }
+
+ fmt.Fprintf(out, "Successfully signed. Signature written to: %s\n", provFile)
+ }
+
+ fmt.Fprintf(out, "Successfully packaged plugin and saved it to: %s\n", tarballPath)
+
+ return nil
+}
+
+func (o *pluginPackageOptions) promptUser(name string) ([]byte, error) {
+ fmt.Printf("Password for key %q > ", name)
+ pw, err := term.ReadPassword(int(syscall.Stdin))
+ fmt.Println()
+ return pw, err
+}
+
+func (o *pluginPackageOptions) passphraseFileFetcher() (provenance.PassphraseFetcher, error) {
+ file, err := openPassphraseFile(o.passphraseFile, os.Stdin)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // Read the entire passphrase
+ passphrase, err := io.ReadAll(file)
+ if err != nil {
+ return nil, err
+ }
+
+ // Trim any trailing newline characters (both \n and \r\n)
+ passphrase = bytes.TrimRight(passphrase, "\r\n")
+
+ return func(_ string) ([]byte, error) {
+ return passphrase, nil
+ }, nil
+}
+
+// copied from action.openPassphraseFile
+// TODO: should we move this to pkg/action so we can reuse the func from there?
+func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
+ if passphraseFile == "-" {
+ stat, err := stdin.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if (stat.Mode() & os.ModeNamedPipe) == 0 {
+ return nil, errors.New("specified reading passphrase from stdin, without input on stdin")
+ }
+ return stdin, nil
+ }
+ return os.Open(passphraseFile)
+}
diff --git a/helm/pkg/cmd/plugin_package_test.go b/helm/pkg/cmd/plugin_package_test.go
new file mode 100644
index 000000000..7d97562f8
--- /dev/null
+++ b/helm/pkg/cmd/plugin_package_test.go
@@ -0,0 +1,170 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Common plugin.yaml content for v1 format tests
+const testPluginYAML = `apiVersion: v1
+name: test-plugin
+version: 1.0.0
+type: cli/v1
+runtime: subprocess
+config:
+ usage: test-plugin [flags]
+ shortHelp: A test plugin
+ longHelp: A test plugin for testing purposes
+runtimeConfig:
+ platformCommand:
+ - os: linux
+ command: echo
+ args: ["test"]`
+
+func TestPluginPackageWithoutSigning(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create package options with sign=false
+ o := &pluginPackageOptions{
+ sign: false, // Explicitly disable signing
+ pluginPath: pluginDir,
+ destination: tempDir,
+ }
+
+ // Run the package command
+ out := &bytes.Buffer{}
+ err := o.run(out)
+
+ // Should succeed without error
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ // Check that tarball was created with plugin name and version
+ tarballPath := filepath.Join(tempDir, "test-plugin-1.0.0.tgz")
+ if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
+ t.Error("tarball should exist when sign=false")
+ }
+
+ // Check that no .prov file was created
+ provPath := tarballPath + ".prov"
+ if _, err := os.Stat(provPath); !os.IsNotExist(err) {
+ t.Error("provenance file should not exist when sign=false")
+ }
+
+ // Output should contain warning about skipping signing
+ output := out.String()
+ if !strings.Contains(output, "WARNING: Skipping plugin signing") {
+ t.Error("should print warning when signing is skipped")
+ }
+ if !strings.Contains(output, "Successfully packaged") {
+ t.Error("should print success message")
+ }
+}
+
+func TestPluginPackageDefaultRequiresSigning(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create package options with default sign=true and invalid keyring
+ o := &pluginPackageOptions{
+ sign: true, // This is now the default
+ keyring: "/non/existent/keyring",
+ pluginPath: pluginDir,
+ destination: tempDir,
+ }
+
+ // Run the package command
+ out := &bytes.Buffer{}
+ err := o.run(out)
+
+ // Should fail because signing is required by default
+ if err == nil {
+ t.Error("expected error when signing fails with default settings")
+ }
+
+ // Check that no tarball was created
+ tarballPath := filepath.Join(tempDir, "test-plugin.tgz")
+ if _, err := os.Stat(tarballPath); !os.IsNotExist(err) {
+ t.Error("tarball should not exist when signing fails")
+ }
+}
+
+func TestPluginPackageSigningFailure(t *testing.T) {
+ // Create a test plugin directory
+ tempDir := t.TempDir()
+ pluginDir := filepath.Join(tempDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a plugin.yaml file
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create package options with sign flag but invalid keyring
+ o := &pluginPackageOptions{
+ sign: true,
+ keyring: "/non/existent/keyring", // This will cause signing to fail
+ pluginPath: pluginDir,
+ destination: tempDir,
+ }
+
+ // Run the package command
+ out := &bytes.Buffer{}
+ err := o.run(out)
+
+ // Should get an error
+ if err == nil {
+ t.Error("expected error when signing fails, got nil")
+ }
+
+ // Check that no tarball was created
+ tarballPath := filepath.Join(tempDir, "test-plugin.tgz")
+ if _, err := os.Stat(tarballPath); !os.IsNotExist(err) {
+ t.Error("tarball should not exist when signing fails")
+ }
+
+ // Output should not contain success message
+ if bytes.Contains(out.Bytes(), []byte("Successfully packaged")) {
+ t.Error("should not print success message when signing fails")
+ }
+}
diff --git a/helm/pkg/cmd/plugin_test.go b/helm/pkg/cmd/plugin_test.go
new file mode 100644
index 000000000..a250ba221
--- /dev/null
+++ b/helm/pkg/cmd/plugin_test.go
@@ -0,0 +1,427 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "testing"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestManuallyProcessArgs(t *testing.T) {
+ input := []string{
+ "--debug",
+ "--foo", "bar",
+ "--kubeconfig=/home/foo",
+ "--kubeconfig", "/home/foo",
+ "--kube-context=test1",
+ "--kube-context", "test1",
+ "--kube-as-user", "pikachu",
+ "--kube-as-group", "teatime",
+ "--kube-as-group", "admins",
+ "-n=test2",
+ "-n", "test2",
+ "--namespace=test2",
+ "--namespace", "test2",
+ "--home=/tmp",
+ "command",
+ }
+
+ expectKnown := []string{
+ "--debug",
+ "--kubeconfig=/home/foo",
+ "--kubeconfig", "/home/foo",
+ "--kube-context=test1",
+ "--kube-context", "test1",
+ "--kube-as-user", "pikachu",
+ "--kube-as-group", "teatime",
+ "--kube-as-group", "admins",
+ "-n=test2",
+ "-n", "test2",
+ "--namespace=test2",
+ "--namespace", "test2",
+ }
+
+ expectUnknown := []string{
+ "--foo", "bar", "--home=/tmp", "command",
+ }
+
+ known, unknown := manuallyProcessArgs(input)
+
+ for i, k := range known {
+ if k != expectKnown[i] {
+ t.Errorf("expected known flag %d to be %q, got %q", i, expectKnown[i], k)
+ }
+ }
+ for i, k := range unknown {
+ if k != expectUnknown[i] {
+ t.Errorf("expected unknown flag %d to be %q, got %q", i, expectUnknown[i], k)
+ }
+ }
+}
+
+func TestLoadCLIPlugins(t *testing.T) {
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
+ settings.RepositoryConfig = "testdata/helmhome/helm/repositories.yaml"
+ settings.RepositoryCache = "testdata/helmhome/helm/repository"
+
+ var (
+ out bytes.Buffer
+ cmd cobra.Command
+ )
+ loadCLIPlugins(&cmd, &out)
+
+ fullEnvOutput := strings.Join([]string{
+ "HELM_PLUGIN_NAME=fullenv",
+ "HELM_PLUGIN_DIR=testdata/helmhome/helm/plugins/fullenv",
+ "HELM_PLUGINS=testdata/helmhome/helm/plugins",
+ "HELM_REPOSITORY_CONFIG=testdata/helmhome/helm/repositories.yaml",
+ "HELM_REPOSITORY_CACHE=testdata/helmhome/helm/repository",
+ fmt.Sprintf("HELM_BIN=%s", os.Args[0]),
+ }, "\n") + "\n"
+
+ // Test that the YAML file was correctly converted to a command.
+ tests := []struct {
+ use string
+ short string
+ long string
+ expect string
+ args []string
+ code int
+ }{
+ {"args", "echo args", "This echos args", "-a -b -c\n", []string{"-a", "-b", "-c"}, 0},
+ {"echo", "echo stuff", "This echos stuff", "hello\n", []string{}, 0},
+ {"exitwith", "exitwith code", "This exits with the specified exit code", "", []string{"2"}, 2},
+ {"fullenv", "show env vars", "show all env vars", fullEnvOutput, []string{}, 0},
+ {"shortenv", "env stuff", "show the env", "HELM_PLUGIN_NAME=shortenv\n", []string{}, 0},
+ }
+
+ pluginCmds := cmd.Commands()
+
+ require.Len(t, pluginCmds, len(tests), "Expected %d plugins, got %d", len(tests), len(pluginCmds))
+
+ for i := range pluginCmds {
+ out.Reset()
+ tt := tests[i]
+ pluginCmd := pluginCmds[i]
+ t.Run(fmt.Sprintf("%s-%d", pluginCmd.Name(), i), func(t *testing.T) {
+ out.Reset()
+ if pluginCmd.Use != tt.use {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.use, pluginCmd.Use)
+ }
+ if pluginCmd.Short != tt.short {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.short, pluginCmd.Short)
+ }
+ if pluginCmd.Long != tt.long {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.long, pluginCmd.Long)
+ }
+
+ // Currently, plugins assume a Linux subsystem. Skip the execution
+ // tests until this is fixed
+ if runtime.GOOS != "windows" {
+ if err := pluginCmd.RunE(pluginCmd, tt.args); err != nil {
+ if tt.code > 0 {
+ cerr, ok := err.(CommandError)
+ if !ok {
+ t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err)
+ }
+ if cerr.ExitCode != tt.code {
+ t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, cerr.ExitCode)
+ }
+ } else {
+ t.Errorf("Error running %s: %+v", tt.use, err)
+ }
+ }
+ assert.Equal(t, tt.expect, out.String(), "expected output for %q", tt.use)
+ }
+ })
+ }
+}
+
+func TestLoadPluginsWithSpace(t *testing.T) {
+ settings.PluginsDirectory = "testdata/helm home with space/helm/plugins"
+ settings.RepositoryConfig = "testdata/helm home with space/helm/repositories.yaml"
+ settings.RepositoryCache = "testdata/helm home with space/helm/repository"
+
+ var (
+ out bytes.Buffer
+ cmd cobra.Command
+ )
+ loadCLIPlugins(&cmd, &out)
+
+ envs := strings.Join([]string{
+ "fullenv",
+ "testdata/helm home with space/helm/plugins/fullenv",
+ "testdata/helm home with space/helm/plugins",
+ "testdata/helm home with space/helm/repositories.yaml",
+ "testdata/helm home with space/helm/repository",
+ os.Args[0],
+ }, "\n")
+
+ // Test that the YAML file was correctly converted to a command.
+ tests := []struct {
+ use string
+ short string
+ long string
+ expect string
+ args []string
+ code int
+ }{
+ {"fullenv", "show env vars", "show all env vars", envs + "\n", []string{}, 0},
+ }
+
+ plugins := cmd.Commands()
+
+ if len(plugins) != len(tests) {
+ t.Fatalf("Expected %d plugins, got %d", len(tests), len(plugins))
+ }
+
+ for i := range plugins {
+ out.Reset()
+ tt := tests[i]
+ pp := plugins[i]
+ if pp.Use != tt.use {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.use, pp.Use)
+ }
+ if pp.Short != tt.short {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.short, pp.Short)
+ }
+ if pp.Long != tt.long {
+ t.Errorf("%d: Expected Use=%q, got %q", i, tt.long, pp.Long)
+ }
+
+ // Currently, plugins assume a Linux subsystem. Skip the execution
+ // tests until this is fixed
+ if runtime.GOOS != "windows" {
+ if err := pp.RunE(pp, tt.args); err != nil {
+ if tt.code > 0 {
+ cerr, ok := err.(CommandError)
+ if !ok {
+ t.Errorf("Expected %s to return pluginError: got %v(%T)", tt.use, err, err)
+ }
+ if cerr.ExitCode != tt.code {
+ t.Errorf("Expected %s to return %d: got %d", tt.use, tt.code, cerr.ExitCode)
+ }
+ } else {
+ t.Errorf("Error running %s: %+v", tt.use, err)
+ }
+ }
+ assert.Equal(t, tt.expect, out.String(), "expected output for %s", tt.use)
+ }
+ }
+}
+
+type staticCompletionDetails struct {
+ use string
+ validArgs []string
+ flags []string
+ next []staticCompletionDetails
+}
+
+func TestLoadCLIPluginsForCompletion(t *testing.T) {
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
+
+ var out bytes.Buffer
+
+ cmd := &cobra.Command{
+ Use: "completion",
+ }
+ loadCLIPlugins(cmd, &out)
+
+ tests := []staticCompletionDetails{
+ {"args", []string{}, []string{}, []staticCompletionDetails{}},
+ {"echo", []string{}, []string{}, []staticCompletionDetails{}},
+ {"exitwith", []string{}, []string{}, []staticCompletionDetails{
+ {"code", []string{}, []string{"a", "b"}, []staticCompletionDetails{}},
+ }},
+ {"fullenv", []string{}, []string{"q", "z"}, []staticCompletionDetails{
+ {"empty", []string{}, []string{}, []staticCompletionDetails{}},
+ {"full", []string{}, []string{}, []staticCompletionDetails{
+ {"less", []string{}, []string{"a", "all"}, []staticCompletionDetails{}},
+ {"more", []string{"one", "two"}, []string{"b", "ball"}, []staticCompletionDetails{}},
+ }},
+ }},
+ {"shortenv", []string{}, []string{"global"}, []staticCompletionDetails{
+ {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}},
+ {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}},
+ }},
+ }
+ checkCommand(t, cmd.Commands(), tests)
+}
+
+func checkCommand(t *testing.T, plugins []*cobra.Command, tests []staticCompletionDetails) {
+ t.Helper()
+ require.Len(t, plugins, len(tests), "Expected commands %v, got %v", tests, plugins)
+
+ is := assert.New(t)
+ for i := range plugins {
+ pp := plugins[i]
+ tt := tests[i]
+ is.Equal(pp.Use, tt.use, "Expected Use=%q, got %q", tt.use, pp.Use)
+
+ targs := tt.validArgs
+ pargs := pp.ValidArgs
+ is.ElementsMatch(targs, pargs)
+
+ tflags := tt.flags
+ var pflags []string
+ pp.LocalFlags().VisitAll(func(flag *pflag.Flag) {
+ pflags = append(pflags, flag.Name)
+ if len(flag.Shorthand) > 0 && flag.Shorthand != flag.Name {
+ pflags = append(pflags, flag.Shorthand)
+ }
+ })
+ is.ElementsMatch(tflags, pflags)
+
+ // Check the next level
+ checkCommand(t, pp.Commands(), tt.next)
+ }
+}
+
+func TestPluginDynamicCompletion(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "completion for plugin",
+ cmd: "__complete args ''",
+ golden: "output/plugin_args_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin with flag",
+ cmd: "__complete args --myflag ''",
+ golden: "output/plugin_args_flag_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin with global flag",
+ cmd: "__complete args --namespace mynamespace ''",
+ golden: "output/plugin_args_ns_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin with multiple args",
+ cmd: "__complete args --myflag --namespace mynamespace start",
+ golden: "output/plugin_args_many_args_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin no directive",
+ cmd: "__complete echo -n mynamespace ''",
+ golden: "output/plugin_echo_no_directive.txt",
+ rels: []*release.Release{},
+ }}
+ for _, test := range tests {
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
+ runTestCmd(t, []cmdTestCase{test})
+ }
+}
+
+func TestLoadCLIPlugins_HelmNoPlugins(t *testing.T) {
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
+ settings.RepositoryConfig = "testdata/helmhome/helm/repository"
+
+ t.Setenv("HELM_NO_PLUGINS", "1")
+
+ out := bytes.NewBuffer(nil)
+ cmd := &cobra.Command{}
+ loadCLIPlugins(cmd, out)
+ plugins := cmd.Commands()
+
+ if len(plugins) != 0 {
+ t.Fatalf("Expected 0 plugins, got %d", len(plugins))
+ }
+}
+
+func TestPluginCmdsCompletion(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "completion for plugin update",
+ cmd: "__complete plugin update ''",
+ golden: "output/plugin_list_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin update, no filter",
+ cmd: "__complete plugin update full",
+ golden: "output/plugin_list_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin update repetition",
+ cmd: "__complete plugin update args ''",
+ golden: "output/plugin_repeat_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin uninstall",
+ cmd: "__complete plugin uninstall ''",
+ golden: "output/plugin_list_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin uninstall, no filter",
+ cmd: "__complete plugin uninstall full",
+ golden: "output/plugin_list_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin uninstall repetition",
+ cmd: "__complete plugin uninstall args ''",
+ golden: "output/plugin_repeat_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin list",
+ cmd: "__complete plugin list ''",
+ golden: "output/empty_nofile_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin install no args",
+ cmd: "__complete plugin install ''",
+ golden: "output/empty_default_comp.txt",
+ rels: []*release.Release{},
+ }, {
+ name: "completion for plugin install one arg",
+ cmd: "__complete plugin list /tmp ''",
+ golden: "output/empty_nofile_comp.txt",
+ rels: []*release.Release{},
+ }, {}}
+ for _, test := range tests {
+ settings.PluginsDirectory = "testdata/helmhome/helm/plugins"
+ runTestCmd(t, []cmdTestCase{test})
+ }
+}
+
+func TestPluginFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "plugin", false)
+}
+
+func TestPluginInstallFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "plugin install", true)
+ checkFileCompletion(t, "plugin install mypath", false)
+}
+
+func TestPluginListFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "plugin list", false)
+}
+
+func TestPluginUninstallFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "plugin uninstall", false)
+ checkFileCompletion(t, "plugin uninstall myplugin", false)
+}
+
+func TestPluginUpdateFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "plugin update", false)
+ checkFileCompletion(t, "plugin update myplugin", false)
+}
diff --git a/helm/pkg/cmd/plugin_uninstall.go b/helm/pkg/cmd/plugin_uninstall.go
new file mode 100644
index 000000000..85eb46219
--- /dev/null
+++ b/helm/pkg/cmd/plugin_uninstall.go
@@ -0,0 +1,132 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+)
+
+type pluginUninstallOptions struct {
+ names []string
+}
+
+func newPluginUninstallCmd(out io.Writer) *cobra.Command {
+ o := &pluginUninstallOptions{}
+
+ cmd := &cobra.Command{
+ Use: "uninstall ...",
+ Aliases: []string{"rm", "remove"},
+ Short: "uninstall one or more Helm plugins",
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compListPlugins(toComplete, args), cobra.ShellCompDirectiveNoFileComp
+ },
+ PreRunE: func(_ *cobra.Command, args []string) error {
+ return o.complete(args)
+ },
+ RunE: func(_ *cobra.Command, _ []string) error {
+ return o.run(out)
+ },
+ }
+ return cmd
+}
+
+func (o *pluginUninstallOptions) complete(args []string) error {
+ if len(args) == 0 {
+ return errors.New("please provide plugin name to uninstall")
+ }
+ o.names = args
+ return nil
+}
+
+func (o *pluginUninstallOptions) run(out io.Writer) error {
+ slog.Debug("loading installer plugins", "dir", settings.PluginsDirectory)
+ plugins, err := plugin.LoadAll(settings.PluginsDirectory)
+ if err != nil {
+ return err
+ }
+ var errorPlugins []error
+ for _, name := range o.names {
+ if found := findPlugin(plugins, name); found != nil {
+ if err := uninstallPlugin(found); err != nil {
+ errorPlugins = append(errorPlugins, fmt.Errorf("failed to uninstall plugin %s, got error (%v)", name, err))
+ } else {
+ fmt.Fprintf(out, "Uninstalled plugin: %s\n", name)
+ }
+ } else {
+ errorPlugins = append(errorPlugins, fmt.Errorf("plugin: %s not found", name))
+ }
+ }
+ if len(errorPlugins) > 0 {
+ return errors.Join(errorPlugins...)
+ }
+ return nil
+}
+
+func uninstallPlugin(p plugin.Plugin) error {
+ if err := os.RemoveAll(p.Dir()); err != nil {
+ return err
+ }
+
+ // Clean up versioned tarball and provenance files from HELM_PLUGINS directory
+ // These files are saved with pattern: PLUGIN_NAME-VERSION.tgz and PLUGIN_NAME-VERSION.tgz.prov
+ pluginName := p.Metadata().Name
+ pluginVersion := p.Metadata().Version
+ pluginsDir := settings.PluginsDirectory
+
+ // Remove versioned files: plugin-name-version.tgz and plugin-name-version.tgz.prov
+ if pluginVersion != "" {
+ versionedBasename := fmt.Sprintf("%s-%s.tgz", pluginName, pluginVersion)
+
+ // Remove tarball file
+ tarballPath := filepath.Join(pluginsDir, versionedBasename)
+ if _, err := os.Stat(tarballPath); err == nil {
+ slog.Debug("removing versioned tarball", "path", tarballPath)
+ if err := os.Remove(tarballPath); err != nil {
+ slog.Debug("failed to remove tarball file", "path", tarballPath, "error", err)
+ }
+ }
+
+ // Remove provenance file
+ provPath := filepath.Join(pluginsDir, versionedBasename+".prov")
+ if _, err := os.Stat(provPath); err == nil {
+ slog.Debug("removing versioned provenance", "path", provPath)
+ if err := os.Remove(provPath); err != nil {
+ slog.Debug("failed to remove provenance file", "path", provPath, "error", err)
+ }
+ }
+ }
+
+ return runHook(p, plugin.Delete)
+}
+
+// TODO should this be in pkg/plugin/loader.go?
+func findPlugin(plugins []plugin.Plugin, name string) plugin.Plugin {
+ for _, p := range plugins {
+ if p.Metadata().Name == name {
+ return p
+ }
+ }
+ return nil
+}
diff --git a/helm/pkg/cmd/plugin_uninstall_test.go b/helm/pkg/cmd/plugin_uninstall_test.go
new file mode 100644
index 000000000..93d4dc8a8
--- /dev/null
+++ b/helm/pkg/cmd/plugin_uninstall_test.go
@@ -0,0 +1,146 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+func TestPluginUninstallCleansUpVersionedFiles(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a fake plugin directory structure in a temp directory
+ pluginsDir := t.TempDir()
+ t.Setenv("HELM_PLUGINS", pluginsDir)
+
+ // Create a new settings instance that will pick up the environment variable
+ testSettings := cli.New()
+ pluginName := "test-plugin"
+
+ // Create plugin directory
+ pluginDir := filepath.Join(pluginsDir, pluginName)
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create plugin.yaml
+ pluginYAML := `name: test-plugin
+version: 1.2.3
+description: Test plugin
+command: $HELM_PLUGIN_DIR/test-plugin
+`
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(pluginYAML), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create versioned tarball and provenance files
+ tarballFile := filepath.Join(pluginsDir, "test-plugin-1.2.3.tgz")
+ provFile := filepath.Join(pluginsDir, "test-plugin-1.2.3.tgz.prov")
+ otherVersionTarball := filepath.Join(pluginsDir, "test-plugin-2.0.0.tgz")
+
+ if err := os.WriteFile(tarballFile, []byte("fake tarball"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.WriteFile(provFile, []byte("fake provenance"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ // Create another version that should NOT be removed
+ if err := os.WriteFile(otherVersionTarball, []byte("other version"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ // Load the plugin
+ p, err := plugin.LoadDir(pluginDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a test uninstall function that uses our test settings
+ testUninstallPlugin := func(plugin plugin.Plugin) error {
+ if err := os.RemoveAll(plugin.Dir()); err != nil {
+ return err
+ }
+
+ // Clean up versioned tarball and provenance files from test HELM_PLUGINS directory
+ pluginName := plugin.Metadata().Name
+ pluginVersion := plugin.Metadata().Version
+ testPluginsDir := testSettings.PluginsDirectory
+
+ // Remove versioned files: plugin-name-version.tgz and plugin-name-version.tgz.prov
+ if pluginVersion != "" {
+ versionedBasename := fmt.Sprintf("%s-%s.tgz", pluginName, pluginVersion)
+
+ // Remove tarball file
+ tarballPath := filepath.Join(testPluginsDir, versionedBasename)
+ if _, err := os.Stat(tarballPath); err == nil {
+ if err := os.Remove(tarballPath); err != nil {
+ t.Logf("failed to remove tarball file: %v", err)
+ }
+ }
+
+ // Remove provenance file
+ provPath := filepath.Join(testPluginsDir, versionedBasename+".prov")
+ if _, err := os.Stat(provPath); err == nil {
+ if err := os.Remove(provPath); err != nil {
+ t.Logf("failed to remove provenance file: %v", err)
+ }
+ }
+ }
+
+ // Skip runHook in test
+ return nil
+ }
+
+ // Verify files exist before uninstall
+ if _, err := os.Stat(tarballFile); os.IsNotExist(err) {
+ t.Fatal("tarball file should exist before uninstall")
+ }
+ if _, err := os.Stat(provFile); os.IsNotExist(err) {
+ t.Fatal("provenance file should exist before uninstall")
+ }
+ if _, err := os.Stat(otherVersionTarball); os.IsNotExist(err) {
+ t.Fatal("other version tarball should exist before uninstall")
+ }
+
+ // Uninstall the plugin
+ if err := testUninstallPlugin(p); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify plugin directory is removed
+ if _, err := os.Stat(pluginDir); !os.IsNotExist(err) {
+ t.Error("plugin directory should be removed")
+ }
+
+ // Verify only exact version files are removed
+ if _, err := os.Stat(tarballFile); !os.IsNotExist(err) {
+ t.Error("versioned tarball file should be removed")
+ }
+ if _, err := os.Stat(provFile); !os.IsNotExist(err) {
+ t.Error("versioned provenance file should be removed")
+ }
+ // Verify other version files are NOT removed
+ if _, err := os.Stat(otherVersionTarball); os.IsNotExist(err) {
+ t.Error("other version tarball should NOT be removed")
+ }
+}
diff --git a/helm/pkg/cmd/plugin_update.go b/helm/pkg/cmd/plugin_update.go
new file mode 100644
index 000000000..6cc2729fc
--- /dev/null
+++ b/helm/pkg/cmd/plugin_update.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/installer"
+)
+
+type pluginUpdateOptions struct {
+ names []string
+}
+
+func newPluginUpdateCmd(out io.Writer) *cobra.Command {
+ o := &pluginUpdateOptions{}
+
+ cmd := &cobra.Command{
+ Use: "update ...",
+ Aliases: []string{"up"},
+ Short: "update one or more Helm plugins",
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compListPlugins(toComplete, args), cobra.ShellCompDirectiveNoFileComp
+ },
+ PreRunE: func(_ *cobra.Command, args []string) error {
+ return o.complete(args)
+ },
+ RunE: func(_ *cobra.Command, _ []string) error {
+ return o.run(out)
+ },
+ }
+ return cmd
+}
+
+func (o *pluginUpdateOptions) complete(args []string) error {
+ if len(args) == 0 {
+ return errors.New("please provide plugin name to update")
+ }
+ o.names = args
+ return nil
+}
+
+func (o *pluginUpdateOptions) run(out io.Writer) error {
+ slog.Debug("loading installed plugins", "path", settings.PluginsDirectory)
+ plugins, err := plugin.LoadAll(settings.PluginsDirectory)
+ if err != nil {
+ return err
+ }
+ var errorPlugins []error
+
+ for _, name := range o.names {
+ if found := findPlugin(plugins, name); found != nil {
+ if err := updatePlugin(found); err != nil {
+ errorPlugins = append(errorPlugins, fmt.Errorf("failed to update plugin %s, got error (%v)", name, err))
+ } else {
+ fmt.Fprintf(out, "Updated plugin: %s\n", name)
+ }
+ } else {
+ errorPlugins = append(errorPlugins, fmt.Errorf("plugin: %s not found", name))
+ }
+ }
+ if len(errorPlugins) > 0 {
+ return errors.Join(errorPlugins...)
+ }
+ return nil
+}
+
+func updatePlugin(p plugin.Plugin) error {
+ exactLocation, err := filepath.EvalSymlinks(p.Dir())
+ if err != nil {
+ return err
+ }
+ absExactLocation, err := filepath.Abs(exactLocation)
+ if err != nil {
+ return err
+ }
+
+ i, err := installer.FindSource(absExactLocation)
+ if err != nil {
+ return err
+ }
+ if err := installer.Update(i); err != nil {
+ return err
+ }
+
+ slog.Debug("loading plugin", "path", i.Path())
+ updatedPlugin, err := plugin.LoadDir(i.Path())
+ if err != nil {
+ return err
+ }
+
+ return runHook(updatedPlugin, plugin.Update)
+}
diff --git a/helm/pkg/cmd/plugin_verify.go b/helm/pkg/cmd/plugin_verify.go
new file mode 100644
index 000000000..5f89e743e
--- /dev/null
+++ b/helm/pkg/cmd/plugin_verify.go
@@ -0,0 +1,123 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const pluginVerifyDesc = `
+This command verifies that a Helm plugin has a valid provenance file,
+and that the provenance file is signed by a trusted PGP key.
+
+It supports both:
+- Plugin tarballs (.tgz or .tar.gz files)
+- Installed plugin directories
+
+For installed plugins, use the path shown by 'helm env HELM_PLUGINS' followed
+by the plugin name. For example:
+ helm plugin verify ~/.local/share/helm/plugins/example-cli
+
+To generate a signed plugin, use the 'helm plugin package --sign' command.
+`
+
+type pluginVerifyOptions struct {
+ keyring string
+ pluginPath string
+}
+
+func newPluginVerifyCmd(out io.Writer) *cobra.Command {
+ o := &pluginVerifyOptions{}
+
+ cmd := &cobra.Command{
+ Use: "verify [PATH]",
+ Short: "verify that a plugin at the given path has been signed and is valid",
+ Long: pluginVerifyDesc,
+ Args: require.ExactArgs(1),
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.pluginPath = args[0]
+ return o.run(out)
+ },
+ }
+
+ cmd.Flags().StringVar(&o.keyring, "keyring", defaultKeyring(), "keyring containing public keys")
+
+ return cmd
+}
+
+func (o *pluginVerifyOptions) run(out io.Writer) error {
+ // Verify the plugin path exists
+ fi, err := os.Stat(o.pluginPath)
+ if err != nil {
+ return err
+ }
+
+ // Only support tarball verification
+ if fi.IsDir() {
+ return fmt.Errorf("directory verification not supported - only plugin tarballs can be verified")
+ }
+
+ // Verify it's a tarball
+ if !plugin.IsTarball(o.pluginPath) {
+ return fmt.Errorf("plugin file must be a gzipped tarball (.tar.gz or .tgz)")
+ }
+
+ // Look for provenance file
+ provFile := o.pluginPath + ".prov"
+ if _, err := os.Stat(provFile); err != nil {
+ return fmt.Errorf("could not find provenance file %s: %w", provFile, err)
+ }
+
+ // Read the files
+ archiveData, err := os.ReadFile(o.pluginPath)
+ if err != nil {
+ return fmt.Errorf("failed to read plugin file: %w", err)
+ }
+
+ provData, err := os.ReadFile(provFile)
+ if err != nil {
+ return fmt.Errorf("failed to read provenance file: %w", err)
+ }
+
+ // Verify the plugin using data
+ verification, err := plugin.VerifyPlugin(archiveData, provData, filepath.Base(o.pluginPath), o.keyring)
+ if err != nil {
+ return err
+ }
+
+ // Output verification details
+ for name := range verification.SignedBy.Identities {
+ fmt.Fprintf(out, "Signed by: %v\n", name)
+ }
+ fmt.Fprintf(out, "Using Key With Fingerprint: %X\n", verification.SignedBy.PrimaryKey.Fingerprint)
+
+ // Only show hash for tarballs
+ if verification.FileHash != "" {
+ fmt.Fprintf(out, "Plugin Hash Verified: %s\n", verification.FileHash)
+ } else {
+ fmt.Fprintf(out, "Plugin Metadata Verified: %s\n", verification.FileName)
+ }
+
+ return nil
+}
diff --git a/helm/pkg/cmd/plugin_verify_test.go b/helm/pkg/cmd/plugin_verify_test.go
new file mode 100644
index 000000000..e631814dd
--- /dev/null
+++ b/helm/pkg/cmd/plugin_verify_test.go
@@ -0,0 +1,264 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/test/ensure"
+)
+
+func TestPluginVerifyCmd_NoArgs(t *testing.T) {
+ ensure.HelmHome(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when no arguments provided")
+ }
+ if !strings.Contains(err.Error(), "requires 1 argument") {
+ t.Errorf("expected 'requires 1 argument' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_TooManyArgs(t *testing.T) {
+ ensure.HelmHome(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{"plugin1", "plugin2"})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when too many arguments provided")
+ }
+ if !strings.Contains(err.Error(), "requires 1 argument") {
+ t.Errorf("expected 'requires 1 argument' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_NonexistentFile(t *testing.T) {
+ ensure.HelmHome(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{"/nonexistent/plugin.tgz"})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when plugin file doesn't exist")
+ }
+}
+
+func TestPluginVerifyCmd_MissingProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin tarball without .prov file
+ pluginTgz := createTestPluginTarball(t)
+ defer os.Remove(pluginTgz)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{pluginTgz})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when .prov file is missing")
+ }
+ if !strings.Contains(err.Error(), "could not find provenance file") {
+ t.Errorf("expected 'could not find provenance file' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_InvalidProvenance(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin tarball with invalid .prov file
+ pluginTgz := createTestPluginTarball(t)
+ defer os.Remove(pluginTgz)
+
+ // Create invalid .prov file
+ provFile := pluginTgz + ".prov"
+ if err := os.WriteFile(provFile, []byte("invalid provenance"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(provFile)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{pluginTgz})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when .prov file is invalid")
+ }
+}
+
+func TestPluginVerifyCmd_DirectoryNotSupported(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin directory
+ pluginDir := createTestPluginDir(t)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{pluginDir})
+
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error when verifying directory")
+ }
+ if !strings.Contains(err.Error(), "directory verification not supported") {
+ t.Errorf("expected 'directory verification not supported' error, got: %v", err)
+ }
+}
+
+func TestPluginVerifyCmd_KeyringFlag(t *testing.T) {
+ ensure.HelmHome(t)
+
+ // Create a plugin tarball with .prov file
+ pluginTgz := createTestPluginTarball(t)
+ defer os.Remove(pluginTgz)
+
+ // Create .prov file
+ provFile := pluginTgz + ".prov"
+ createProvFile(t, provFile, pluginTgz, "")
+ defer os.Remove(provFile)
+
+ // Create empty keyring file
+ keyring := createTestKeyring(t)
+ defer os.Remove(keyring)
+
+ out := &bytes.Buffer{}
+ cmd := newPluginVerifyCmd(out)
+ cmd.SetArgs([]string{"--keyring", keyring, pluginTgz})
+
+ // Should fail with keyring error but command parsing should work
+ err := cmd.Execute()
+ if err == nil {
+ t.Error("expected error with empty keyring")
+ }
+ // The important thing is that the keyring flag was parsed and used
+}
+
+func TestPluginVerifyOptions_Run_Success(t *testing.T) {
+ // Skip this test as it would require real PGP keys and valid signatures
+ // The core verification logic is thoroughly tested in internal/plugin/verify_test.go
+ t.Skip("Success case requires real PGP keys - core logic tested in internal/plugin/verify_test.go")
+}
+
+// Helper functions for test setup
+
+func createTestPluginDir(t *testing.T) string {
+ t.Helper()
+
+ // Create temporary directory with plugin structure
+ tmpDir := t.TempDir()
+ pluginDir := filepath.Join(tmpDir, "test-plugin")
+ if err := os.MkdirAll(pluginDir, 0755); err != nil {
+ t.Fatalf("Failed to create plugin directory: %v", err)
+ }
+
+ // Use the same plugin YAML as other cmd tests
+ if err := os.WriteFile(filepath.Join(pluginDir, "plugin.yaml"), []byte(testPluginYAML), 0644); err != nil {
+ t.Fatalf("Failed to create plugin.yaml: %v", err)
+ }
+
+ return pluginDir
+}
+
+func createTestPluginTarball(t *testing.T) string {
+ t.Helper()
+
+ pluginDir := createTestPluginDir(t)
+
+ // Create tarball using the plugin package helper
+ tmpDir := filepath.Dir(pluginDir)
+ tgzPath := filepath.Join(tmpDir, "test-plugin-1.0.0.tgz")
+ tarFile, err := os.Create(tgzPath)
+ if err != nil {
+ t.Fatalf("Failed to create tarball file: %v", err)
+ }
+ defer tarFile.Close()
+
+ if err := plugin.CreatePluginTarball(pluginDir, "test-plugin", tarFile); err != nil {
+ t.Fatalf("Failed to create tarball: %v", err)
+ }
+
+ return tgzPath
+}
+
+func createProvFile(t *testing.T, provFile, pluginTgz, hash string) {
+ t.Helper()
+
+ var hashStr string
+ if hash == "" {
+ // Calculate actual hash of the tarball
+ data, err := os.ReadFile(pluginTgz)
+ if err != nil {
+ t.Fatalf("Failed to read tarball for hashing: %v", err)
+ }
+ hashSum := sha256.Sum256(data)
+ hashStr = fmt.Sprintf("sha256:%x", hashSum)
+ } else {
+ // Use provided hash
+ hashStr = hash
+ }
+
+ // Create properly formatted provenance file with specified hash
+ provContent := fmt.Sprintf(`-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA256
+
+name: test-plugin
+version: 1.0.0
+description: Test plugin for verification
+files:
+ test-plugin-1.0.0.tgz: %s
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQEcBAEBCAAGBQJktest...
+-----END PGP SIGNATURE-----
+`, hashStr)
+ if err := os.WriteFile(provFile, []byte(provContent), 0644); err != nil {
+ t.Fatalf("Failed to create provenance file: %v", err)
+ }
+}
+
+func createTestKeyring(t *testing.T) string {
+ t.Helper()
+
+ // Create a temporary keyring file
+ tmpDir := t.TempDir()
+ keyringPath := filepath.Join(tmpDir, "pubring.gpg")
+
+ // Create empty keyring for testing
+ if err := os.WriteFile(keyringPath, []byte{}, 0644); err != nil {
+ t.Fatalf("Failed to create test keyring: %v", err)
+ }
+
+ return keyringPath
+}
diff --git a/helm/pkg/cmd/printer.go b/helm/pkg/cmd/printer.go
new file mode 100644
index 000000000..30238f5bb
--- /dev/null
+++ b/helm/pkg/cmd/printer.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+ "text/template"
+)
+
+func tpl(t string, vals map[string]interface{}, out io.Writer) error {
+ tt, err := template.New("_").Parse(t)
+ if err != nil {
+ return err
+ }
+ return tt.Execute(out, vals)
+}
diff --git a/helm/pkg/cmd/profiling.go b/helm/pkg/cmd/profiling.go
new file mode 100644
index 000000000..45e7b9342
--- /dev/null
+++ b/helm/pkg/cmd/profiling.go
@@ -0,0 +1,91 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+var (
+ cpuProfileFile *os.File
+ cpuProfilePath string
+ memProfilePath string
+)
+
+func init() {
+ cpuProfilePath = os.Getenv("HELM_PPROF_CPU_PROFILE")
+ memProfilePath = os.Getenv("HELM_PPROF_MEM_PROFILE")
+}
+
+// startProfiling starts profiling CPU usage if HELM_PPROF_CPU_PROFILE is set
+// to a file path. It returns an error if the file could not be created or
+// CPU profiling could not be started.
+func startProfiling() error {
+ if cpuProfilePath != "" {
+ var err error
+ cpuProfileFile, err = os.Create(cpuProfilePath)
+ if err != nil {
+ return fmt.Errorf("could not create CPU profile: %w", err)
+ }
+ if err := pprof.StartCPUProfile(cpuProfileFile); err != nil {
+ cpuProfileFile.Close()
+ cpuProfileFile = nil
+ return fmt.Errorf("could not start CPU profile: %w", err)
+ }
+ }
+ return nil
+}
+
+// stopProfiling stops profiling CPU and memory usage.
+// It writes memory profile to the file path specified in HELM_PPROF_MEM_PROFILE
+// environment variable.
+func stopProfiling() error {
+ errs := []error{}
+
+ // Stop CPU profiling if it was started
+ if cpuProfileFile != nil {
+ pprof.StopCPUProfile()
+ err := cpuProfileFile.Close()
+ if err != nil {
+ errs = append(errs, err)
+ }
+ cpuProfileFile = nil
+ }
+
+ if memProfilePath != "" {
+ f, err := os.Create(memProfilePath)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ defer f.Close()
+
+ runtime.GC() // get up-to-date statistics
+ if err := pprof.WriteHeapProfile(f); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if err := errors.Join(errs...); err != nil {
+ return fmt.Errorf("error(s) while stopping profiling: %w", err)
+ }
+
+ return nil
+}
diff --git a/helm/pkg/cmd/pull.go b/helm/pkg/cmd/pull.go
new file mode 100644
index 000000000..bb7a8d1c0
--- /dev/null
+++ b/helm/pkg/cmd/pull.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "log/slog"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const pullDesc = `
+Retrieve a package from a package repository, and download it locally.
+
+This is useful for fetching packages to inspect, modify, or repackage. It can
+also be used to perform cryptographic verification of a chart without installing
+the chart.
+
+There are options for unpacking the chart after download. This will create a
+directory for the chart and uncompress into that directory.
+
+If the --verify flag is specified, the requested chart MUST have a provenance
+file, and MUST pass the verification process. Failure in any part of this will
+result in an error, and the chart will not be saved locally.
+`
+
+func newPullCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewPull(action.WithConfig(cfg))
+
+ cmd := &cobra.Command{
+ Use: "pull [chart URL | repo/chartname] [...]",
+ Short: "download a chart from a repository and (optionally) unpack it in local directory",
+ Aliases: []string{"fetch"},
+ Long: pullDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return compListCharts(toComplete, false)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ client.Settings = settings
+ if client.Version == "" && client.Devel {
+ slog.Debug("setting version to >0.0.0-0")
+ client.Version = ">0.0.0-0"
+ }
+
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+ client.SetRegistryClient(registryClient)
+
+ for i := range args {
+ output, err := client.Run(args[i])
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ }
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored.")
+ f.BoolVar(&client.Untar, "untar", false, "if set to true, will untar the chart after downloading it")
+ f.BoolVar(&client.VerifyLater, "prov", false, "fetch the provenance file, but don't perform verification")
+ f.StringVar(&client.UntarDir, "untardir", ".", "if untar is specified, this flag specifies the name of the directory into which the chart is expanded")
+ f.StringVarP(&client.DestDir, "destination", "d", ".", "location to write the chart. If this and untardir are specified, untardir is appended to this")
+ addChartPathOptionsFlags(f, &client.ChartPathOptions)
+
+ err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 1 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[0], toComplete)
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/pull_test.go b/helm/pkg/cmd/pull_test.go
new file mode 100644
index 000000000..96631fe05
--- /dev/null
+++ b/helm/pkg/cmd/pull_test.go
@@ -0,0 +1,508 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestPullCmd(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"),
+ )
+ defer srv.Stop()
+
+ ociSrv, err := repotest.NewOCIServer(t, srv.Root())
+ if err != nil {
+ t.Fatal(err)
+ }
+ ociSrv.Run(t)
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ helmTestKeyOut := "Signed by: Helm Testing (This key should only be used for testing. DO NOT TRUST.) \n" +
+ "Using Key With Fingerprint: 5E615389B53CA37F0EE60BD3843BBF981FC18762\n" +
+ "Chart Hash Verified: "
+
+ // all flags will get "-d outdir" appended.
+ tests := []struct {
+ name string
+ args string
+ existFile string
+ existDir string
+ wantError bool
+ wantErrorMsg string
+ failExpect string
+ expectFile string
+ expectDir bool
+ expectVerify bool
+ expectSha string
+ }{
+ {
+ name: "Basic chart fetch",
+ args: "test/signtest",
+ expectFile: "./signtest-0.1.0.tgz",
+ },
+ {
+ name: "Chart fetch with version",
+ args: "test/signtest --version=0.1.0",
+ expectFile: "./signtest-0.1.0.tgz",
+ },
+ {
+ name: "Fail chart fetch with non-existent version",
+ args: "test/signtest --version=99.1.0",
+ wantError: true,
+ failExpect: "no such chart",
+ },
+ {
+ name: "Fail fetching non-existent chart",
+ args: "test/nosuchthing",
+ failExpect: "Failed to fetch",
+ wantError: true,
+ },
+ {
+ name: "Fetch and verify",
+ args: "test/signtest --verify --keyring testdata/helm-test-key.pub",
+ expectFile: "./signtest-0.1.0.tgz",
+ expectVerify: true,
+ expectSha: "sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55",
+ },
+ {
+ name: "Fetch and fail verify",
+ args: "test/reqtest --verify --keyring testdata/helm-test-key.pub",
+ failExpect: "Failed to fetch provenance",
+ wantError: true,
+ },
+ {
+ name: "Fetch and untar",
+ args: "test/signtest --untar --untardir signtest",
+ expectFile: "./signtest",
+ expectDir: true,
+ },
+ {
+ name: "Fetch untar when file with same name existed",
+ args: "test/test1 --untar --untardir test1",
+ existFile: "test1/test1",
+ wantError: true,
+ wantErrorMsg: fmt.Sprintf("failed to untar: a file or directory with the name %s already exists", filepath.Join(srv.Root(), "test1", "test1")),
+ },
+ {
+ name: "Fetch untar when dir with same name existed",
+ args: "test/test --untar --untardir test2",
+ existDir: "test2/test",
+ wantError: true,
+ wantErrorMsg: fmt.Sprintf("failed to untar: a file or directory with the name %s already exists", filepath.Join(srv.Root(), "test2", "test")),
+ },
+ {
+ name: "Fetch, verify, untar",
+ args: "test/signtest --verify --keyring=testdata/helm-test-key.pub --untar --untardir signtest2",
+ expectFile: "./signtest2",
+ expectDir: true,
+ expectVerify: true,
+ expectSha: "sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55",
+ },
+ {
+ name: "Chart fetch using repo URL",
+ expectFile: "./signtest-0.1.0.tgz",
+ args: "signtest --repo " + srv.URL(),
+ },
+ {
+ name: "Fail fetching non-existent chart on repo URL",
+ args: "someChart --repo " + srv.URL(),
+ failExpect: "Failed to fetch chart",
+ wantError: true,
+ },
+ {
+ name: "Specific version chart fetch using repo URL",
+ expectFile: "./signtest-0.1.0.tgz",
+ args: "signtest --version=0.1.0 --repo " + srv.URL(),
+ },
+ {
+ name: "Specific version chart fetch using repo URL",
+ args: "signtest --version=0.2.0 --repo " + srv.URL(),
+ failExpect: "Failed to fetch chart version",
+ wantError: true,
+ },
+ {
+ name: "Chart fetch using repo URL with untardir",
+ args: "signtest --version=0.1.0 --untar --untardir repo-url-test --repo " + srv.URL(),
+ expectFile: "./signtest",
+ expectDir: true,
+ },
+ {
+ name: "Chart fetch using repo URL with untardir and previous pull",
+ args: "signtest --version=0.1.0 --untar --untardir repo-url-test --repo " + srv.URL(),
+ failExpect: "failed to untar",
+ wantError: true,
+ },
+ {
+ name: "Fetch OCI Chart",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart --version 0.1.0", ociSrv.RegistryURL),
+ expectFile: "./oci-dependent-chart-0.1.0.tgz",
+ },
+ {
+ name: "Fetch OCI Chart with untar",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart --version 0.1.0 --untar", ociSrv.RegistryURL),
+ expectFile: "./oci-dependent-chart",
+ expectDir: true,
+ },
+ {
+ name: "Fetch OCI Chart with untar and untardir",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart --version 0.1.0 --untar --untardir ocitest2", ociSrv.RegistryURL),
+ expectFile: "./ocitest2",
+ expectDir: true,
+ },
+ {
+ name: "OCI Fetch untar when dir with same name existed",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart --version 0.1.0 --untar --untardir ocitest2", ociSrv.RegistryURL),
+ existDir: "ocitest2/oci-dependent-chart",
+ wantError: true,
+ wantErrorMsg: fmt.Sprintf("failed to untar: a file or directory with the name %s already exists", filepath.Join(srv.Root(), "ocitest2", "oci-dependent-chart")),
+ },
+ {
+ name: "Fail fetching non-existent OCI chart",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/nosuchthing --version 0.1.0", ociSrv.RegistryURL),
+ failExpect: "Failed to fetch",
+ wantError: true,
+ },
+ {
+ name: "Fail fetching OCI chart without version specified",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/nosuchthing", ociSrv.RegistryURL),
+ wantError: true,
+ },
+ {
+ name: "Fetching OCI chart without version option specified",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart:0.1.0", ociSrv.RegistryURL),
+ expectFile: "./oci-dependent-chart-0.1.0.tgz",
+ },
+ {
+ name: "Fetching OCI chart with version specified",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart:0.1.0 --version 0.1.0", ociSrv.RegistryURL),
+ expectFile: "./oci-dependent-chart-0.1.0.tgz",
+ },
+ {
+ name: "Fail fetching OCI chart with version mismatch",
+ args: fmt.Sprintf("oci://%s/u/ocitestuser/oci-dependent-chart:0.2.0 --version 0.1.0", ociSrv.RegistryURL),
+ wantErrorMsg: "chart reference and version mismatch: 0.1.0 is not 0.2.0",
+ wantError: true,
+ },
+ }
+
+ contentCache := t.TempDir()
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ outdir := srv.Root()
+ cmd := fmt.Sprintf("fetch %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s --content-cache %s --plain-http",
+ tt.args,
+ outdir,
+ filepath.Join(outdir, "repositories.yaml"),
+ outdir,
+ filepath.Join(outdir, "config.json"),
+ contentCache,
+ )
+ // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182
+ if tt.existFile != "" {
+ file := filepath.Join(outdir, tt.existFile)
+ if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil {
+ t.Fatal(err)
+ }
+ _, err := os.Create(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if tt.existDir != "" {
+ file := filepath.Join(outdir, tt.existDir)
+ err := os.MkdirAll(file, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ _, out, err := executeActionCommand(cmd)
+ if err != nil {
+ if tt.wantError {
+ if tt.wantErrorMsg != "" && tt.wantErrorMsg != err.Error() {
+ t.Fatalf("Actual error '%s', not equal to expected error '%s'", err, tt.wantErrorMsg)
+ }
+ return
+ }
+ t.Fatalf("%q reported error: %s", tt.name, err)
+ }
+
+ if tt.expectVerify {
+ outString := helmTestKeyOut + tt.expectSha + "\n"
+ if out != outString {
+ t.Errorf("%q: expected verification output %q, got %q", tt.name, outString, out)
+ }
+
+ }
+
+ ef := filepath.Join(outdir, tt.expectFile)
+ fi, err := os.Stat(ef)
+ if err != nil {
+ t.Errorf("%q: expected a file at %s. %s", tt.name, ef, err)
+ }
+ if fi.IsDir() != tt.expectDir {
+ t.Errorf("%q: expected directory=%t, but it's not.", tt.name, tt.expectDir)
+ }
+ })
+ }
+}
+
+// runPullTests is a helper function to run pull command tests with common logic
+func runPullTests(t *testing.T, tests []struct {
+ name string
+ args string
+ existFile string
+ existDir string
+ wantError bool
+ wantErrorMsg string
+ expectFile string
+ expectDir bool
+}, outdir string, additionalFlags string) {
+ t.Helper()
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cmd := fmt.Sprintf("pull %s -d '%s' --repository-config %s --repository-cache %s --registry-config %s %s",
+ tt.args,
+ outdir,
+ filepath.Join(outdir, "repositories.yaml"),
+ outdir,
+ filepath.Join(outdir, "config.json"),
+ additionalFlags,
+ )
+ // Create file or Dir before helm pull --untar, see: https://github.com/helm/helm/issues/7182
+ if tt.existFile != "" {
+ file := filepath.Join(outdir, tt.existFile)
+ _, err := os.Create(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ if tt.existDir != "" {
+ file := filepath.Join(outdir, tt.existDir)
+ err := os.MkdirAll(file, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ _, _, err := executeActionCommand(cmd)
+ if tt.wantError && err == nil {
+ t.Fatalf("%q: expected error but got none", tt.name)
+ }
+ if err != nil {
+ if tt.wantError {
+ if tt.wantErrorMsg != "" && tt.wantErrorMsg != err.Error() {
+ t.Fatalf("Actual error '%s', not equal to expected error '%s'", err, tt.wantErrorMsg)
+ }
+ return
+ }
+ t.Fatalf("%q reported error: %s", tt.name, err)
+ }
+
+ ef := filepath.Join(outdir, tt.expectFile)
+ fi, err := os.Stat(ef)
+ if err != nil {
+ t.Errorf("%q: expected a file at %s. %s", tt.name, ef, err)
+ }
+ if fi.IsDir() != tt.expectDir {
+ t.Errorf("%q: expected directory=%t, but it's not.", tt.name, tt.expectDir)
+ }
+ })
+ }
+}
+
+// buildOCIURL is a helper function to build OCI URLs with credentials
+func buildOCIURL(registryURL, chartName, version, username, password string) string {
+ baseURL := fmt.Sprintf("oci://%s/u/ocitestuser/%s", registryURL, chartName)
+ if version != "" {
+ baseURL += fmt.Sprintf(" --version %s", version)
+ }
+ if username != "" && password != "" {
+ baseURL += fmt.Sprintf(" --username %s --password %s", username, password)
+ }
+ return baseURL
+}
+
+func TestPullWithCredentialsCmd(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"),
+ repotest.WithMiddleware(repotest.BasicAuthMiddleware(t)),
+ )
+ defer srv.Stop()
+
+ srv2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.FileServer(http.Dir(srv.Root())).ServeHTTP(w, r)
+ }))
+ defer srv2.Close()
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ // all flags will get "-d outdir" appended.
+ tests := []struct {
+ name string
+ args string
+ existFile string
+ existDir string
+ wantError bool
+ wantErrorMsg string
+ expectFile string
+ expectDir bool
+ }{
+ {
+ name: "Chart fetch using repo URL",
+ expectFile: "./signtest-0.1.0.tgz",
+ args: "signtest --repo " + srv.URL() + " --username username --password password",
+ },
+ {
+ name: "Fail fetching non-existent chart on repo URL",
+ args: "someChart --repo " + srv.URL() + " --username username --password password",
+ wantError: true,
+ },
+ {
+ name: "Specific version chart fetch using repo URL",
+ expectFile: "./signtest-0.1.0.tgz",
+ args: "signtest --version=0.1.0 --repo " + srv.URL() + " --username username --password password",
+ },
+ {
+ name: "Specific version chart fetch using repo URL",
+ args: "signtest --version=0.2.0 --repo " + srv.URL() + " --username username --password password",
+ wantError: true,
+ },
+ {
+ name: "Chart located on different domain with credentials passed",
+ args: "reqtest --repo " + srv2.URL + " --username username --password password --pass-credentials",
+ expectFile: "./reqtest-0.1.0.tgz",
+ },
+ }
+
+ runPullTests(t, tests, srv.Root(), "")
+}
+
+func TestPullVersionCompletion(t *testing.T) {
+ repoFile := "testdata/helmhome/helm/repositories.yaml"
+ repoCache := "testdata/helmhome/helm/repository"
+
+ repoSetup := fmt.Sprintf("--repository-config %s --repository-cache %s", repoFile, repoCache)
+
+ tests := []cmdTestCase{{
+ name: "completion for pull version flag",
+ cmd: fmt.Sprintf("%s __complete pull testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for pull version flag, no filter",
+ cmd: fmt.Sprintf("%s __complete pull testing/alpine --version 0.3", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for pull version flag too few args",
+ cmd: fmt.Sprintf("%s __complete pull --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for pull version flag too many args",
+ cmd: fmt.Sprintf("%s __complete pull testing/alpine badarg --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for pull version flag invalid chart",
+ cmd: fmt.Sprintf("%s __complete pull invalid/invalid --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestPullWithCredentialsCmdOCIRegistry(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"),
+ )
+ defer srv.Stop()
+
+ ociSrv, err := repotest.NewOCIServer(t, srv.Root())
+ if err != nil {
+ t.Fatal(err)
+ }
+ ociSrv.Run(t)
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ // all flags will get "-d outdir" appended.
+ tests := []struct {
+ name string
+ args string
+ existFile string
+ existDir string
+ wantError bool
+ wantErrorMsg string
+ expectFile string
+ expectDir bool
+ }{
+ {
+ name: "OCI Chart fetch with credentials",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword),
+ expectFile: "./oci-dependent-chart-0.1.0.tgz",
+ },
+ {
+ name: "OCI Chart fetch with credentials and untar",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword) + " --untar",
+ expectFile: "./oci-dependent-chart",
+ expectDir: true,
+ },
+ {
+ name: "OCI Chart fetch with credentials and untardir",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword) + " --untar --untardir ocitest-credentials",
+ expectFile: "./ocitest-credentials",
+ expectDir: true,
+ },
+ {
+ name: "Fail fetching OCI chart with wrong credentials",
+ args: buildOCIURL(ociSrv.RegistryURL, "oci-dependent-chart", "0.1.0", "wronguser", "wrongpass"),
+ wantError: true,
+ },
+ {
+ name: "Fail fetching non-existent OCI chart with credentials",
+ args: buildOCIURL(ociSrv.RegistryURL, "nosuchthing", "0.1.0", ociSrv.TestUsername, ociSrv.TestPassword),
+ wantError: true,
+ },
+ {
+ name: "Fail fetching OCI chart without version specified",
+ args: buildOCIURL(ociSrv.RegistryURL, "nosuchthing", "", ociSrv.TestUsername, ociSrv.TestPassword),
+ wantError: true,
+ },
+ }
+
+ runPullTests(t, tests, srv.Root(), "--plain-http")
+}
+
+func TestPullFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "pull", false)
+ checkFileCompletion(t, "pull repo/chart", false)
+}
diff --git a/helm/pkg/cmd/push.go b/helm/pkg/cmd/push.go
new file mode 100644
index 000000000..f57a7c52f
--- /dev/null
+++ b/helm/pkg/cmd/push.go
@@ -0,0 +1,108 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/pusher"
+)
+
+const pushDesc = `
+Upload a chart to a registry.
+
+If the chart has an associated provenance file,
+it will also be uploaded.
+`
+
+type registryPushOptions struct {
+ certFile string
+ keyFile string
+ caFile string
+ insecureSkipTLSVerify bool
+ plainHTTP bool
+ password string
+ username string
+}
+
+func newPushCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ o := ®istryPushOptions{}
+
+ cmd := &cobra.Command{
+ Use: "push [chart] [remote]",
+ Short: "push a chart to remote",
+ Long: pushDesc,
+ Args: require.MinimumNArgs(2),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ // Do file completion for the chart file to push
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ if len(args) == 1 {
+ providers := []pusher.Provider(pusher.All(settings))
+ var comps []string
+ for _, p := range providers {
+ for _, scheme := range p.Schemes {
+ comps = append(comps, fmt.Sprintf("%s://", scheme))
+ }
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace
+ }
+ return noMoreArgsComp()
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ registryClient, err := newRegistryClient(
+ o.certFile, o.keyFile, o.caFile, o.insecureSkipTLSVerify, o.plainHTTP, o.username, o.password,
+ )
+
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+ cfg.RegistryClient = registryClient
+ chartRef := args[0]
+ remote := args[1]
+ client := action.NewPushWithOpts(action.WithPushConfig(cfg),
+ action.WithTLSClientConfig(o.certFile, o.keyFile, o.caFile),
+ action.WithInsecureSkipTLSVerify(o.insecureSkipTLSVerify),
+ action.WithPlainHTTP(o.plainHTTP),
+ action.WithPushOptWriter(out))
+ client.Settings = settings
+ output, err := client.Run(chartRef, remote)
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file")
+ f.StringVar(&o.keyFile, "key-file", "", "identify registry client using this SSL key file")
+ f.StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ f.BoolVar(&o.insecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart upload")
+ f.BoolVar(&o.plainHTTP, "plain-http", false, "use insecure HTTP connections for the chart upload")
+ f.StringVar(&o.username, "username", "", "chart repository username where to locate the requested chart")
+ f.StringVar(&o.password, "password", "", "chart repository password where to locate the requested chart")
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/push_test.go b/helm/pkg/cmd/push_test.go
new file mode 100644
index 000000000..80d08b48f
--- /dev/null
+++ b/helm/pkg/cmd/push_test.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestPushFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "push", true)
+ checkFileCompletion(t, "push package.tgz", false)
+ checkFileCompletion(t, "push package.tgz oci://localhost:5000", false)
+}
diff --git a/helm/pkg/cmd/registry.go b/helm/pkg/cmd/registry.go
new file mode 100644
index 000000000..fcd06f13b
--- /dev/null
+++ b/helm/pkg/cmd/registry.go
@@ -0,0 +1,41 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+)
+
+const registryHelp = `
+This command consists of multiple subcommands to interact with registries.
+`
+
+func newRegistryCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "registry",
+ Short: "login to or logout from a registry",
+ Long: registryHelp,
+ }
+ cmd.AddCommand(
+ newRegistryLoginCmd(cfg, out),
+ newRegistryLogoutCmd(cfg, out),
+ )
+ return cmd
+}
diff --git a/helm/pkg/cmd/registry_login.go b/helm/pkg/cmd/registry_login.go
new file mode 100644
index 000000000..1350fb244
--- /dev/null
+++ b/helm/pkg/cmd/registry_login.go
@@ -0,0 +1,159 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "strings"
+
+ "github.com/moby/term"
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const registryLoginDesc = `
+Authenticate to a remote registry.
+
+For example for Github Container Registry:
+
+ echo "$GITHUB_TOKEN" | helm registry login ghcr.io -u $GITHUB_USER --password-stdin
+`
+
+type registryLoginOptions struct {
+ username string
+ password string
+ passwordFromStdinOpt bool
+ certFile string
+ keyFile string
+ caFile string
+ insecure bool
+ plainHTTP bool
+}
+
+func newRegistryLoginCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ o := ®istryLoginOptions{}
+
+ cmd := &cobra.Command{
+ Use: "login [host]",
+ Short: "login to a registry",
+ Long: registryLoginDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: cobra.NoFileCompletions,
+ RunE: func(_ *cobra.Command, args []string) error {
+ hostname := args[0]
+
+ username, password, err := getUsernamePassword(o.username, o.password, o.passwordFromStdinOpt)
+ if err != nil {
+ return err
+ }
+
+ return action.NewRegistryLogin(cfg).Run(out, hostname, username, password,
+ action.WithCertFile(o.certFile),
+ action.WithKeyFile(o.keyFile),
+ action.WithCAFile(o.caFile),
+ action.WithInsecure(o.insecure),
+ action.WithPlainHTTPLogin(o.plainHTTP))
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVarP(&o.username, "username", "u", "", "registry username")
+ f.StringVarP(&o.password, "password", "p", "", "registry password or identity token")
+ f.BoolVarP(&o.passwordFromStdinOpt, "password-stdin", "", false, "read password or identity token from stdin")
+ f.BoolVarP(&o.insecure, "insecure", "", false, "allow connections to TLS registry without certs")
+ f.StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file")
+ f.StringVar(&o.keyFile, "key-file", "", "identify registry client using this SSL key file")
+ f.StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ f.BoolVar(&o.plainHTTP, "plain-http", false, "use insecure HTTP connections for the chart upload")
+
+ return cmd
+}
+
+// Adapted from https://github.com/oras-project/oras
+func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStdinOpt bool) (string, string, error) {
+ var err error
+ username := usernameOpt
+ password := passwordOpt
+
+ if passwordFromStdinOpt {
+ passwordFromStdin, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ return "", "", err
+ }
+ password = strings.TrimSuffix(string(passwordFromStdin), "\n")
+ password = strings.TrimSuffix(password, "\r")
+ } else if password == "" {
+ if username == "" {
+ username, err = readLine("Username: ", false)
+ if err != nil {
+ return "", "", err
+ }
+ username = strings.TrimSpace(username)
+ }
+ if username == "" {
+ password, err = readLine("Token: ", true)
+ if err != nil {
+ return "", "", err
+ } else if password == "" {
+ return "", "", errors.New("token required")
+ }
+ } else {
+ password, err = readLine("Password: ", true)
+ if err != nil {
+ return "", "", err
+ } else if password == "" {
+ return "", "", errors.New("password required")
+ }
+ }
+ } else {
+ slog.Warn("using --password via the CLI is insecure. Use --password-stdin")
+ }
+
+ return username, password, nil
+}
+
+// Copied/adapted from https://github.com/oras-project/oras
+func readLine(prompt string, silent bool) (string, error) {
+ fmt.Print(prompt)
+ if silent {
+ fd := os.Stdin.Fd()
+ state, err := term.SaveState(fd)
+ if err != nil {
+ return "", err
+ }
+ term.DisableEcho(fd, state)
+ defer term.RestoreTerminal(fd, state)
+ }
+
+ reader := bufio.NewReader(os.Stdin)
+ line, _, err := reader.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ if silent {
+ fmt.Println()
+ }
+
+ return string(line), nil
+}
diff --git a/helm/pkg/cmd/registry_login_test.go b/helm/pkg/cmd/registry_login_test.go
new file mode 100644
index 000000000..6e4f2116e
--- /dev/null
+++ b/helm/pkg/cmd/registry_login_test.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestRegistryLoginFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "registry login", false)
+}
diff --git a/helm/pkg/cmd/registry_logout.go b/helm/pkg/cmd/registry_logout.go
new file mode 100644
index 000000000..300453705
--- /dev/null
+++ b/helm/pkg/cmd/registry_logout.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const registryLogoutDesc = `
+Remove credentials stored for a remote registry.
+`
+
+func newRegistryLogoutCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ return &cobra.Command{
+ Use: "logout [host]",
+ Short: "logout from a registry",
+ Long: registryLogoutDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: cobra.NoFileCompletions,
+ RunE: func(_ *cobra.Command, args []string) error {
+ hostname := args[0]
+ return action.NewRegistryLogout(cfg).Run(out, hostname)
+ },
+ }
+}
diff --git a/helm/pkg/cmd/registry_logout_test.go b/helm/pkg/cmd/registry_logout_test.go
new file mode 100644
index 000000000..31a21b277
--- /dev/null
+++ b/helm/pkg/cmd/registry_logout_test.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestRegistryLogoutFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "registry logout", false)
+}
diff --git a/helm/pkg/cmd/release_testing.go b/helm/pkg/cmd/release_testing.go
new file mode 100644
index 000000000..5a6159e7d
--- /dev/null
+++ b/helm/pkg/cmd/release_testing.go
@@ -0,0 +1,117 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const releaseTestHelp = `
+The test command runs the tests for a release.
+
+The argument this command takes is the name of a deployed release.
+The tests to be run are defined in the chart that was installed.
+`
+
+func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewReleaseTesting(cfg)
+ outfmt := output.Table
+ var outputLogs bool
+ var filter []string
+
+ cmd := &cobra.Command{
+ Use: "test [RELEASE]",
+ Short: "run tests for a release",
+ Long: releaseTestHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) (returnError error) {
+ client.Namespace = settings.Namespace()
+ notName := regexp.MustCompile(`^!\s?name=`)
+ for _, f := range filter {
+ if after, ok := strings.CutPrefix(f, "name="); ok {
+ client.Filters[action.IncludeNameFilter] = append(client.Filters[action.IncludeNameFilter], after)
+ } else if notName.MatchString(f) {
+ client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, ""))
+ }
+ }
+
+ reli, shutdown, runErr := client.Run(args[0])
+ defer func() {
+ if shutdownErr := shutdown(); shutdownErr != nil {
+ if returnError == nil {
+ returnError = shutdownErr
+ }
+ }
+ }()
+
+ // We only return an error if we weren't even able to get the
+ // release, otherwise we keep going so we can print status and logs
+ // if requested
+ if runErr != nil && reli == nil {
+ return runErr
+ }
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return err
+ }
+
+ if err := outfmt.Write(out, &statusPrinter{
+ release: rel,
+ debug: settings.Debug,
+ showMetadata: false,
+ hideNotes: true,
+ noColor: settings.ShouldDisableColor(),
+ }); err != nil {
+ return err
+ }
+
+ if outputLogs {
+ // Print a newline to stdout to separate the output
+ fmt.Fprintln(out)
+ if err := client.GetPodLogs(out, rel); err != nil {
+ return errors.Join(runErr, err)
+ }
+ }
+
+ return runErr
+ },
+ }
+
+ f := cmd.Flags()
+ f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
+ f.BoolVar(&outputLogs, "logs", false, "dump the logs from test pods (this runs after all tests are complete, but before any cleanup)")
+ f.StringSliceVar(&filter, "filter", []string{}, "specify tests by attribute (currently \"name\") using attribute=value syntax or '!attribute=value' to exclude a test (can specify multiple or separate values with commas: name=test1,name=test2)")
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/release_testing_test.go b/helm/pkg/cmd/release_testing_test.go
new file mode 100644
index 000000000..fdb5df1e9
--- /dev/null
+++ b/helm/pkg/cmd/release_testing_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestReleaseTestingCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "test", false)
+}
+
+func TestReleaseTestingFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "test", false)
+ checkFileCompletion(t, "test myrelease", false)
+}
+
+func TestReleaseTestNotesHandling(t *testing.T) {
+ // Test that ensures notes behavior is correct for test command
+ // This is a simpler test that focuses on the core functionality
+
+ rel := &release.Release{
+ Name: "test-release",
+ Namespace: "default",
+ Info: &release.Info{
+ Status: rcommon.StatusDeployed,
+ Notes: "Some important notes that should be hidden by default",
+ },
+ Chart: &chart.Chart{Metadata: &chart.Metadata{Name: "test", Version: "1.0.0"}},
+ }
+
+ // Set up storage
+ store := storageFixture()
+ store.Create(rel)
+
+ // Set up action configuration properly
+ actionConfig := &action.Configuration{
+ Releases: store,
+ KubeClient: &kubefake.FailingKubeClient{PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}},
+ Capabilities: common.DefaultCapabilities,
+ }
+
+ // Test the newReleaseTestCmd function directly
+ var buf1 bytes.Buffer
+
+ // Test 1: Default behavior (should hide notes)
+ cmd1 := newReleaseTestCmd(actionConfig, &buf1)
+ cmd1.SetArgs([]string{"test-release"})
+ err1 := cmd1.Execute()
+ if err1 != nil {
+ t.Fatalf("Unexpected error for default test: %v", err1)
+ }
+ output1 := buf1.String()
+ if strings.Contains(output1, "NOTES:") {
+ t.Errorf("Expected notes to be hidden by default, but found NOTES section in output: %s", output1)
+ }
+}
diff --git a/helm/pkg/cmd/repo.go b/helm/pkg/cmd/repo.go
new file mode 100644
index 000000000..0dc2a7175
--- /dev/null
+++ b/helm/pkg/cmd/repo.go
@@ -0,0 +1,54 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "io"
+ "io/fs"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+var repoHelm = `
+This command consists of multiple subcommands to interact with chart repositories.
+
+It can be used to add, remove, list, and index chart repositories.
+`
+
+func newRepoCmd(out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "repo add|remove|list|index|update [ARGS]",
+ Short: "add, list, remove, update, and index chart repositories",
+ Long: repoHelm,
+ Args: require.NoArgs,
+ }
+
+ cmd.AddCommand(newRepoAddCmd(out))
+ cmd.AddCommand(newRepoListCmd(out))
+ cmd.AddCommand(newRepoRemoveCmd(out))
+ cmd.AddCommand(newRepoIndexCmd(out))
+ cmd.AddCommand(newRepoUpdateCmd(out))
+
+ return cmd
+}
+
+func isNotExist(err error) bool {
+ return errors.Is(err, fs.ErrNotExist)
+}
diff --git a/helm/pkg/cmd/repo_add.go b/helm/pkg/cmd/repo_add.go
new file mode 100644
index 000000000..3fc1a7249
--- /dev/null
+++ b/helm/pkg/cmd/repo_add.go
@@ -0,0 +1,223 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/gofrs/flock"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// Repositories that have been permanently deleted and no longer work
+var deprecatedRepos = map[string]string{
+ "//kubernetes-charts.storage.googleapis.com": "https://charts.helm.sh/stable",
+ "//kubernetes-charts-incubator.storage.googleapis.com": "https://charts.helm.sh/incubator",
+}
+
+type repoAddOptions struct {
+ name string
+ url string
+ username string
+ password string
+ passwordFromStdinOpt bool
+ passCredentialsAll bool
+ forceUpdate bool
+ allowDeprecatedRepos bool
+ timeout time.Duration
+
+ certFile string
+ keyFile string
+ caFile string
+ insecureSkipTLSVerify bool
+
+ repoFile string
+ repoCache string
+}
+
+func newRepoAddCmd(out io.Writer) *cobra.Command {
+ o := &repoAddOptions{}
+
+ cmd := &cobra.Command{
+ Use: "add [NAME] [URL]",
+ Short: "add a chart repository",
+ Args: require.ExactArgs(2),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) > 1 {
+ return noMoreArgsComp()
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.name = args[0]
+ o.url = args[1]
+ o.repoFile = settings.RepositoryConfig
+ o.repoCache = settings.RepositoryCache
+
+ return o.run(out)
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVar(&o.username, "username", "", "chart repository username")
+ f.StringVar(&o.password, "password", "", "chart repository password")
+ f.BoolVarP(&o.passwordFromStdinOpt, "password-stdin", "", false, "read chart repository password from stdin")
+ f.BoolVar(&o.forceUpdate, "force-update", false, "replace (overwrite) the repo if it already exists")
+ f.StringVar(&o.certFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
+ f.StringVar(&o.keyFile, "key-file", "", "identify HTTPS client using this SSL key file")
+ f.StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
+ f.BoolVar(&o.insecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the repository")
+ f.BoolVar(&o.allowDeprecatedRepos, "allow-deprecated-repos", false, "by default, this command will not allow adding official repos that have been permanently deleted. This disables that behavior")
+ f.BoolVar(&o.passCredentialsAll, "pass-credentials", false, "pass credentials to all domains")
+ f.DurationVar(&o.timeout, "timeout", getter.DefaultHTTPTimeout*time.Second, "time to wait for the index file download to complete")
+
+ return cmd
+}
+
+func (o *repoAddOptions) run(out io.Writer) error {
+ // Block deprecated repos
+ if !o.allowDeprecatedRepos {
+ for oldURL, newURL := range deprecatedRepos {
+ if strings.Contains(o.url, oldURL) {
+ return fmt.Errorf("repo %q is no longer available; try %q instead", o.url, newURL)
+ }
+ }
+ }
+
+ // Ensure the file directory exists as it is required for file locking
+ err := os.MkdirAll(filepath.Dir(o.repoFile), os.ModePerm)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ // Acquire a file lock for process synchronization
+ repoFileExt := filepath.Ext(o.repoFile)
+ var lockPath string
+ if len(repoFileExt) > 0 && len(repoFileExt) < len(o.repoFile) {
+ lockPath = strings.TrimSuffix(o.repoFile, repoFileExt) + ".lock"
+ } else {
+ lockPath = o.repoFile + ".lock"
+ }
+ fileLock := flock.New(lockPath)
+ lockCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ locked, err := fileLock.TryLockContext(lockCtx, time.Second)
+ if err == nil && locked {
+ defer fileLock.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+
+ b, err := os.ReadFile(o.repoFile)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return err
+ }
+
+ var f repo.File
+ if err := yaml.Unmarshal(b, &f); err != nil {
+ return err
+ }
+
+ if o.username != "" && o.password == "" {
+ if o.passwordFromStdinOpt {
+ passwordFromStdin, err := io.ReadAll(os.Stdin)
+ if err != nil {
+ return err
+ }
+ password := strings.TrimSuffix(string(passwordFromStdin), "\n")
+ password = strings.TrimSuffix(password, "\r")
+ o.password = password
+ } else {
+ fd := int(os.Stdin.Fd())
+ fmt.Fprint(out, "Password: ")
+ password, err := term.ReadPassword(fd)
+ fmt.Fprintln(out)
+ if err != nil {
+ return err
+ }
+ o.password = string(password)
+ }
+ }
+
+ c := repo.Entry{
+ Name: o.name,
+ URL: o.url,
+ Username: o.username,
+ Password: o.password,
+ PassCredentialsAll: o.passCredentialsAll,
+ CertFile: o.certFile,
+ KeyFile: o.keyFile,
+ CAFile: o.caFile,
+ InsecureSkipTLSVerify: o.insecureSkipTLSVerify,
+ }
+
+ // Check if the repo name is legal
+ if strings.Contains(o.name, "/") {
+ return fmt.Errorf("repository name (%s) contains '/', please specify a different name without '/'", o.name)
+ }
+
+ // If the repo exists do one of two things:
+ // 1. If the configuration for the name is the same continue without error
+ // 2. When the config is different require --force-update
+ if !o.forceUpdate && f.Has(o.name) {
+ existing := f.Get(o.name)
+ if c != *existing {
+ // The input coming in for the name is different from what is already
+ // configured. Return an error.
+ return fmt.Errorf("repository name (%s) already exists, please specify a different name", o.name)
+ }
+
+ // The add is idempotent so do nothing
+ fmt.Fprintf(out, "%q already exists with the same configuration, skipping\n", o.name)
+ return nil
+ }
+
+ r, err := repo.NewChartRepository(&c, getter.All(settings, getter.WithTimeout(o.timeout)))
+ if err != nil {
+ return err
+ }
+
+ if o.repoCache != "" {
+ r.CachePath = o.repoCache
+ }
+ if _, err := r.DownloadIndexFile(); err != nil {
+ return fmt.Errorf("looks like %q is not a valid chart repository or cannot be reached: %w", o.url, err)
+ }
+
+ f.Update(&c)
+
+ if err := f.WriteFile(o.repoFile, 0o600); err != nil {
+ return err
+ }
+ fmt.Fprintf(out, "%q has been added to your repositories\n", o.name)
+ return nil
+}
diff --git a/helm/pkg/cmd/repo_add_test.go b/helm/pkg/cmd/repo_add_test.go
new file mode 100644
index 000000000..df9451d34
--- /dev/null
+++ b/helm/pkg/cmd/repo_add_test.go
@@ -0,0 +1,276 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestRepoAddCmd(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer srv.Stop()
+
+ // A second test server is setup to verify URL changing
+ srv2 := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer srv2.Stop()
+
+ tmpdir := filepath.Join(t.TempDir(), "path-component.yaml/data")
+ if err := os.MkdirAll(tmpdir, 0o777); err != nil {
+ t.Fatal(err)
+ }
+ repoFile := filepath.Join(tmpdir, "repositories.yaml")
+
+ tests := []cmdTestCase{
+ {
+ name: "add a repository",
+ cmd: fmt.Sprintf("repo add test-name %s --repository-config %s --repository-cache %s", srv.URL(), repoFile, tmpdir),
+ golden: "output/repo-add.txt",
+ },
+ {
+ name: "add repository second time",
+ cmd: fmt.Sprintf("repo add test-name %s --repository-config %s --repository-cache %s", srv.URL(), repoFile, tmpdir),
+ golden: "output/repo-add2.txt",
+ },
+ {
+ name: "add repository different url",
+ cmd: fmt.Sprintf("repo add test-name %s --repository-config %s --repository-cache %s", srv2.URL(), repoFile, tmpdir),
+ wantError: true,
+ },
+ {
+ name: "add repository second time",
+ cmd: fmt.Sprintf("repo add test-name %s --repository-config %s --repository-cache %s --force-update", srv2.URL(), repoFile, tmpdir),
+ golden: "output/repo-add.txt",
+ },
+ }
+
+ runTestCmd(t, tests)
+}
+
+func TestRepoAdd(t *testing.T) {
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer ts.Stop()
+
+ rootDir := t.TempDir()
+ repoFile := filepath.Join(rootDir, "repositories.yaml")
+
+ const testRepoName = "test-name"
+
+ o := &repoAddOptions{
+ name: testRepoName,
+ url: ts.URL(),
+ forceUpdate: false,
+ repoFile: repoFile,
+ }
+ t.Setenv(xdg.CacheHomeEnvVar, rootDir)
+
+ if err := o.run(io.Discard); err != nil {
+ t.Error(err)
+ }
+
+ f, err := repo.LoadFile(repoFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !f.Has(testRepoName) {
+ t.Errorf("%s was not successfully inserted into %s", testRepoName, repoFile)
+ }
+
+ idx := filepath.Join(helmpath.CachePath("repository"), helmpath.CacheIndexFile(testRepoName))
+ if _, err := os.Stat(idx); errors.Is(err, fs.ErrNotExist) {
+ t.Errorf("Error cache index file was not created for repository %s", testRepoName)
+ }
+ idx = filepath.Join(helmpath.CachePath("repository"), helmpath.CacheChartsFile(testRepoName))
+ if _, err := os.Stat(idx); errors.Is(err, fs.ErrNotExist) {
+ t.Errorf("Error cache charts file was not created for repository %s", testRepoName)
+ }
+
+ o.forceUpdate = true
+
+ if err := o.run(io.Discard); err != nil {
+ t.Errorf("Repository was not updated: %s", err)
+ }
+
+ if err := o.run(io.Discard); err != nil {
+ t.Errorf("Duplicate repository name was added")
+ }
+}
+
+func TestRepoAddCheckLegalName(t *testing.T) {
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer ts.Stop()
+ defer resetEnv()()
+
+ const testRepoName = "test-hub/test-name"
+
+ rootDir := t.TempDir()
+ repoFile := filepath.Join(t.TempDir(), "repositories.yaml")
+
+ o := &repoAddOptions{
+ name: testRepoName,
+ url: ts.URL(),
+ forceUpdate: false,
+ repoFile: repoFile,
+ }
+ t.Setenv(xdg.CacheHomeEnvVar, rootDir)
+
+ wantErrorMsg := fmt.Sprintf("repository name (%s) contains '/', please specify a different name without '/'", testRepoName)
+
+ if err := o.run(io.Discard); err != nil {
+ if wantErrorMsg != err.Error() {
+ t.Fatalf("Actual error %s, not equal to expected error %s", err, wantErrorMsg)
+ }
+ } else {
+ t.Fatalf("expect reported an error.")
+ }
+}
+
+func TestRepoAddConcurrentGoRoutines(t *testing.T) {
+ const testName = "test-name"
+ repoFile := filepath.Join(t.TempDir(), "repositories.yaml")
+ repoAddConcurrent(t, testName, repoFile)
+}
+
+func TestRepoAddConcurrentDirNotExist(t *testing.T) {
+ const testName = "test-name-2"
+ repoFile := filepath.Join(t.TempDir(), "foo", "repositories.yaml")
+ repoAddConcurrent(t, testName, repoFile)
+}
+
+func TestRepoAddConcurrentNoFileExtension(t *testing.T) {
+ const testName = "test-name-3"
+ repoFile := filepath.Join(t.TempDir(), "repositories")
+ repoAddConcurrent(t, testName, repoFile)
+}
+
+func TestRepoAddConcurrentHiddenFile(t *testing.T) {
+ const testName = "test-name-4"
+ repoFile := filepath.Join(t.TempDir(), ".repositories")
+ repoAddConcurrent(t, testName, repoFile)
+}
+
+func repoAddConcurrent(t *testing.T, testName, repoFile string) {
+ t.Helper()
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer ts.Stop()
+
+ var wg sync.WaitGroup
+ wg.Add(3)
+ for i := range 3 {
+ go func(name string) {
+ defer wg.Done()
+ o := &repoAddOptions{
+ name: name,
+ url: ts.URL(),
+ forceUpdate: false,
+ repoFile: repoFile,
+ }
+ if err := o.run(io.Discard); err != nil {
+ t.Error(err)
+ }
+ }(fmt.Sprintf("%s-%d", testName, i))
+ }
+ wg.Wait()
+
+ b, err := os.ReadFile(repoFile)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var f repo.File
+ if err := yaml.Unmarshal(b, &f); err != nil {
+ t.Error(err)
+ }
+
+ var name string
+ for i := range 3 {
+ name = fmt.Sprintf("%s-%d", testName, i)
+ if !f.Has(name) {
+ t.Errorf("%s was not successfully inserted into %s: %s", name, repoFile, f.Repositories[0])
+ }
+ }
+}
+
+func TestRepoAddFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "repo add", false)
+ checkFileCompletion(t, "repo add reponame", false)
+ checkFileCompletion(t, "repo add reponame https://example.com", false)
+}
+
+func TestRepoAddWithPasswordFromStdin(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ repotest.WithMiddleware(repotest.BasicAuthMiddleware(t)),
+ )
+ defer srv.Stop()
+
+ defer resetEnv()()
+
+ in, err := os.Open("testdata/password")
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ tmpdir := t.TempDir()
+ repoFile := filepath.Join(tmpdir, "repositories.yaml")
+
+ store := storageFixture()
+
+ const testName = "test-name"
+ const username = "username"
+ cmd := fmt.Sprintf("repo add %s %s --repository-config %s --repository-cache %s --username %s --password-stdin", testName, srv.URL(), repoFile, tmpdir, username)
+ var result string
+ _, result, err = executeActionCommandStdinC(store, in, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(result, fmt.Sprintf("\"%s\" has been added to your repositories", testName)) {
+ t.Errorf("Repo was not successfully added. Output: %s", result)
+ }
+}
diff --git a/helm/pkg/cmd/repo_index.go b/helm/pkg/cmd/repo_index.go
new file mode 100644
index 000000000..ece0ce811
--- /dev/null
+++ b/helm/pkg/cmd/repo_index.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+const repoIndexDesc = `
+Read the current directory, generate an index file based on the charts found
+and write the result to 'index.yaml' in the current directory.
+
+This tool is used for creating an 'index.yaml' file for a chart repository. To
+set an absolute URL to the charts, use '--url' flag.
+
+To merge the generated index with an existing index file, use the '--merge'
+flag. In this case, the charts found in the current directory will be merged
+into the index passed in with --merge, with local charts taking priority over
+existing charts.
+`
+
+type repoIndexOptions struct {
+ dir string
+ url string
+ merge string
+ json bool
+}
+
+func newRepoIndexCmd(out io.Writer) *cobra.Command {
+ o := &repoIndexOptions{}
+
+ cmd := &cobra.Command{
+ Use: "index [DIR]",
+ Short: "generate an index file given a directory containing packaged charts",
+ Long: repoIndexDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ // Allow file completion when completing the argument for the directory
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ // No more completions, so disable file completion
+ return noMoreArgsComp()
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.dir = args[0]
+ return o.run(out)
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVar(&o.url, "url", "", "url of chart repository")
+ f.StringVar(&o.merge, "merge", "", "merge the generated index into the given index")
+ f.BoolVar(&o.json, "json", false, "output in JSON format")
+
+ return cmd
+}
+
+func (i *repoIndexOptions) run(_ io.Writer) error {
+ path, err := filepath.Abs(i.dir)
+ if err != nil {
+ return err
+ }
+
+ return index(path, i.url, i.merge, i.json)
+}
+
+func index(dir, url, mergeTo string, json bool) error {
+ out := filepath.Join(dir, "index.yaml")
+
+ i, err := repo.IndexDirectory(dir, url)
+ if err != nil {
+ return err
+ }
+ if mergeTo != "" {
+ // if index.yaml is missing then create an empty one to merge into
+ var i2 *repo.IndexFile
+ if _, err := os.Stat(mergeTo); errors.Is(err, fs.ErrNotExist) {
+ i2 = repo.NewIndexFile()
+ writeIndexFile(i2, mergeTo, json)
+ } else {
+ i2, err = repo.LoadIndexFile(mergeTo)
+ if err != nil {
+ return fmt.Errorf("merge failed: %w", err)
+ }
+ }
+ i.Merge(i2)
+ }
+ i.SortEntries()
+ return writeIndexFile(i, out, json)
+}
+
+func writeIndexFile(i *repo.IndexFile, out string, json bool) error {
+ if json {
+ return i.WriteJSONFile(out, 0o644)
+ }
+ return i.WriteFile(out, 0o644)
+}
diff --git a/helm/pkg/cmd/repo_index_test.go b/helm/pkg/cmd/repo_index_test.go
new file mode 100644
index 000000000..c8959f21e
--- /dev/null
+++ b/helm/pkg/cmd/repo_index_test.go
@@ -0,0 +1,194 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+func TestRepoIndexCmd(t *testing.T) {
+
+ dir := t.TempDir()
+
+ comp := filepath.Join(dir, "compressedchart-0.1.0.tgz")
+ if err := linkOrCopy("testdata/testcharts/compressedchart-0.1.0.tgz", comp); err != nil {
+ t.Fatal(err)
+ }
+ comp2 := filepath.Join(dir, "compressedchart-0.2.0.tgz")
+ if err := linkOrCopy("testdata/testcharts/compressedchart-0.2.0.tgz", comp2); err != nil {
+ t.Fatal(err)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ c := newRepoIndexCmd(buf)
+
+ if err := c.RunE(c, []string{dir}); err != nil {
+ t.Error(err)
+ }
+
+ destIndex := filepath.Join(dir, "index.yaml")
+
+ index, err := repo.LoadIndexFile(destIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(index.Entries) != 1 {
+ t.Errorf("expected 1 entry, got %d: %#v", len(index.Entries), index.Entries)
+ }
+
+ vs := index.Entries["compressedchart"]
+ if len(vs) != 2 {
+ t.Errorf("expected 2 versions, got %d: %#v", len(vs), vs)
+ }
+
+ expectedVersion := "0.2.0"
+ if vs[0].Version != expectedVersion {
+ t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
+ }
+
+ b, err := os.ReadFile(destIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if json.Valid(b) {
+ t.Error("did not expect index file to be valid json")
+ }
+
+ // Test with `--json`
+
+ c.ParseFlags([]string{"--json", "true"})
+ if err := c.RunE(c, []string{dir}); err != nil {
+ t.Error(err)
+ }
+
+ if b, err = os.ReadFile(destIndex); err != nil {
+ t.Fatal(err)
+ }
+ if !json.Valid(b) {
+ t.Error("index file is not valid json")
+ }
+
+ // Test with `--merge`
+
+ // Remove first two charts.
+ if err := os.Remove(comp); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Remove(comp2); err != nil {
+ t.Fatal(err)
+ }
+ // Add a new chart and a new version of an existing chart
+ if err := linkOrCopy("testdata/testcharts/reqtest-0.1.0.tgz", filepath.Join(dir, "reqtest-0.1.0.tgz")); err != nil {
+ t.Fatal(err)
+ }
+ if err := linkOrCopy("testdata/testcharts/compressedchart-0.3.0.tgz", filepath.Join(dir, "compressedchart-0.3.0.tgz")); err != nil {
+ t.Fatal(err)
+ }
+
+ c.ParseFlags([]string{"--merge", destIndex})
+ if err := c.RunE(c, []string{dir}); err != nil {
+ t.Error(err)
+ }
+
+ index, err = repo.LoadIndexFile(destIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(index.Entries) != 2 {
+ t.Errorf("expected 2 entries, got %d: %#v", len(index.Entries), index.Entries)
+ }
+
+ vs = index.Entries["compressedchart"]
+ if len(vs) != 3 {
+ t.Errorf("expected 3 versions, got %d: %#v", len(vs), vs)
+ }
+
+ expectedVersion = "0.3.0"
+ if vs[0].Version != expectedVersion {
+ t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
+ }
+
+ // test that index.yaml gets generated on merge even when it doesn't exist
+ if err := os.Remove(destIndex); err != nil {
+ t.Fatal(err)
+ }
+
+ c.ParseFlags([]string{"--merge", destIndex})
+ if err := c.RunE(c, []string{dir}); err != nil {
+ t.Error(err)
+ }
+
+ index, err = repo.LoadIndexFile(destIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // verify it didn't create an empty index.yaml and the merged happened
+ if len(index.Entries) != 2 {
+ t.Errorf("expected 2 entries, got %d: %#v", len(index.Entries), index.Entries)
+ }
+
+ vs = index.Entries["compressedchart"]
+ if len(vs) != 1 {
+ t.Errorf("expected 1 versions, got %d: %#v", len(vs), vs)
+ }
+
+ expectedVersion = "0.3.0"
+ if vs[0].Version != expectedVersion {
+ t.Errorf("expected %q, got %q", expectedVersion, vs[0].Version)
+ }
+}
+
+func linkOrCopy(source, target string) error {
+ if err := os.Link(source, target); err != nil {
+ return copyFile(source, target)
+ }
+
+ return nil
+}
+
+func copyFile(dst, src string) error {
+ i, err := os.Open(dst)
+ if err != nil {
+ return err
+ }
+ defer i.Close()
+
+ o, err := os.Create(src)
+ if err != nil {
+ return err
+ }
+ defer o.Close()
+
+ _, err = io.Copy(o, i)
+
+ return err
+}
+
+func TestRepoIndexFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "repo index", true)
+ checkFileCompletion(t, "repo index mydir", false)
+}
diff --git a/helm/pkg/cmd/repo_list.go b/helm/pkg/cmd/repo_list.go
new file mode 100644
index 000000000..450294948
--- /dev/null
+++ b/helm/pkg/cmd/repo_list.go
@@ -0,0 +1,149 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/gosuri/uitable"
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+func newRepoListCmd(out io.Writer) *cobra.Command {
+ var outfmt output.Format
+ var noHeaders bool
+ cmd := &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "list chart repositories",
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ // The error is silently ignored. If no repository file exists, it cannot be loaded,
+ // or the file isn't the right format to be parsed the error is ignored. The
+ // repositories will be 0.
+ f, _ := repo.LoadFile(settings.RepositoryConfig)
+ if len(f.Repositories) == 0 && outfmt != output.JSON && outfmt != output.YAML {
+ fmt.Fprintln(cmd.ErrOrStderr(), "no repositories to show")
+ return nil
+ }
+
+ w := &repoListWriter{
+ repos: f.Repositories,
+ noHeaders: noHeaders,
+ }
+
+ return outfmt.Write(out, w)
+ },
+ }
+
+ cmd.Flags().BoolVar(&noHeaders, "no-headers", false, "suppress headers in the output")
+ bindOutputFlag(cmd, &outfmt)
+ return cmd
+}
+
+type repositoryElement struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+}
+
+type repoListWriter struct {
+ repos []*repo.Entry
+ noHeaders bool
+}
+
+func (r *repoListWriter) WriteTable(out io.Writer) error {
+ table := uitable.New()
+ if !r.noHeaders {
+ table.AddRow("NAME", "URL")
+ }
+ for _, re := range r.repos {
+ table.AddRow(re.Name, re.URL)
+ }
+ return output.EncodeTable(out, table)
+}
+
+func (r *repoListWriter) WriteJSON(out io.Writer) error {
+ return r.encodeByFormat(out, output.JSON)
+}
+
+func (r *repoListWriter) WriteYAML(out io.Writer) error {
+ return r.encodeByFormat(out, output.YAML)
+}
+
+func (r *repoListWriter) encodeByFormat(out io.Writer, format output.Format) error {
+ // Initialize the array so no results returns an empty array instead of null
+ repolist := make([]repositoryElement, 0, len(r.repos))
+
+ for _, re := range r.repos {
+ repolist = append(repolist, repositoryElement{Name: re.Name, URL: re.URL})
+ }
+
+ switch format {
+ case output.JSON:
+ return output.EncodeJSON(out, repolist)
+ case output.YAML:
+ return output.EncodeYAML(out, repolist)
+ default:
+ // Because this is a non-exported function and only called internally by
+ // WriteJSON and WriteYAML, we shouldn't get invalid types
+ return nil
+ }
+}
+
+// Returns all repos from repos, except those with names matching ignoredRepoNames
+// Inspired by https://stackoverflow.com/a/28701031/893211
+func filterRepos(repos []*repo.Entry, ignoredRepoNames []string) []*repo.Entry {
+ // if ignoredRepoNames is nil, just return repo
+ if ignoredRepoNames == nil {
+ return repos
+ }
+
+ filteredRepos := make([]*repo.Entry, 0)
+
+ ignored := make(map[string]bool, len(ignoredRepoNames))
+ for _, repoName := range ignoredRepoNames {
+ ignored[repoName] = true
+ }
+
+ for _, repo := range repos {
+ if _, removed := ignored[repo.Name]; !removed {
+ filteredRepos = append(filteredRepos, repo)
+ }
+ }
+
+ return filteredRepos
+}
+
+// Provide dynamic auto-completion for repo names
+func compListRepos(_ string, ignoredRepoNames []string) []string {
+ var rNames []string
+
+ f, err := repo.LoadFile(settings.RepositoryConfig)
+ if err == nil && len(f.Repositories) > 0 {
+ filteredRepos := filterRepos(f.Repositories, ignoredRepoNames)
+ for _, repo := range filteredRepos {
+ rNames = append(rNames, fmt.Sprintf("%s\t%s", repo.Name, repo.URL))
+ }
+ }
+ return rNames
+}
diff --git a/helm/pkg/cmd/repo_list_test.go b/helm/pkg/cmd/repo_list_test.go
new file mode 100644
index 000000000..94cdf3969
--- /dev/null
+++ b/helm/pkg/cmd/repo_list_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "path/filepath"
+ "testing"
+)
+
+func TestRepoListOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "repo list")
+}
+
+func TestRepoListFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "repo list", false)
+}
+
+func TestRepoList(t *testing.T) {
+ rootDir := t.TempDir()
+ repoFile := filepath.Join(rootDir, "repositories.yaml")
+ repoFile2 := "testdata/repositories.yaml"
+
+ tests := []cmdTestCase{
+ {
+ name: "list with no repos",
+ cmd: fmt.Sprintf("repo list --repository-config %s --repository-cache %s", repoFile, rootDir),
+ golden: "output/repo-list-empty.txt",
+ wantError: false,
+ },
+ {
+ name: "list with repos",
+ cmd: fmt.Sprintf("repo list --repository-config %s --repository-cache %s", repoFile2, rootDir),
+ golden: "output/repo-list.txt",
+ wantError: false,
+ },
+ {
+ name: "list without headers",
+ cmd: fmt.Sprintf("repo list --repository-config %s --repository-cache %s --no-headers", repoFile2, rootDir),
+ golden: "output/repo-list-no-headers.txt",
+ wantError: false,
+ },
+ }
+
+ runTestCmd(t, tests)
+}
diff --git a/helm/pkg/cmd/repo_remove.go b/helm/pkg/cmd/repo_remove.go
new file mode 100644
index 000000000..330e69d3a
--- /dev/null
+++ b/helm/pkg/cmd/repo_remove.go
@@ -0,0 +1,97 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+type repoRemoveOptions struct {
+ names []string
+ repoFile string
+ repoCache string
+}
+
+func newRepoRemoveCmd(out io.Writer) *cobra.Command {
+ o := &repoRemoveOptions{}
+
+ cmd := &cobra.Command{
+ Use: "remove [REPO1 [REPO2 ...]]",
+ Aliases: []string{"rm"},
+ Short: "remove one or more chart repositories",
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compListRepos(toComplete, args), cobra.ShellCompDirectiveNoFileComp
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.repoFile = settings.RepositoryConfig
+ o.repoCache = settings.RepositoryCache
+ o.names = args
+ return o.run(out)
+ },
+ }
+ return cmd
+}
+
+func (o *repoRemoveOptions) run(out io.Writer) error {
+ r, err := repo.LoadFile(o.repoFile)
+ if isNotExist(err) || len(r.Repositories) == 0 {
+ return errors.New("no repositories configured")
+ }
+
+ for _, name := range o.names {
+ if !r.Remove(name) {
+ return fmt.Errorf("no repo named %q found", name)
+ }
+ if err := r.WriteFile(o.repoFile, 0600); err != nil {
+ return err
+ }
+
+ if err := removeRepoCache(o.repoCache, name); err != nil {
+ return err
+ }
+ fmt.Fprintf(out, "%q has been removed from your repositories\n", name)
+ }
+
+ return nil
+}
+
+func removeRepoCache(root, name string) error {
+ idx := filepath.Join(root, helmpath.CacheChartsFile(name))
+ if _, err := os.Stat(idx); err == nil {
+ os.Remove(idx)
+ }
+
+ idx = filepath.Join(root, helmpath.CacheIndexFile(name))
+ if _, err := os.Stat(idx); errors.Is(err, fs.ErrNotExist) {
+ return nil
+ } else if err != nil {
+ return fmt.Errorf("can't remove index file %s: %w", idx, err)
+ }
+ return os.Remove(idx)
+}
diff --git a/helm/pkg/cmd/repo_remove_test.go b/helm/pkg/cmd/repo_remove_test.go
new file mode 100644
index 000000000..fce15bb73
--- /dev/null
+++ b/helm/pkg/cmd/repo_remove_test.go
@@ -0,0 +1,218 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestRepoRemove(t *testing.T) {
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer ts.Stop()
+
+ rootDir := t.TempDir()
+ repoFile := filepath.Join(rootDir, "repositories.yaml")
+
+ const testRepoName = "test-name"
+
+ b := bytes.NewBuffer(nil)
+
+ rmOpts := repoRemoveOptions{
+ names: []string{testRepoName},
+ repoFile: repoFile,
+ repoCache: rootDir,
+ }
+
+ if err := rmOpts.run(os.Stderr); err == nil {
+ t.Errorf("Expected error removing %s, but did not get one.", testRepoName)
+ }
+ o := &repoAddOptions{
+ name: testRepoName,
+ url: ts.URL(),
+ repoFile: repoFile,
+ }
+
+ if err := o.run(os.Stderr); err != nil {
+ t.Error(err)
+ }
+
+ cacheIndexFile, cacheChartsFile := createCacheFiles(rootDir, testRepoName)
+
+ // Reset the buffer before running repo remove
+ b.Reset()
+
+ if err := rmOpts.run(b); err != nil {
+ t.Errorf("Error removing %s from repositories", testRepoName)
+ }
+ if !strings.Contains(b.String(), "has been removed") {
+ t.Errorf("Unexpected output: %s", b.String())
+ }
+
+ testCacheFiles(t, cacheIndexFile, cacheChartsFile, testRepoName)
+
+ f, err := repo.LoadFile(repoFile)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if f.Has(testRepoName) {
+ t.Errorf("%s was not successfully removed from repositories list", testRepoName)
+ }
+
+ // Test removal of multiple repos in one go
+ var testRepoNames = []string{"foo", "bar", "baz"}
+ cacheFiles := make(map[string][]string, len(testRepoNames))
+
+ // Add test repos
+ for _, repoName := range testRepoNames {
+ o := &repoAddOptions{
+ name: repoName,
+ url: ts.URL(),
+ repoFile: repoFile,
+ }
+
+ if err := o.run(os.Stderr); err != nil {
+ t.Error(err)
+ }
+
+ cacheIndex, cacheChart := createCacheFiles(rootDir, repoName)
+ cacheFiles[repoName] = []string{cacheIndex, cacheChart}
+
+ }
+
+ // Create repo remove command
+ multiRmOpts := repoRemoveOptions{
+ names: testRepoNames,
+ repoFile: repoFile,
+ repoCache: rootDir,
+ }
+
+ // Reset the buffer before running repo remove
+ b.Reset()
+
+ // Run repo remove command
+ if err := multiRmOpts.run(b); err != nil {
+ t.Errorf("Error removing list of repos from repositories: %q", testRepoNames)
+ }
+
+ // Check that stuff were removed
+ if !strings.Contains(b.String(), "has been removed") {
+ t.Errorf("Unexpected output: %s", b.String())
+ }
+
+ for _, repoName := range testRepoNames {
+ f, err := repo.LoadFile(repoFile)
+ if err != nil {
+ t.Error(err)
+ }
+ if f.Has(repoName) {
+ t.Errorf("%s was not successfully removed from repositories list", repoName)
+ }
+ cacheIndex := cacheFiles[repoName][0]
+ cacheChart := cacheFiles[repoName][1]
+ testCacheFiles(t, cacheIndex, cacheChart, repoName)
+ }
+}
+
+func createCacheFiles(rootDir string, repoName string) (cacheIndexFile string, cacheChartsFile string) {
+ cacheIndexFile = filepath.Join(rootDir, helmpath.CacheIndexFile(repoName))
+ mf, _ := os.Create(cacheIndexFile)
+ mf.Close()
+
+ cacheChartsFile = filepath.Join(rootDir, helmpath.CacheChartsFile(repoName))
+ mf, _ = os.Create(cacheChartsFile)
+ mf.Close()
+
+ return cacheIndexFile, cacheChartsFile
+}
+
+func testCacheFiles(t *testing.T, cacheIndexFile string, cacheChartsFile string, repoName string) {
+ t.Helper()
+ if _, err := os.Stat(cacheIndexFile); err == nil {
+ t.Errorf("Error cache index file was not removed for repository %s", repoName)
+ }
+ if _, err := os.Stat(cacheChartsFile); err == nil {
+ t.Errorf("Error cache chart file was not removed for repository %s", repoName)
+ }
+}
+
+func TestRepoRemoveCompletion(t *testing.T) {
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+
+ defer ts.Stop()
+
+ rootDir := t.TempDir()
+ repoFile := filepath.Join(rootDir, "repositories.yaml")
+ repoCache := filepath.Join(rootDir, "cache/")
+
+ var testRepoNames = []string{"foo", "bar", "baz"}
+
+ // Add test repos
+ for _, repoName := range testRepoNames {
+ o := &repoAddOptions{
+ name: repoName,
+ url: ts.URL(),
+ repoFile: repoFile,
+ }
+
+ if err := o.run(os.Stderr); err != nil {
+ t.Error(err)
+ }
+ }
+
+ repoSetup := fmt.Sprintf("--repository-config %s --repository-cache %s", repoFile, repoCache)
+
+ // In the following tests, we turn off descriptions for completions by using __completeNoDesc.
+ // We have to do this because the description will contain the port used by the webserver,
+ // and that port changes each time we run the test.
+ tests := []cmdTestCase{{
+ name: "completion for repo remove",
+ cmd: fmt.Sprintf("%s __completeNoDesc repo remove ''", repoSetup),
+ golden: "output/repo_list_comp.txt",
+ }, {
+ name: "completion for repo remove, no filter",
+ cmd: fmt.Sprintf("%s __completeNoDesc repo remove fo", repoSetup),
+ golden: "output/repo_list_comp.txt",
+ }, {
+ name: "completion for repo remove repetition",
+ cmd: fmt.Sprintf("%s __completeNoDesc repo remove foo ''", repoSetup),
+ golden: "output/repo_repeat_comp.txt",
+ }}
+ for _, test := range tests {
+ runTestCmd(t, []cmdTestCase{test})
+ }
+}
+
+func TestRepoRemoveFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "repo remove", false)
+ checkFileCompletion(t, "repo remove repo1", false)
+}
diff --git a/helm/pkg/cmd/repo_test.go b/helm/pkg/cmd/repo_test.go
new file mode 100644
index 000000000..6b89a66c3
--- /dev/null
+++ b/helm/pkg/cmd/repo_test.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestRepoFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "repo", false)
+}
diff --git a/helm/pkg/cmd/repo_update.go b/helm/pkg/cmd/repo_update.go
new file mode 100644
index 000000000..f2e7c0e0f
--- /dev/null
+++ b/helm/pkg/cmd/repo_update.go
@@ -0,0 +1,176 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+const updateDesc = `
+Update gets the latest information about charts from the respective chart repositories.
+Information is cached locally, where it is used by commands like 'helm search'.
+
+You can optionally specify a list of repositories you want to update.
+ $ helm repo update ...
+To update all the repositories, use 'helm repo update'.
+`
+
+var errNoRepositories = errors.New("no repositories found. You must add one before updating")
+
+type repoUpdateOptions struct {
+ update func([]*repo.ChartRepository, io.Writer) error
+ repoFile string
+ repoCache string
+ names []string
+ timeout time.Duration
+}
+
+func newRepoUpdateCmd(out io.Writer) *cobra.Command {
+ o := &repoUpdateOptions{update: updateCharts}
+
+ cmd := &cobra.Command{
+ Use: "update [REPO1 [REPO2 ...]]",
+ Aliases: []string{"up"},
+ Short: "update information of available charts locally from chart repositories",
+ Long: updateDesc,
+ Args: require.MinimumNArgs(0),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compListRepos(toComplete, args), cobra.ShellCompDirectiveNoFileComp
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.repoFile = settings.RepositoryConfig
+ o.repoCache = settings.RepositoryCache
+ o.names = args
+ return o.run(out)
+ },
+ }
+
+ f := cmd.Flags()
+ f.DurationVar(&o.timeout, "timeout", getter.DefaultHTTPTimeout*time.Second, "time to wait for the index file download to complete")
+
+ return cmd
+}
+
+func (o *repoUpdateOptions) run(out io.Writer) error {
+ f, err := repo.LoadFile(o.repoFile)
+ switch {
+ case isNotExist(err):
+ return errNoRepositories
+ case err != nil:
+ return fmt.Errorf("failed loading file: %s: %w", o.repoFile, err)
+ case len(f.Repositories) == 0:
+ return errNoRepositories
+ }
+
+ var repos []*repo.ChartRepository
+ updateAllRepos := len(o.names) == 0
+
+ if !updateAllRepos {
+ // Fail early if the user specified an invalid repo to update
+ if err := checkRequestedRepos(o.names, f.Repositories); err != nil {
+ return err
+ }
+ }
+
+ for _, cfg := range f.Repositories {
+ if updateAllRepos || isRepoRequested(cfg.Name, o.names) {
+ r, err := repo.NewChartRepository(cfg, getter.All(settings, getter.WithTimeout(o.timeout)))
+ if err != nil {
+ return err
+ }
+ if o.repoCache != "" {
+ r.CachePath = o.repoCache
+ }
+ repos = append(repos, r)
+ }
+ }
+
+ return o.update(repos, out)
+}
+
+func updateCharts(repos []*repo.ChartRepository, out io.Writer) error {
+ fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...")
+ var wg sync.WaitGroup
+ failRepoURLChan := make(chan string, len(repos))
+
+ writeMutex := sync.Mutex{}
+ for _, re := range repos {
+ wg.Add(1)
+ go func(re *repo.ChartRepository) {
+ defer wg.Done()
+ if _, err := re.DownloadIndexFile(); err != nil {
+ writeMutex.Lock()
+ defer writeMutex.Unlock()
+ fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err)
+ failRepoURLChan <- re.Config.URL
+ } else {
+ writeMutex.Lock()
+ defer writeMutex.Unlock()
+ fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", re.Config.Name)
+ }
+ }(re)
+ }
+
+ go func() {
+ wg.Wait()
+ close(failRepoURLChan)
+ }()
+
+ var repoFailList []string
+ for url := range failRepoURLChan {
+ repoFailList = append(repoFailList, url)
+ }
+
+ if len(repoFailList) > 0 {
+ return fmt.Errorf("failed to update the following repositories: %s",
+ repoFailList)
+ }
+
+ fmt.Fprintln(out, "Update Complete. ⎈Happy Helming!⎈")
+ return nil
+}
+
+func checkRequestedRepos(requestedRepos []string, validRepos []*repo.Entry) error {
+ for _, requestedRepo := range requestedRepos {
+ found := false
+ for _, repo := range validRepos {
+ if requestedRepo == repo.Name {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("no repositories found matching '%s'. Nothing will be updated", requestedRepo)
+ }
+ }
+ return nil
+}
+
+func isRepoRequested(repoName string, requestedRepos []string) bool {
+ return slices.Contains(requestedRepos, repoName)
+}
diff --git a/helm/pkg/cmd/repo_update_test.go b/helm/pkg/cmd/repo_update_test.go
new file mode 100644
index 000000000..7aa4d414f
--- /dev/null
+++ b/helm/pkg/cmd/repo_update_test.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestUpdateCmd(t *testing.T) {
+ var out bytes.Buffer
+ // Instead of using the HTTP updater, we provide our own for this test.
+ // The TestUpdateCharts test verifies the HTTP behavior independently.
+ updater := func(repos []*repo.ChartRepository, out io.Writer) error {
+ for _, re := range repos {
+ fmt.Fprintln(out, re.Config.Name)
+ }
+ return nil
+ }
+ o := &repoUpdateOptions{
+ update: updater,
+ repoFile: "testdata/repositories.yaml",
+ }
+ if err := o.run(&out); err != nil {
+ t.Fatal(err)
+ }
+
+ if got := out.String(); !strings.Contains(got, "charts") ||
+ !strings.Contains(got, "firstexample") ||
+ !strings.Contains(got, "secondexample") {
+ t.Errorf("Expected 'charts', 'firstexample' and 'secondexample' but got %q", got)
+ }
+}
+
+func TestUpdateCmdMultiple(t *testing.T) {
+ var out bytes.Buffer
+ // Instead of using the HTTP updater, we provide our own for this test.
+ // The TestUpdateCharts test verifies the HTTP behavior independently.
+ updater := func(repos []*repo.ChartRepository, out io.Writer) error {
+ for _, re := range repos {
+ fmt.Fprintln(out, re.Config.Name)
+ }
+ return nil
+ }
+ o := &repoUpdateOptions{
+ update: updater,
+ repoFile: "testdata/repositories.yaml",
+ names: []string{"firstexample", "charts"},
+ }
+ if err := o.run(&out); err != nil {
+ t.Fatal(err)
+ }
+
+ if got := out.String(); !strings.Contains(got, "charts") ||
+ !strings.Contains(got, "firstexample") ||
+ strings.Contains(got, "secondexample") {
+ t.Errorf("Expected 'charts' and 'firstexample' but not 'secondexample' but got %q", got)
+ }
+}
+
+func TestUpdateCmdInvalid(t *testing.T) {
+ var out bytes.Buffer
+ // Instead of using the HTTP updater, we provide our own for this test.
+ // The TestUpdateCharts test verifies the HTTP behavior independently.
+ updater := func(repos []*repo.ChartRepository, out io.Writer) error {
+ for _, re := range repos {
+ fmt.Fprintln(out, re.Config.Name)
+ }
+ return nil
+ }
+ o := &repoUpdateOptions{
+ update: updater,
+ repoFile: "testdata/repositories.yaml",
+ names: []string{"firstexample", "invalid"},
+ }
+ if err := o.run(&out); err == nil {
+ t.Fatal("expected error but did not get one")
+ }
+}
+
+func TestUpdateCustomCacheCmd(t *testing.T) {
+ rootDir := t.TempDir()
+ cachePath := filepath.Join(rootDir, "updcustomcache")
+ os.Mkdir(cachePath, os.ModePerm)
+
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+
+ defer ts.Stop()
+
+ o := &repoUpdateOptions{
+ update: updateCharts,
+ repoFile: filepath.Join(ts.Root(), "repositories.yaml"),
+ repoCache: cachePath,
+ }
+ b := io.Discard
+ if err := o.run(b); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := os.Stat(filepath.Join(cachePath, "test-index.yaml")); err != nil {
+ t.Fatalf("error finding created index file in custom cache: %v", err)
+ }
+}
+
+func TestUpdateCharts(t *testing.T) {
+ defer resetEnv()()
+ ensure.HelmHome(t)
+
+ ts := repotest.NewTempServer(t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer ts.Stop()
+
+ r, err := repo.NewChartRepository(&repo.Entry{
+ Name: "charts",
+ URL: ts.URL(),
+ }, getter.All(settings))
+ if err != nil {
+ t.Error(err)
+ }
+
+ b := bytes.NewBuffer(nil)
+ updateCharts([]*repo.ChartRepository{r}, b)
+
+ got := b.String()
+ if strings.Contains(got, "Unable to get an update") {
+ t.Errorf("Failed to get a repo: %q", got)
+ }
+ if !strings.Contains(got, "Update Complete.") {
+ t.Error("Update was not successful")
+ }
+}
+
+func TestRepoUpdateFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "repo update", false)
+ checkFileCompletion(t, "repo update repo1", false)
+}
+
+func TestUpdateChartsFailWithError(t *testing.T) {
+ defer resetEnv()()
+ ensure.HelmHome(t)
+
+ ts := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testserver/*.*"),
+ )
+ defer ts.Stop()
+
+ var invalidURL = ts.URL() + "55"
+ r1, err := repo.NewChartRepository(&repo.Entry{
+ Name: "charts",
+ URL: invalidURL,
+ }, getter.All(settings))
+ if err != nil {
+ t.Error(err)
+ }
+ r2, err := repo.NewChartRepository(&repo.Entry{
+ Name: "charts",
+ URL: invalidURL,
+ }, getter.All(settings))
+ if err != nil {
+ t.Error(err)
+ }
+
+ b := bytes.NewBuffer(nil)
+ err = updateCharts([]*repo.ChartRepository{r1, r2}, b)
+ if err == nil {
+ t.Error("Repo update should return error because update of repository fails and 'fail-on-repo-update-fail' flag set")
+ return
+ }
+ var expectedErr = "failed to update the following repositories"
+ var receivedErr = err.Error()
+ if !strings.Contains(receivedErr, expectedErr) {
+ t.Errorf("Expected error (%s) but got (%s) instead", expectedErr, receivedErr)
+ }
+ if !strings.Contains(receivedErr, invalidURL) {
+ t.Errorf("Expected invalid URL (%s) in error message but got (%s) instead", invalidURL, receivedErr)
+ }
+
+ got := b.String()
+ if !strings.Contains(got, "Unable to get an update") {
+ t.Errorf("Repo should have failed update but instead got: %q", got)
+ }
+ if strings.Contains(got, "Update Complete.") {
+ t.Error("Update was not successful and should return error message because 'fail-on-repo-update-fail' flag set")
+ }
+}
diff --git a/helm/pkg/cmd/require/args.go b/helm/pkg/cmd/require/args.go
new file mode 100644
index 000000000..f5e0888f1
--- /dev/null
+++ b/helm/pkg/cmd/require/args.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package require
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *cobra.Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf(
+ "%q accepts no arguments\n\nUsage: %s",
+ cmd.CommandPath(),
+ cmd.UseLine(),
+ )
+ }
+ return nil
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) cobra.PositionalArgs {
+ return func(cmd *cobra.Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf(
+ "%q requires %d %s\n\nUsage: %s",
+ cmd.CommandPath(),
+ n,
+ pluralize("argument", n),
+ cmd.UseLine(),
+ )
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) cobra.PositionalArgs {
+ return func(cmd *cobra.Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf(
+ "%q accepts at most %d %s\n\nUsage: %s",
+ cmd.CommandPath(),
+ n,
+ pluralize("argument", n),
+ cmd.UseLine(),
+ )
+ }
+ return nil
+ }
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) cobra.PositionalArgs {
+ return func(cmd *cobra.Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf(
+ "%q requires at least %d %s\n\nUsage: %s",
+ cmd.CommandPath(),
+ n,
+ pluralize("argument", n),
+ cmd.UseLine(),
+ )
+ }
+ return nil
+ }
+}
+
+func pluralize(word string, n int) string {
+ if n == 1 {
+ return word
+ }
+ return word + "s"
+}
diff --git a/helm/pkg/cmd/require/args_test.go b/helm/pkg/cmd/require/args_test.go
new file mode 100644
index 000000000..b6c430fc0
--- /dev/null
+++ b/helm/pkg/cmd/require/args_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package require
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/spf13/cobra"
+)
+
+func TestArgs(t *testing.T) {
+ runTestCases(t, []testCase{{
+ validateFunc: NoArgs,
+ }, {
+ args: []string{"one"},
+ validateFunc: NoArgs,
+ wantError: `"root" accepts no arguments`,
+ }, {
+ args: []string{"one"},
+ validateFunc: ExactArgs(1),
+ }, {
+ validateFunc: ExactArgs(1),
+ wantError: `"root" requires 1 argument`,
+ }, {
+ validateFunc: ExactArgs(2),
+ wantError: `"root" requires 2 arguments`,
+ }, {
+ args: []string{"one"},
+ validateFunc: MaximumNArgs(1),
+ }, {
+ args: []string{"one", "two"},
+ validateFunc: MaximumNArgs(1),
+ wantError: `"root" accepts at most 1 argument`,
+ }, {
+ validateFunc: MinimumNArgs(1),
+ wantError: `"root" requires at least 1 argument`,
+ }, {
+ args: []string{"one", "two"},
+ validateFunc: MinimumNArgs(1),
+ }})
+}
+
+type testCase struct {
+ args []string
+ validateFunc cobra.PositionalArgs
+ wantError string
+}
+
+func runTestCases(t *testing.T, testCases []testCase) {
+ t.Helper()
+ for i, tc := range testCases {
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ cmd := &cobra.Command{
+ Use: "root",
+ Run: func(*cobra.Command, []string) {},
+ Args: tc.validateFunc,
+ }
+ cmd.SetArgs(tc.args)
+ cmd.SetOut(io.Discard)
+ cmd.SetErr(io.Discard)
+
+ err := cmd.Execute()
+ if tc.wantError == "" {
+ if err != nil {
+ t.Fatalf("unexpected error, got '%v'", err)
+ }
+ return
+ }
+ if !strings.Contains(err.Error(), tc.wantError) {
+ t.Fatalf("unexpected error \n\nWANT:\n%q\n\nGOT:\n%q\n", tc.wantError, err)
+ }
+ if !strings.Contains(err.Error(), "Usage:") {
+ t.Fatalf("unexpected error: want Usage string\n\nGOT:\n%q\n", err)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/cmd/rollback.go b/helm/pkg/cmd/rollback.go
new file mode 100644
index 000000000..00a2725bc
--- /dev/null
+++ b/helm/pkg/cmd/rollback.go
@@ -0,0 +1,101 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const rollbackDesc = `
+This command rolls back a release to a previous revision.
+
+The first argument of the rollback command is the name of a release, and the
+second is a revision (version) number. If this argument is omitted or set to
+0, it will roll back to the previous release.
+
+To see revision numbers, run 'helm history RELEASE'.
+`
+
+func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewRollback(cfg)
+
+ cmd := &cobra.Command{
+ Use: "rollback [REVISION]",
+ Short: "roll back a release to a previous revision",
+ Long: rollbackDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ return compListReleases(toComplete, args, cfg)
+ }
+
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+
+ return noMoreArgsComp()
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if len(args) > 1 {
+ ver, err := strconv.Atoi(args[1])
+ if err != nil {
+ return fmt.Errorf("could not convert revision to a number: %v", err)
+ }
+ client.Version = ver
+ }
+
+ dryRunStrategy, err := cmdGetDryRunFlagStrategy(cmd, false)
+ if err != nil {
+ return err
+ }
+ client.DryRunStrategy = dryRunStrategy
+
+ if err := client.Run(args[0]); err != nil {
+ return err
+ }
+
+ fmt.Fprintf(out, "Rollback was a success! Happy Helming!\n")
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement")
+ f.BoolVar(&client.ForceReplace, "force", false, "deprecated")
+ f.MarkDeprecated("force", "use --force-replace instead")
+ f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts")
+ f.StringVar(&client.ServerSideApply, "server-side", "auto", "must be \"true\", \"false\" or \"auto\". Object updates run in the server instead of the client (\"auto\" defaults the value from the previous chart release's method)")
+ f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
+ f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
+ f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
+ f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
+ f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
+ addDryRunFlag(cmd)
+ AddWaitFlag(cmd, &client.WaitStrategy)
+ cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts")
+ cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts")
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/rollback_test.go b/helm/pkg/cmd/rollback_test.go
new file mode 100644
index 000000000..116e158fd
--- /dev/null
+++ b/helm/pkg/cmd/rollback_test.go
@@ -0,0 +1,171 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestRollbackCmd(t *testing.T) {
+ rels := []*release.Release{
+ {
+ Name: "funny-honey",
+ Info: &release.Info{Status: common.StatusSuperseded},
+ Chart: &chart.Chart{},
+ Version: 1,
+ },
+ {
+ Name: "funny-honey",
+ Info: &release.Info{Status: common.StatusDeployed},
+ Chart: &chart.Chart{},
+ Version: 2,
+ },
+ }
+
+ tests := []cmdTestCase{{
+ name: "rollback a release",
+ cmd: "rollback funny-honey 1",
+ golden: "output/rollback.txt",
+ rels: rels,
+ }, {
+ name: "rollback a release with timeout",
+ cmd: "rollback funny-honey 1 --timeout 120s",
+ golden: "output/rollback-timeout.txt",
+ rels: rels,
+ }, {
+ name: "rollback a release with wait",
+ cmd: "rollback funny-honey 1 --wait",
+ golden: "output/rollback-wait.txt",
+ rels: rels,
+ }, {
+ name: "rollback a release with wait-for-jobs",
+ cmd: "rollback funny-honey 1 --wait --wait-for-jobs",
+ golden: "output/rollback-wait-for-jobs.txt",
+ rels: rels,
+ }, {
+ name: "rollback a release without revision",
+ cmd: "rollback funny-honey",
+ golden: "output/rollback-no-revision.txt",
+ rels: rels,
+ }, {
+ name: "rollback a release with non-existent version",
+ cmd: "rollback funny-honey 3",
+ golden: "output/rollback-non-existent-version.txt",
+ rels: rels,
+ wantError: true,
+ }, {
+ name: "rollback a release without release name",
+ cmd: "rollback",
+ golden: "output/rollback-no-args.txt",
+ rels: rels,
+ wantError: true,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestRollbackRevisionCompletion(t *testing.T) {
+ mk := func(name string, vers int, status common.Status) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{
+ Name: name,
+ Version: vers,
+ Status: status,
+ })
+ }
+
+ releases := []*release.Release{
+ mk("musketeers", 11, common.StatusDeployed),
+ mk("musketeers", 10, common.StatusSuperseded),
+ mk("musketeers", 9, common.StatusSuperseded),
+ mk("musketeers", 8, common.StatusSuperseded),
+ mk("carabins", 1, common.StatusSuperseded),
+ }
+
+ tests := []cmdTestCase{{
+ name: "completion for release parameter",
+ cmd: "__complete rollback ''",
+ rels: releases,
+ golden: "output/rollback-comp.txt",
+ }, {
+ name: "completion for revision parameter",
+ cmd: "__complete rollback musketeers ''",
+ rels: releases,
+ golden: "output/revision-comp.txt",
+ }, {
+ name: "completion for with too many args",
+ cmd: "__complete rollback musketeers 11 ''",
+ rels: releases,
+ golden: "output/rollback-wrong-args-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestRollbackFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "rollback", false)
+ checkFileCompletion(t, "rollback myrelease", false)
+ checkFileCompletion(t, "rollback myrelease 1", false)
+}
+
+func TestRollbackWithLabels(t *testing.T) {
+ labels1 := map[string]string{"operation": "install", "firstLabel": "firstValue"}
+ labels2 := map[string]string{"operation": "upgrade", "secondLabel": "secondValue"}
+
+ releaseName := "funny-bunny-labels"
+ rels := []*release.Release{
+ {
+ Name: releaseName,
+ Info: &release.Info{Status: common.StatusSuperseded},
+ Chart: &chart.Chart{},
+ Version: 1,
+ Labels: labels1,
+ },
+ {
+ Name: releaseName,
+ Info: &release.Info{Status: common.StatusDeployed},
+ Chart: &chart.Chart{},
+ Version: 2,
+ Labels: labels2,
+ },
+ }
+ storage := storageFixture()
+ for _, rel := range rels {
+ if err := storage.Create(rel); err != nil {
+ t.Fatal(err)
+ }
+ }
+ _, _, err := executeActionCommandC(storage, fmt.Sprintf("rollback %s 1", releaseName))
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedReli, err := storage.Get(releaseName, 3)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !reflect.DeepEqual(updatedRel.Labels, labels1) {
+ t.Errorf("Expected {%v}, got {%v}", labels1, updatedRel.Labels)
+ }
+}
diff --git a/helm/pkg/cmd/root.go b/helm/pkg/cmd/root.go
new file mode 100644
index 000000000..04ba91c1f
--- /dev/null
+++ b/helm/pkg/cmd/root.go
@@ -0,0 +1,506 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd // import "helm.sh/helm/v4/pkg/cmd"
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "log/slog"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/yaml"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/clientcmd"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/internal/tlsutil"
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cli"
+ kubefake "helm.sh/helm/v4/pkg/kube/fake"
+ "helm.sh/helm/v4/pkg/registry"
+ ri "helm.sh/helm/v4/pkg/release"
+ release "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+var globalUsage = `The Kubernetes package manager
+
+Common actions for Helm:
+
+- helm search: search for charts
+- helm pull: download a chart to your local directory to view
+- helm install: upload the chart to Kubernetes
+- helm list: list releases of charts
+
+Environment variables:
+
+| Name | Description |
+|------------------------------------|------------------------------------------------------------------------------------------------------------|
+| $HELM_CACHE_HOME | set an alternative location for storing cached files. |
+| $HELM_CONFIG_HOME | set an alternative location for storing Helm configuration. |
+| $HELM_DATA_HOME | set an alternative location for storing Helm data. |
+| $HELM_DEBUG | indicate whether or not Helm is running in Debug mode |
+| $HELM_DRIVER | set the backend storage driver. Values are: configmap, secret, memory, sql. |
+| $HELM_DRIVER_SQL_CONNECTION_STRING | set the connection string the SQL storage driver should use. |
+| $HELM_MAX_HISTORY | set the maximum number of helm release history. |
+| $HELM_NAMESPACE | set the namespace used for the helm operations. |
+| $HELM_NO_PLUGINS | disable plugins. Set HELM_NO_PLUGINS=1 to disable plugins. |
+| $HELM_PLUGINS | set the path to the plugins directory |
+| $HELM_REGISTRY_CONFIG | set the path to the registry config file. |
+| $HELM_REPOSITORY_CACHE | set the path to the repository cache directory |
+| $HELM_REPOSITORY_CONFIG | set the path to the repositories file. |
+| $KUBECONFIG | set an alternative Kubernetes configuration file (default "~/.kube/config") |
+| $HELM_KUBEAPISERVER | set the Kubernetes API Server Endpoint for authentication |
+| $HELM_KUBECAFILE | set the Kubernetes certificate authority file. |
+| $HELM_KUBEASGROUPS | set the Groups to use for impersonation using a comma-separated list. |
+| $HELM_KUBEASUSER | set the Username to impersonate for the operation. |
+| $HELM_KUBECONTEXT | set the name of the kubeconfig context. |
+| $HELM_KUBETOKEN | set the Bearer KubeToken used for authentication. |
+| $HELM_KUBEINSECURE_SKIP_TLS_VERIFY | indicate if the Kubernetes API server's certificate validation should be skipped (insecure) |
+| $HELM_KUBETLS_SERVER_NAME | set the server name used to validate the Kubernetes API server certificate |
+| $HELM_BURST_LIMIT | set the default burst limit in the case the server contains many CRDs (default 100, -1 to disable) |
+| $HELM_QPS | set the Queries Per Second in cases where a high number of calls exceed the option for higher burst values |
+| $HELM_COLOR | set color output mode. Allowed values: never, always, auto (default: never) |
+| $NO_COLOR | set to any non-empty value to disable all colored output (overrides $HELM_COLOR) |
+
+Helm stores cache, configuration, and data based on the following configuration order:
+
+- If a HELM_*_HOME environment variable is set, it will be used
+- Otherwise, on systems supporting the XDG base directory specification, the XDG variables will be used
+- When no other location is set a default location will be used based on the operating system
+
+By default, the default directories depend on the Operating System. The defaults are listed below:
+
+| Operating System | Cache Path | Configuration Path | Data Path |
+|------------------|---------------------------|--------------------------------|-------------------------|
+| Linux | $HOME/.cache/helm | $HOME/.config/helm | $HOME/.local/share/helm |
+| macOS | $HOME/Library/Caches/helm | $HOME/Library/Preferences/helm | $HOME/Library/helm |
+| Windows | %TEMP%\helm | %APPDATA%\helm | %APPDATA%\helm |
+`
+
+var settings = cli.New()
+
+func NewRootCmd(out io.Writer, args []string, logSetup func(bool)) (*cobra.Command, error) {
+ actionConfig := action.NewConfiguration()
+ cmd, err := newRootCmdWithConfig(actionConfig, out, args, logSetup)
+ if err != nil {
+ return nil, err
+ }
+ cobra.OnInitialize(func() {
+ helmDriver := os.Getenv("HELM_DRIVER")
+ if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), helmDriver); err != nil {
+ log.Fatal(err)
+ }
+ if helmDriver == "memory" {
+ loadReleasesInMemory(actionConfig)
+ }
+ actionConfig.SetHookOutputFunc(hookOutputWriter)
+ })
+ return cmd, nil
+}
+
+// SetupLogging sets up Helm logging used by the Helm client.
+// This function is passed to the NewRootCmd function to enable logging. Any other
+// application that uses the NewRootCmd function to setup all the Helm commands may
+// use this function to setup logging or their own. Using a custom logging setup function
+// enables applications using Helm commands to integrate with their existing logging
+// system.
+// The debug argument is the value if Helm is set for debugging (i.e. --debug flag)
+func SetupLogging(debug bool) {
+ logger := logging.NewLogger(func() bool { return debug })
+ slog.SetDefault(logger)
+}
+
+// configureColorOutput configures the color output based on the ColorMode setting
+func configureColorOutput(settings *cli.EnvSettings) {
+ switch settings.ColorMode {
+ case "never":
+ color.NoColor = true
+ case "always":
+ color.NoColor = false
+ case "auto":
+ // Let fatih/color handle automatic detection
+ // It will check if output is a terminal and NO_COLOR env var
+ // We don't need to do anything here
+ }
+}
+
+func newRootCmdWithConfig(actionConfig *action.Configuration, out io.Writer, args []string, logSetup func(bool)) (*cobra.Command, error) {
+ cmd := &cobra.Command{
+ Use: "helm",
+ Short: "The Helm package manager for Kubernetes.",
+ Long: globalUsage,
+ SilenceUsage: true,
+ PersistentPreRun: func(_ *cobra.Command, _ []string) {
+ if err := startProfiling(); err != nil {
+ log.Printf("Warning: Failed to start profiling: %v", err)
+ }
+ },
+ PersistentPostRun: func(_ *cobra.Command, _ []string) {
+ if err := stopProfiling(); err != nil {
+ log.Printf("Warning: Failed to stop profiling: %v", err)
+ }
+ },
+ }
+
+ flags := cmd.PersistentFlags()
+
+ settings.AddFlags(flags)
+ addKlogFlags(flags)
+
+ // We can safely ignore any errors that flags.Parse encounters since
+ // those errors will be caught later during the call to cmd.Execution.
+ // This call is required to gather configuration information prior to
+ // execution.
+ flags.ParseErrorsAllowlist.UnknownFlags = true
+ flags.Parse(args)
+
+ logSetup(settings.Debug)
+
+ // newRootCmdWithConfig is only called from NewRootCmd. NewRootCmd sets up
+ // NewConfiguration without a custom logger. So, the slog default is used. logSetup
+ // can change the default logger to the one in the logger package. This happens for
+ // the Helm client. This means the actionConfig logger is different from the slog
+ // default logger. If they are different we sync the actionConfig logger to the slog
+ // current default one.
+ if actionConfig.Logger() != slog.Default() {
+ actionConfig.SetLogger(slog.Default().Handler())
+ }
+
+ // Validate color mode setting
+ switch settings.ColorMode {
+ case "never", "auto", "always":
+ // Valid color mode
+ default:
+ return nil, fmt.Errorf("invalid color mode %q: must be one of: never, auto, always", settings.ColorMode)
+ }
+
+ // Configure color output based on ColorMode setting
+ configureColorOutput(settings)
+
+ // Setup shell completion for the color flag
+ _ = cmd.RegisterFlagCompletionFunc("color", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"never", "auto", "always"}, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ // Setup shell completion for the colour flag
+ _ = cmd.RegisterFlagCompletionFunc("colour", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{"never", "auto", "always"}, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ // Setup shell completion for the namespace flag
+ err := cmd.RegisterFlagCompletionFunc("namespace", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if client, err := actionConfig.KubernetesClientSet(); err == nil {
+ // Choose a long enough timeout that the user notices something is not working
+ // but short enough that the user is not made to wait very long
+ to := int64(3)
+ cobra.CompDebugln(fmt.Sprintf("About to call kube client for namespaces with timeout of: %d", to), settings.Debug)
+
+ nsNames := []string{}
+ if namespaces, err := client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{TimeoutSeconds: &to}); err == nil {
+ for _, ns := range namespaces.Items {
+ nsNames = append(nsNames, ns.Name)
+ }
+ return nsNames, cobra.ShellCompDirectiveNoFileComp
+ }
+ }
+ return nil, cobra.ShellCompDirectiveDefault
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Setup shell completion for the kube-context flag
+ err = cmd.RegisterFlagCompletionFunc("kube-context", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ cobra.CompDebugln("About to get the different kube-contexts", settings.Debug)
+
+ loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+ if len(settings.KubeConfig) > 0 {
+ loadingRules = &clientcmd.ClientConfigLoadingRules{ExplicitPath: settings.KubeConfig}
+ }
+ if config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+ loadingRules,
+ &clientcmd.ConfigOverrides{}).RawConfig(); err == nil {
+ comps := []string{}
+ for name, context := range config.Contexts {
+ comps = append(comps, fmt.Sprintf("%s\t%s", name, context.Cluster))
+ }
+ return comps, cobra.ShellCompDirectiveNoFileComp
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ registryClient, err := newDefaultRegistryClient(false, "", "")
+ if err != nil {
+ return nil, err
+ }
+ actionConfig.RegistryClient = registryClient
+
+ // Add subcommands
+ cmd.AddCommand(
+ // chart commands
+ newCreateCmd(out),
+ newDependencyCmd(actionConfig, out),
+ newPullCmd(actionConfig, out),
+ newShowCmd(actionConfig, out),
+ newLintCmd(out),
+ newPackageCmd(out),
+ newRepoCmd(out),
+ newSearchCmd(out),
+ newVerifyCmd(out),
+
+ // release commands
+ newGetCmd(actionConfig, out),
+ newHistoryCmd(actionConfig, out),
+ newInstallCmd(actionConfig, out),
+ newListCmd(actionConfig, out),
+ newReleaseTestCmd(actionConfig, out),
+ newRollbackCmd(actionConfig, out),
+ newStatusCmd(actionConfig, out),
+ newTemplateCmd(actionConfig, out),
+ newUninstallCmd(actionConfig, out),
+ newUpgradeCmd(actionConfig, out),
+
+ newCompletionCmd(out),
+ newEnvCmd(out),
+ newPluginCmd(out),
+ newVersionCmd(out),
+
+ // Hidden documentation generator command: 'helm docs'
+ newDocsCmd(out),
+ )
+
+ cmd.AddCommand(
+ newRegistryCmd(actionConfig, out),
+ newPushCmd(actionConfig, out),
+ )
+
+ // Find and add CLI plugins
+ loadCLIPlugins(cmd, out)
+
+ // Check for expired repositories
+ checkForExpiredRepos(settings.RepositoryConfig)
+
+ return cmd, nil
+}
+
+// This function loads releases into the memory storage if the
+// environment variable is properly set.
+func loadReleasesInMemory(actionConfig *action.Configuration) {
+ filePaths := strings.Split(os.Getenv("HELM_MEMORY_DRIVER_DATA"), ":")
+ if len(filePaths) == 0 {
+ return
+ }
+
+ store := actionConfig.Releases
+ mem, ok := store.Driver.(*driver.Memory)
+ if !ok {
+ // For an unexpected reason we are not dealing with the memory storage driver.
+ return
+ }
+
+ actionConfig.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard}
+
+ for _, path := range filePaths {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ log.Fatal("Unable to read memory driver data", err)
+ }
+
+ releases := []*release.Release{}
+ if err := yaml.Unmarshal(b, &releases); err != nil {
+ log.Fatal("Unable to unmarshal memory driver data: ", err)
+ }
+
+ for _, rel := range releases {
+ if err := store.Create(rel); err != nil {
+ log.Fatal(err)
+ }
+ }
+ }
+ // Must reset namespace to the proper one
+ mem.SetNamespace(settings.Namespace())
+}
+
+// hookOutputWriter provides the writer for writing hook logs.
+func hookOutputWriter(_, _, _ string) io.Writer {
+ return log.Writer()
+}
+
+func checkForExpiredRepos(repofile string) {
+
+ expiredRepos := []struct {
+ name string
+ old string
+ new string
+ }{
+ {
+ name: "stable",
+ old: "kubernetes-charts.storage.googleapis.com",
+ new: "https://charts.helm.sh/stable",
+ },
+ {
+ name: "incubator",
+ old: "kubernetes-charts-incubator.storage.googleapis.com",
+ new: "https://charts.helm.sh/incubator",
+ },
+ }
+
+ // parse repo file.
+ // Ignore the error because it is okay for a repo file to be unparsable at this
+ // stage. Later checks will trap the error and respond accordingly.
+ repoFile, err := repo.LoadFile(repofile)
+ if err != nil {
+ return
+ }
+
+ for _, exp := range expiredRepos {
+ r := repoFile.Get(exp.name)
+ if r == nil {
+ return
+ }
+
+ if url := r.URL; strings.Contains(url, exp.old) {
+ fmt.Fprintf(
+ os.Stderr,
+ "WARNING: %q is deprecated for %q and will be deleted Nov. 13, 2020.\nWARNING: You should switch to %q via:\nWARNING: helm repo add %q %q --force-update\n",
+ exp.old,
+ exp.name,
+ exp.new,
+ exp.name,
+ exp.new,
+ )
+ }
+ }
+
+}
+
+func newRegistryClient(
+ certFile, keyFile, caFile string, insecureSkipTLSVerify, plainHTTP bool, username, password string,
+) (*registry.Client, error) {
+ if certFile != "" && keyFile != "" || caFile != "" || insecureSkipTLSVerify {
+ registryClient, err := newRegistryClientWithTLS(certFile, keyFile, caFile, insecureSkipTLSVerify, username, password)
+ if err != nil {
+ return nil, err
+ }
+ return registryClient, nil
+ }
+ registryClient, err := newDefaultRegistryClient(plainHTTP, username, password)
+ if err != nil {
+ return nil, err
+ }
+ return registryClient, nil
+}
+
+func newDefaultRegistryClient(plainHTTP bool, username, password string) (*registry.Client, error) {
+ opts := []registry.ClientOption{
+ registry.ClientOptDebug(settings.Debug),
+ registry.ClientOptEnableCache(true),
+ registry.ClientOptWriter(os.Stderr),
+ registry.ClientOptCredentialsFile(settings.RegistryConfig),
+ registry.ClientOptBasicAuth(username, password),
+ }
+ if plainHTTP {
+ opts = append(opts, registry.ClientOptPlainHTTP())
+ }
+
+ // Create a new registry client
+ registryClient, err := registry.NewClient(opts...)
+ if err != nil {
+ return nil, err
+ }
+ return registryClient, nil
+}
+
+func newRegistryClientWithTLS(
+ certFile, keyFile, caFile string, insecureSkipTLSVerify bool, username, password string,
+) (*registry.Client, error) {
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(insecureSkipTLSVerify),
+ tlsutil.WithCertKeyPairFiles(certFile, keyFile),
+ tlsutil.WithCAFile(caFile),
+ )
+
+ if err != nil {
+ return nil, fmt.Errorf("can't create TLS config for client: %w", err)
+ }
+
+ // Create a new registry client
+ registryClient, err := registry.NewClient(
+ registry.ClientOptDebug(settings.Debug),
+ registry.ClientOptEnableCache(true),
+ registry.ClientOptWriter(os.Stderr),
+ registry.ClientOptCredentialsFile(settings.RegistryConfig),
+ registry.ClientOptHTTPClient(&http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConf,
+ Proxy: http.ProxyFromEnvironment,
+ },
+ }),
+ registry.ClientOptBasicAuth(username, password),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return registryClient, nil
+}
+
+type CommandError struct {
+ error
+ ExitCode int
+}
+
+// releaserToV1Release is a helper function to convert a v1 release passed by interface
+// into the type object.
+func releaserToV1Release(rel ri.Releaser) (*release.Release, error) {
+ switch r := rel.(type) {
+ case release.Release:
+ return &r, nil
+ case *release.Release:
+ return r, nil
+ case nil:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unsupported release type: %T", rel)
+ }
+}
+
+func releaseListToV1List(ls []ri.Releaser) ([]*release.Release, error) {
+ rls := make([]*release.Release, 0, len(ls))
+ for _, val := range ls {
+ rel, err := releaserToV1Release(val)
+ if err != nil {
+ return nil, err
+ }
+ rls = append(rls, rel)
+ }
+
+ return rls, nil
+}
diff --git a/helm/pkg/cmd/root_test.go b/helm/pkg/cmd/root_test.go
new file mode 100644
index 000000000..316e6bd2e
--- /dev/null
+++ b/helm/pkg/cmd/root_test.go
@@ -0,0 +1,151 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+func TestRootCmd(t *testing.T) {
+ defer resetEnv()()
+
+ tests := []struct {
+ name, args, cachePath, configPath, dataPath string
+ envvars map[string]string
+ }{
+ {
+ name: "defaults",
+ args: "env",
+ },
+ {
+ name: "with $XDG_CACHE_HOME set",
+ args: "env",
+ envvars: map[string]string{xdg.CacheHomeEnvVar: "/bar"},
+ cachePath: "/bar/helm",
+ },
+ {
+ name: "with $XDG_CONFIG_HOME set",
+ args: "env",
+ envvars: map[string]string{xdg.ConfigHomeEnvVar: "/bar"},
+ configPath: "/bar/helm",
+ },
+ {
+ name: "with $XDG_DATA_HOME set",
+ args: "env",
+ envvars: map[string]string{xdg.DataHomeEnvVar: "/bar"},
+ dataPath: "/bar/helm",
+ },
+ {
+ name: "with $HELM_CACHE_HOME set",
+ args: "env",
+ envvars: map[string]string{helmpath.CacheHomeEnvVar: "/foo/helm"},
+ cachePath: "/foo/helm",
+ },
+ {
+ name: "with $HELM_CONFIG_HOME set",
+ args: "env",
+ envvars: map[string]string{helmpath.ConfigHomeEnvVar: "/foo/helm"},
+ configPath: "/foo/helm",
+ },
+ {
+ name: "with $HELM_DATA_HOME set",
+ args: "env",
+ envvars: map[string]string{helmpath.DataHomeEnvVar: "/foo/helm"},
+ dataPath: "/foo/helm",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ensure.HelmHome(t)
+
+ for k, v := range tt.envvars {
+ t.Setenv(k, v)
+ }
+
+ if _, _, err := executeActionCommand(tt.args); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // NOTE(bacongobbler): we need to check here after calling ensure.HelmHome so we
+ // load the proper paths after XDG_*_HOME is set
+ if tt.cachePath == "" {
+ tt.cachePath = filepath.Join(os.Getenv(xdg.CacheHomeEnvVar), "helm")
+ }
+
+ if tt.configPath == "" {
+ tt.configPath = filepath.Join(os.Getenv(xdg.ConfigHomeEnvVar), "helm")
+ }
+
+ if tt.dataPath == "" {
+ tt.dataPath = filepath.Join(os.Getenv(xdg.DataHomeEnvVar), "helm")
+ }
+
+ if helmpath.CachePath() != tt.cachePath {
+ t.Errorf("expected cache path %q, got %q", tt.cachePath, helmpath.CachePath())
+ }
+ if helmpath.ConfigPath() != tt.configPath {
+ t.Errorf("expected config path %q, got %q", tt.configPath, helmpath.ConfigPath())
+ }
+ if helmpath.DataPath() != tt.dataPath {
+ t.Errorf("expected data path %q, got %q", tt.dataPath, helmpath.DataPath())
+ }
+ })
+ }
+}
+
+func TestUnknownSubCmd(t *testing.T) {
+ _, _, err := executeActionCommand("foobar")
+
+ if err == nil || err.Error() != `unknown command "foobar" for "helm"` {
+ t.Errorf("Expect unknown command error, got %q", err)
+ }
+}
+
+// Need the release of Cobra following 1.0 to be able to disable
+// file completion on the root command. Until then, we cannot
+// because it would break 'helm help '
+//
+// func TestRootFileCompletion(t *testing.T) {
+// checkFileCompletion(t, "", false)
+// }
+
+func TestRootCmdLogger(t *testing.T) {
+ args := []string{}
+ buf := new(bytes.Buffer)
+ actionConfig := action.NewConfiguration()
+ _, err := newRootCmdWithConfig(actionConfig, buf, args, SetupLogging)
+ if err != nil {
+ t.Errorf("expected no error, got: '%v'", err)
+ }
+
+ l1 := actionConfig.Logger()
+ l2 := slog.Default()
+
+ if l1.Handler() != l2.Handler() {
+ t.Error("expected actionConfig logger to be the slog default logger")
+ }
+}
diff --git a/helm/pkg/cmd/search.go b/helm/pkg/cmd/search.go
new file mode 100644
index 000000000..4d110286d
--- /dev/null
+++ b/helm/pkg/cmd/search.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "io"
+
+ "github.com/spf13/cobra"
+)
+
+const searchDesc = `
+Search provides the ability to search for Helm charts in the various places
+they can be stored including the Artifact Hub and repositories you have added.
+Use search subcommands to search different locations for charts.
+`
+
+func newSearchCmd(out io.Writer) *cobra.Command {
+
+ cmd := &cobra.Command{
+ Use: "search [keyword]",
+ Short: "search for a keyword in charts",
+ Long: searchDesc,
+ }
+
+ cmd.AddCommand(newSearchHubCmd(out))
+ cmd.AddCommand(newSearchRepoCmd(out))
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/search/search.go b/helm/pkg/cmd/search/search.go
new file mode 100644
index 000000000..1c7bb1d06
--- /dev/null
+++ b/helm/pkg/cmd/search/search.go
@@ -0,0 +1,227 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package search provides client-side repository searching.
+
+This supports building an in-memory search index based on the contents of
+multiple repositories, and then using string matching or regular expressions
+to find matches.
+*/
+package search
+
+import (
+ "path"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/Masterminds/semver/v3"
+
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// Result is a search result.
+//
+// Score indicates how close it is to match. The higher the score, the longer
+// the distance.
+type Result struct {
+ Name string
+ Score int
+ Chart *repo.ChartVersion
+}
+
+// Index is a searchable index of chart information.
+type Index struct {
+ lines map[string]string
+ charts map[string]*repo.ChartVersion
+}
+
+const sep = "\v"
+
+// NewIndex creates a new Index.
+func NewIndex() *Index {
+ return &Index{lines: map[string]string{}, charts: map[string]*repo.ChartVersion{}}
+}
+
+// verSep is a separator for version fields in map keys.
+const verSep = "$$"
+
+// AddRepo adds a repository index to the search index.
+func (i *Index) AddRepo(rname string, ind *repo.IndexFile, all bool) {
+ ind.SortEntries()
+ for name, ref := range ind.Entries {
+ if len(ref) == 0 {
+ // Skip chart names that have zero releases.
+ continue
+ }
+ // By convention, an index file is supposed to have the newest at the
+ // 0 slot, so our best bet is to grab the 0 entry and build the index
+ // entry off of that.
+ // Note: Do not use filePath.Join since on Windows it will return \
+ // which results in a repo name that cannot be understood.
+ fname := path.Join(rname, name)
+ if !all {
+ i.lines[fname] = indstr(rname, ref[0])
+ i.charts[fname] = ref[0]
+ continue
+ }
+
+ // If 'all' is set, then we go through all of the refs, and add them all
+ // to the index. This will generate a lot of near-duplicate entries.
+ for _, rr := range ref {
+ versionedName := fname + verSep + rr.Version
+ i.lines[versionedName] = indstr(rname, rr)
+ i.charts[versionedName] = rr
+ }
+ }
+}
+
+// All returns all charts in the index as if they were search results.
+//
+// Each will be given a score of 0.
+func (i *Index) All() []*Result {
+ res := make([]*Result, len(i.charts))
+ j := 0
+ for name, ch := range i.charts {
+ parts := strings.Split(name, verSep)
+ res[j] = &Result{
+ Name: parts[0],
+ Chart: ch,
+ }
+ j++
+ }
+ return res
+}
+
+// Search searches an index for the given term.
+//
+// Threshold indicates the maximum score a term may have before being marked
+// irrelevant. (Low score means higher relevance. Golf, not bowling.)
+//
+// If regexp is true, the term is treated as a regular expression. Otherwise,
+// term is treated as a literal string.
+func (i *Index) Search(term string, threshold int, regexp bool) ([]*Result, error) {
+ if regexp {
+ return i.SearchRegexp(term, threshold)
+ }
+ return i.SearchLiteral(term, threshold), nil
+}
+
+// calcScore calculates a score for a match.
+func (i *Index) calcScore(index int, matchline string) int {
+
+ // This is currently tied to the fact that sep is a single char.
+ splits := []int{}
+ s := rune(sep[0])
+ for i, ch := range matchline {
+ if ch == s {
+ splits = append(splits, i)
+ }
+ }
+
+ for i, pos := range splits {
+ if index > pos {
+ continue
+ }
+ return i
+ }
+ return len(splits)
+}
+
+// SearchLiteral does a literal string search (no regexp).
+func (i *Index) SearchLiteral(term string, threshold int) []*Result {
+ term = strings.ToLower(term)
+ buf := []*Result{}
+ for k, v := range i.lines {
+ lv := strings.ToLower(v)
+ res := strings.Index(lv, term)
+ if score := i.calcScore(res, lv); res != -1 && score < threshold {
+ parts := strings.Split(k, verSep) // Remove version, if it is there.
+ buf = append(buf, &Result{Name: parts[0], Score: score, Chart: i.charts[k]})
+ }
+ }
+ return buf
+}
+
+// SearchRegexp searches using a regular expression.
+func (i *Index) SearchRegexp(re string, threshold int) ([]*Result, error) {
+ matcher, err := regexp.Compile(re)
+ if err != nil {
+ return []*Result{}, err
+ }
+ buf := []*Result{}
+ for k, v := range i.lines {
+ ind := matcher.FindStringIndex(v)
+ if len(ind) == 0 {
+ continue
+ }
+ if score := i.calcScore(ind[0], v); ind[0] >= 0 && score < threshold {
+ parts := strings.Split(k, verSep) // Remove version, if it is there.
+ buf = append(buf, &Result{Name: parts[0], Score: score, Chart: i.charts[k]})
+ }
+ }
+ return buf, nil
+}
+
+// SortScore does an in-place sort of the results.
+//
+// Lowest scores are highest on the list. Matching scores are subsorted alphabetically.
+func SortScore(r []*Result) {
+ sort.Sort(scoreSorter(r))
+}
+
+// scoreSorter sorts results by score, and subsorts by alpha Name.
+type scoreSorter []*Result
+
+// Len returns the length of this scoreSorter.
+func (s scoreSorter) Len() int { return len(s) }
+
+// Swap performs an in-place swap.
+func (s scoreSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// Less compares a to b, and returns true if a is less than b.
+func (s scoreSorter) Less(a, b int) bool {
+ first := s[a]
+ second := s[b]
+
+ if first.Score > second.Score {
+ return false
+ }
+ if first.Score < second.Score {
+ return true
+ }
+ if first.Name == second.Name {
+ v1, err := semver.NewVersion(first.Chart.Version)
+ if err != nil {
+ return true
+ }
+ v2, err := semver.NewVersion(second.Chart.Version)
+ if err != nil {
+ return true
+ }
+ // Sort so that the newest chart is higher than the oldest chart. This is
+ // the opposite of what you'd expect in a function called Less.
+ return v1.GreaterThan(v2)
+ }
+ return first.Name < second.Name
+}
+
+func indstr(name string, ref *repo.ChartVersion) string {
+ i := ref.Name + sep + name + "/" + ref.Name + sep +
+ ref.Description + sep + strings.Join(ref.Keywords, " ")
+ return i
+}
diff --git a/helm/pkg/cmd/search/search_test.go b/helm/pkg/cmd/search/search_test.go
new file mode 100644
index 000000000..b3220394f
--- /dev/null
+++ b/helm/pkg/cmd/search/search_test.go
@@ -0,0 +1,311 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package search
+
+import (
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+func TestSortScore(t *testing.T) {
+ in := []*Result{
+ {Name: "bbb", Score: 0, Chart: &repo.ChartVersion{Metadata: &chart.Metadata{Version: "1.2.3"}}},
+ {Name: "aaa", Score: 5},
+ {Name: "abb", Score: 5},
+ {Name: "aab", Score: 0},
+ {Name: "bab", Score: 5},
+ {Name: "ver", Score: 5, Chart: &repo.ChartVersion{Metadata: &chart.Metadata{Version: "1.2.4"}}},
+ {Name: "ver", Score: 5, Chart: &repo.ChartVersion{Metadata: &chart.Metadata{Version: "1.2.3"}}},
+ }
+ expect := []string{"aab", "bbb", "aaa", "abb", "bab", "ver", "ver"}
+ expectScore := []int{0, 0, 5, 5, 5, 5, 5}
+ SortScore(in)
+
+ // Test Score
+ for i := range expectScore {
+ if expectScore[i] != in[i].Score {
+ t.Errorf("Sort error on index %d: expected %d, got %d", i, expectScore[i], in[i].Score)
+ }
+ }
+ // Test Name
+ for i := range expect {
+ if expect[i] != in[i].Name {
+ t.Errorf("Sort error: expected %s, got %s", expect[i], in[i].Name)
+ }
+ }
+
+ // Test version of last two items
+ if in[5].Chart.Version != "1.2.4" {
+ t.Errorf("Expected 1.2.4, got %s", in[5].Chart.Version)
+ }
+ if in[6].Chart.Version != "1.2.3" {
+ t.Error("Expected 1.2.3 to be last")
+ }
+}
+
+var indexfileEntries = map[string]repo.ChartVersions{
+ "niña": {
+ {
+ URLs: []string{"http://example.com/charts/nina-0.1.0.tgz"},
+ Metadata: &chart.Metadata{
+ Name: "niña",
+ Version: "0.1.0",
+ Description: "One boat",
+ },
+ },
+ },
+ "pinta": {
+ {
+ URLs: []string{"http://example.com/charts/pinta-0.1.0.tgz"},
+ Metadata: &chart.Metadata{
+ Name: "pinta",
+ Version: "0.1.0",
+ Description: "Two ship",
+ },
+ },
+ },
+ "santa-maria": {
+ {
+ URLs: []string{"http://example.com/charts/santa-maria-1.2.3.tgz"},
+ Metadata: &chart.Metadata{
+ Name: "santa-maria",
+ Version: "1.2.3",
+ Description: "Three boat",
+ },
+ },
+ {
+ URLs: []string{"http://example.com/charts/santa-maria-1.2.2-rc-1.tgz"},
+ Metadata: &chart.Metadata{
+ Name: "santa-maria",
+ Version: "1.2.2-RC-1",
+ Description: "Three boat",
+ },
+ },
+ },
+}
+
+func loadTestIndex(_ *testing.T, all bool) *Index {
+ i := NewIndex()
+ i.AddRepo("testing", &repo.IndexFile{Entries: indexfileEntries}, all)
+ i.AddRepo("ztesting", &repo.IndexFile{Entries: map[string]repo.ChartVersions{
+ "Pinta": {
+ {
+ URLs: []string{"http://example.com/charts/pinta-2.0.0.tgz"},
+ Metadata: &chart.Metadata{
+ Name: "Pinta",
+ Version: "2.0.0",
+ Description: "Two ship, version two",
+ },
+ },
+ },
+ }}, all)
+ return i
+}
+
+func TestAll(t *testing.T) {
+ i := loadTestIndex(t, false)
+ all := i.All()
+ if len(all) != 4 {
+ t.Errorf("Expected 4 entries, got %d", len(all))
+ }
+
+ i = loadTestIndex(t, true)
+ all = i.All()
+ if len(all) != 5 {
+ t.Errorf("Expected 5 entries, got %d", len(all))
+ }
+}
+
+func TestAddRepo_Sort(t *testing.T) {
+ i := loadTestIndex(t, true)
+ sr, err := i.Search("TESTING/SANTA-MARIA", 100, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ SortScore(sr)
+
+ ch := sr[0]
+ expect := "1.2.3"
+ if ch.Chart.Version != expect {
+ t.Errorf("Expected %q, got %q", expect, ch.Chart.Version)
+ }
+}
+
+func TestSearchByName(t *testing.T) {
+
+ tests := []struct {
+ name string
+ query string
+ expect []*Result
+ regexp bool
+ fail bool
+ failMsg string
+ }{
+ {
+ name: "basic search for one result",
+ query: "santa-maria",
+ expect: []*Result{
+ {Name: "testing/santa-maria"},
+ },
+ },
+ {
+ name: "basic search for two results",
+ query: "pinta",
+ expect: []*Result{
+ {Name: "testing/pinta"},
+ {Name: "ztesting/Pinta"},
+ },
+ },
+ {
+ name: "repo-specific search for one result",
+ query: "ztesting/pinta",
+ expect: []*Result{
+ {Name: "ztesting/Pinta"},
+ },
+ },
+ {
+ name: "partial name search",
+ query: "santa",
+ expect: []*Result{
+ {Name: "testing/santa-maria"},
+ },
+ },
+ {
+ name: "description search, one result",
+ query: "Three",
+ expect: []*Result{
+ {Name: "testing/santa-maria"},
+ },
+ },
+ {
+ name: "description search, two results",
+ query: "two",
+ expect: []*Result{
+ {Name: "testing/pinta"},
+ {Name: "ztesting/Pinta"},
+ },
+ },
+ {
+ name: "search mixedCase and result should be mixedCase too",
+ query: "pinta",
+ expect: []*Result{
+ {Name: "testing/pinta"},
+ {Name: "ztesting/Pinta"},
+ },
+ },
+ {
+ name: "description upper search, two results",
+ query: "TWO",
+ expect: []*Result{
+ {Name: "testing/pinta"},
+ {Name: "ztesting/Pinta"},
+ },
+ },
+ {
+ name: "nothing found",
+ query: "mayflower",
+ expect: []*Result{},
+ },
+ {
+ name: "regexp, one result",
+ query: "Th[ref]*",
+ expect: []*Result{
+ {Name: "testing/santa-maria"},
+ },
+ regexp: true,
+ },
+ {
+ name: "regexp, fail compile",
+ query: "th[",
+ expect: []*Result{},
+ regexp: true,
+ fail: true,
+ failMsg: "error parsing regexp:",
+ },
+ }
+
+ i := loadTestIndex(t, false)
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ charts, err := i.Search(tt.query, 100, tt.regexp)
+ if err != nil {
+ if tt.fail {
+ if !strings.Contains(err.Error(), tt.failMsg) {
+ t.Fatalf("Unexpected error message: %s", err)
+ }
+ return
+ }
+ t.Fatalf("%s: %s", tt.name, err)
+ }
+ // Give us predictably ordered results.
+ SortScore(charts)
+
+ l := len(charts)
+ if l != len(tt.expect) {
+ t.Fatalf("Expected %d result, got %d", len(tt.expect), l)
+ }
+ // For empty result sets, just keep going.
+ if l == 0 {
+ return
+ }
+
+ for i, got := range charts {
+ ex := tt.expect[i]
+ if got.Name != ex.Name {
+ t.Errorf("[%d]: Expected name %q, got %q", i, ex.Name, got.Name)
+ }
+ }
+
+ })
+ }
+}
+
+func TestSearchByNameAll(t *testing.T) {
+ // Test with the All bit turned on.
+ i := loadTestIndex(t, true)
+ cs, err := i.Search("santa-maria", 100, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(cs) != 2 {
+ t.Errorf("expected 2 charts, got %d", len(cs))
+ }
+}
+
+func TestCalcScore(t *testing.T) {
+ i := NewIndex()
+
+ fields := []string{"aaa", "bbb", "ccc", "ddd"}
+ matchline := strings.Join(fields, sep)
+ if r := i.calcScore(2, matchline); r != 0 {
+ t.Errorf("Expected 0, got %d", r)
+ }
+ if r := i.calcScore(5, matchline); r != 1 {
+ t.Errorf("Expected 1, got %d", r)
+ }
+ if r := i.calcScore(10, matchline); r != 2 {
+ t.Errorf("Expected 2, got %d", r)
+ }
+ if r := i.calcScore(14, matchline); r != 3 {
+ t.Errorf("Expected 3, got %d", r)
+ }
+}
diff --git a/helm/pkg/cmd/search_hub.go b/helm/pkg/cmd/search_hub.go
new file mode 100644
index 000000000..bb2ff6038
--- /dev/null
+++ b/helm/pkg/cmd/search_hub.go
@@ -0,0 +1,199 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log/slog"
+ "strings"
+
+ "github.com/gosuri/uitable"
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/monocular"
+ "helm.sh/helm/v4/pkg/cli/output"
+)
+
+const searchHubDesc = `
+Search for Helm charts in the Artifact Hub or your own hub instance.
+
+Artifact Hub is a web-based application that enables finding, installing, and
+publishing packages and configurations for CNCF projects, including publicly
+available distributed charts Helm charts. It is a Cloud Native Computing
+Foundation sandbox project. You can browse the hub at https://artifacthub.io/
+
+The [KEYWORD] argument accepts either a keyword string, or quoted string of rich
+query options. For rich query options documentation, see
+https://artifacthub.github.io/hub/api/?urls.primaryName=Monocular%20compatible%20search%20API#/Monocular/get_api_chartsvc_v1_charts_search
+
+Previous versions of Helm used an instance of Monocular as the default
+'endpoint', so for backwards compatibility Artifact Hub is compatible with the
+Monocular search API. Similarly, when setting the 'endpoint' flag, the specified
+endpoint must also be implement a Monocular compatible search API endpoint.
+Note that when specifying a Monocular instance as the 'endpoint', rich queries
+are not supported. For API details, see https://github.com/helm/monocular
+`
+
+type searchHubOptions struct {
+ searchEndpoint string
+ maxColWidth uint
+ outputFormat output.Format
+ listRepoURL bool
+ failOnNoResult bool
+}
+
+func newSearchHubCmd(out io.Writer) *cobra.Command {
+ o := &searchHubOptions{}
+
+ cmd := &cobra.Command{
+ Use: "hub [KEYWORD]",
+ Short: "search for charts in the Artifact Hub or your own hub instance",
+ Long: searchHubDesc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ return o.run(out, args)
+ },
+ }
+
+ f := cmd.Flags()
+ f.StringVar(&o.searchEndpoint, "endpoint", "https://hub.helm.sh", "Hub instance to query for charts")
+ f.UintVar(&o.maxColWidth, "max-col-width", 50, "maximum column width for output table")
+ f.BoolVar(&o.listRepoURL, "list-repo-url", false, "print charts repository URL")
+ f.BoolVar(&o.failOnNoResult, "fail-on-no-result", false, "search fails if no results are found")
+
+ bindOutputFlag(cmd, &o.outputFormat)
+
+ return cmd
+}
+
+func (o *searchHubOptions) run(out io.Writer, args []string) error {
+ c, err := monocular.New(o.searchEndpoint)
+ if err != nil {
+ return fmt.Errorf("unable to create connection to %q: %w", o.searchEndpoint, err)
+ }
+
+ q := strings.Join(args, " ")
+ results, err := c.Search(q)
+ if err != nil {
+ slog.Debug("search failed", slog.Any("error", err))
+ return fmt.Errorf("unable to perform search against %q", o.searchEndpoint)
+ }
+
+ return o.outputFormat.Write(out, newHubSearchWriter(results, o.searchEndpoint, o.maxColWidth, o.listRepoURL, o.failOnNoResult))
+}
+
+type hubChartRepo struct {
+ URL string `json:"url"`
+ Name string `json:"name"`
+}
+
+type hubChartElement struct {
+ URL string `json:"url"`
+ Version string `json:"version"`
+ AppVersion string `json:"app_version"`
+ Description string `json:"description"`
+ Repository hubChartRepo `json:"repository"`
+}
+
+type hubSearchWriter struct {
+ elements []hubChartElement
+ columnWidth uint
+ listRepoURL bool
+ failOnNoResult bool
+}
+
+func newHubSearchWriter(results []monocular.SearchResult, endpoint string, columnWidth uint, listRepoURL, failOnNoResult bool) *hubSearchWriter {
+ var elements []hubChartElement
+ for _, r := range results {
+ // Backwards compatibility for Monocular
+ url := endpoint + "/charts/" + r.ID
+
+ // Check for artifactHub compatibility
+ if r.ArtifactHub.PackageURL != "" {
+ url = r.ArtifactHub.PackageURL
+ }
+
+ elements = append(elements, hubChartElement{url, r.Relationships.LatestChartVersion.Data.Version, r.Relationships.LatestChartVersion.Data.AppVersion, r.Attributes.Description, hubChartRepo{URL: r.Attributes.Repo.URL, Name: r.Attributes.Repo.Name}})
+ }
+ return &hubSearchWriter{elements, columnWidth, listRepoURL, failOnNoResult}
+}
+
+func (h *hubSearchWriter) WriteTable(out io.Writer) error {
+ if len(h.elements) == 0 {
+ // Fail if no results found and --fail-on-no-result is enabled
+ if h.failOnNoResult {
+ return fmt.Errorf("no results found")
+ }
+
+ _, err := out.Write([]byte("No results found\n"))
+ if err != nil {
+ return fmt.Errorf("unable to write results: %s", err)
+ }
+ return nil
+ }
+ table := uitable.New()
+ table.MaxColWidth = h.columnWidth
+
+ if h.listRepoURL {
+ table.AddRow("URL", "CHART VERSION", "APP VERSION", "DESCRIPTION", "REPO URL")
+ } else {
+ table.AddRow("URL", "CHART VERSION", "APP VERSION", "DESCRIPTION")
+ }
+
+ for _, r := range h.elements {
+ if h.listRepoURL {
+ table.AddRow(r.URL, r.Version, r.AppVersion, r.Description, r.Repository.URL)
+ } else {
+ table.AddRow(r.URL, r.Version, r.AppVersion, r.Description)
+ }
+ }
+ return output.EncodeTable(out, table)
+}
+
+func (h *hubSearchWriter) WriteJSON(out io.Writer) error {
+ return h.encodeByFormat(out, output.JSON)
+}
+
+func (h *hubSearchWriter) WriteYAML(out io.Writer) error {
+ return h.encodeByFormat(out, output.YAML)
+}
+
+func (h *hubSearchWriter) encodeByFormat(out io.Writer, format output.Format) error {
+ // Fail if no results found and --fail-on-no-result is enabled
+ if len(h.elements) == 0 && h.failOnNoResult {
+ return fmt.Errorf("no results found")
+ }
+
+ // Initialize the array so no results returns an empty array instead of null
+ chartList := make([]hubChartElement, 0, len(h.elements))
+
+ for _, r := range h.elements {
+ chartList = append(chartList, hubChartElement{r.URL, r.Version, r.AppVersion, r.Description, r.Repository})
+ }
+
+ switch format {
+ case output.JSON:
+ return output.EncodeJSON(out, chartList)
+ case output.YAML:
+ return output.EncodeYAML(out, chartList)
+ default:
+ // Because this is a non-exported function and only called internally by
+ // WriteJSON and WriteYAML, we shouldn't get invalid types
+ return nil
+ }
+
+}
diff --git a/helm/pkg/cmd/search_hub_test.go b/helm/pkg/cmd/search_hub_test.go
new file mode 100644
index 000000000..8e056f771
--- /dev/null
+++ b/helm/pkg/cmd/search_hub_test.go
@@ -0,0 +1,187 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestSearchHubCmd(t *testing.T) {
+
+ // Setup a mock search service
+ var searchResult = `{"data":[{"id":"stable/phpmyadmin","type":"chart","attributes":{"name":"phpmyadmin","repo":{"name":"stable","url":"https://charts.helm.sh/stable"},"description":"phpMyAdmin is an mysql administration frontend","home":"https://www.phpmyadmin.net/","keywords":["mariadb","mysql","phpmyadmin"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"sources":["https://github.com/bitnami/bitnami-docker-phpmyadmin"],"icon":""},"links":{"self":"/v1/charts/stable/phpmyadmin"},"relationships":{"latestChartVersion":{"data":{"version":"3.0.0","app_version":"4.9.0-1","created":"2019-08-08T17:57:31.38Z","digest":"119c499251bffd4b06ff0cd5ac98c2ce32231f84899fb4825be6c2d90971c742","urls":["https://charts.helm.sh/stable/phpmyadmin-3.0.0.tgz"],"readme":"/v1/assets/stable/phpmyadmin/versions/3.0.0/README.md","values":"/v1/assets/stable/phpmyadmin/versions/3.0.0/values.yaml"},"links":{"self":"/v1/charts/stable/phpmyadmin/versions/3.0.0"}}}},{"id":"bitnami/phpmyadmin","type":"chart","attributes":{"name":"phpmyadmin","repo":{"name":"bitnami","url":"https://charts.bitnami.com"},"description":"phpMyAdmin is an mysql administration frontend","home":"https://www.phpmyadmin.net/","keywords":["mariadb","mysql","phpmyadmin"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"sources":["https://github.com/bitnami/bitnami-docker-phpmyadmin"],"icon":""},"links":{"self":"/v1/charts/bitnami/phpmyadmin"},"relationships":{"latestChartVersion":{"data":{"version":"3.0.0","app_version":"4.9.0-1","created":"2019-08-08T18:34:13.341Z","digest":"66d77cf6d8c2b52c488d0a294cd4996bd5bad8dc41d3829c394498fb401c008a","urls":["https://charts.bitnami.com/bitnami/phpmyadmin-3.0.0.tgz"],"readme":"/v1/assets/bitnami/phpmyadmin/versions/3.0.0/README.md","values":"/v1/assets/bitnami/phpmyadmin/versions/3.0.0/values.yaml"},"links":{"self":"/v1/charts/bitnami/phpmyadmin/versions/3.0.0"}}}}]}`
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ fmt.Fprintln(w, searchResult)
+ }))
+ defer ts.Close()
+
+ // The expected output has the URL to the mocked search service in it
+ // Trailing spaces are necessary to preserve in "expected" as the uitable package adds
+ // them during printing.
+ var expected = fmt.Sprintf(`URL CHART VERSION APP VERSION DESCRIPTION
+%s/charts/stable/phpmyadmin 3.0.0 4.9.0-1 phpMyAdmin is an mysql administration frontend
+%s/charts/bitnami/phpmyadmin 3.0.0 4.9.0-1 phpMyAdmin is an mysql administration frontend
+`, ts.URL, ts.URL)
+
+ testcmd := "search hub --endpoint " + ts.URL + " maria"
+ storage := storageFixture()
+ _, out, err := executeActionCommandC(storage, testcmd)
+ if err != nil {
+ t.Errorf("unexpected error, %s", err)
+ }
+ if out != expected {
+ t.Error("expected and actual output did not match")
+ t.Log(out)
+ t.Log(expected)
+ }
+}
+
+func TestSearchHubListRepoCmd(t *testing.T) {
+
+ // Setup a mock search service
+ var searchResult = `{"data":[{"id":"stable/phpmyadmin","type":"chart","attributes":{"name":"phpmyadmin","repo":{"name":"stable","url":"https://charts.helm.sh/stable"},"description":"phpMyAdmin is an mysql administration frontend","home":"https://www.phpmyadmin.net/","keywords":["mariadb","mysql","phpmyadmin"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"sources":["https://github.com/bitnami/bitnami-docker-phpmyadmin"],"icon":""},"links":{"self":"/v1/charts/stable/phpmyadmin"},"relationships":{"latestChartVersion":{"data":{"version":"3.0.0","app_version":"4.9.0-1","created":"2019-08-08T17:57:31.38Z","digest":"119c499251bffd4b06ff0cd5ac98c2ce32231f84899fb4825be6c2d90971c742","urls":["https://charts.helm.sh/stable/phpmyadmin-3.0.0.tgz"],"readme":"/v1/assets/stable/phpmyadmin/versions/3.0.0/README.md","values":"/v1/assets/stable/phpmyadmin/versions/3.0.0/values.yaml"},"links":{"self":"/v1/charts/stable/phpmyadmin/versions/3.0.0"}}}},{"id":"bitnami/phpmyadmin","type":"chart","attributes":{"name":"phpmyadmin","repo":{"name":"bitnami","url":"https://charts.bitnami.com"},"description":"phpMyAdmin is an mysql administration frontend","home":"https://www.phpmyadmin.net/","keywords":["mariadb","mysql","phpmyadmin"],"maintainers":[{"name":"Bitnami","email":"containers@bitnami.com"}],"sources":["https://github.com/bitnami/bitnami-docker-phpmyadmin"],"icon":""},"links":{"self":"/v1/charts/bitnami/phpmyadmin"},"relationships":{"latestChartVersion":{"data":{"version":"3.0.0","app_version":"4.9.0-1","created":"2019-08-08T18:34:13.341Z","digest":"66d77cf6d8c2b52c488d0a294cd4996bd5bad8dc41d3829c394498fb401c008a","urls":["https://charts.bitnami.com/bitnami/phpmyadmin-3.0.0.tgz"],"readme":"/v1/assets/bitnami/phpmyadmin/versions/3.0.0/README.md","values":"/v1/assets/bitnami/phpmyadmin/versions/3.0.0/values.yaml"},"links":{"self":"/v1/charts/bitnami/phpmyadmin/versions/3.0.0"}}}}]}`
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ fmt.Fprintln(w, searchResult)
+ }))
+ defer ts.Close()
+
+ // The expected output has the URL to the mocked search service in it
+ // Trailing spaces are necessary to preserve in "expected" as the uitable package adds
+ // them during printing.
+ var expected = fmt.Sprintf(`URL CHART VERSION APP VERSION DESCRIPTION REPO URL
+%s/charts/stable/phpmyadmin 3.0.0 4.9.0-1 phpMyAdmin is an mysql administration frontend https://charts.helm.sh/stable
+%s/charts/bitnami/phpmyadmin 3.0.0 4.9.0-1 phpMyAdmin is an mysql administration frontend https://charts.bitnami.com
+`, ts.URL, ts.URL)
+
+ testcmd := "search hub --list-repo-url --endpoint " + ts.URL + " maria"
+ storage := storageFixture()
+ _, out, err := executeActionCommandC(storage, testcmd)
+ if err != nil {
+ t.Errorf("unexpected error, %s", err)
+ }
+ if out != expected {
+ t.Error("expected and actual output did not match")
+ t.Log(out)
+ t.Log(expected)
+ }
+}
+
+func TestSearchHubOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "search hub")
+}
+
+func TestSearchHubFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "search hub", true) // File completion may be useful when inputting a keyword
+}
+
+func TestSearchHubCmd_FailOnNoResponseTests(t *testing.T) {
+ var (
+ searchResult = `{"data":[]}`
+ noResultFoundErr = "Error: no results found\n"
+ noResultFoundWarn = "No results found\n"
+ noResultFoundWarnInList = "[]\n"
+ )
+
+ type testCase struct {
+ name string
+ cmd string
+ response string
+ expected string
+ wantErr bool
+ }
+
+ var tests = []testCase{
+ {
+ name: "Search hub with no results in response",
+ cmd: `search hub maria`,
+ response: searchResult,
+ expected: noResultFoundWarn,
+ wantErr: false,
+ },
+ {
+ name: "Search hub with no results in response and output JSON",
+ cmd: `search hub maria --output json`,
+ response: searchResult,
+ expected: noResultFoundWarnInList,
+ wantErr: false,
+ },
+ {
+ name: "Search hub with no results in response and output YAML",
+ cmd: `search hub maria --output yaml`,
+ response: searchResult,
+ expected: noResultFoundWarnInList,
+ wantErr: false,
+ },
+ {
+ name: "Search hub with no results in response and --fail-on-no-result enabled, expected failure",
+ cmd: `search hub maria --fail-on-no-result`,
+ response: searchResult,
+ expected: noResultFoundErr,
+ wantErr: true,
+ },
+ {
+ name: "Search hub with no results in response, output JSON and --fail-on-no-result enabled, expected failure",
+ cmd: `search hub maria --fail-on-no-result --output json`,
+ response: searchResult,
+ expected: noResultFoundErr,
+ wantErr: true,
+ },
+ {
+ name: "Search hub with no results in response, output YAML and --fail-on-no-result enabled, expected failure",
+ cmd: `search hub maria --fail-on-no-result --output yaml`,
+ response: searchResult,
+ expected: noResultFoundErr,
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Setup a mock search service
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ fmt.Fprintln(w, tt.response)
+ }))
+ defer ts.Close()
+
+ // Add mock server URL to command
+ tt.cmd += " --endpoint " + ts.URL
+
+ storage := storageFixture()
+
+ _, out, err := executeActionCommandC(storage, tt.cmd)
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("expected error due to no record in response, got nil")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("unexpected error, got %q", err)
+ }
+ }
+
+ if out != tt.expected {
+ t.Errorf("expected and actual output did not match\n"+
+ "expected: %q\n"+
+ "actual : %q",
+ tt.expected, out)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/cmd/search_repo.go b/helm/pkg/cmd/search_repo.go
new file mode 100644
index 000000000..febb138e2
--- /dev/null
+++ b/helm/pkg/cmd/search_repo.go
@@ -0,0 +1,396 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/gosuri/uitable"
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/search"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+const searchRepoDesc = `
+Search reads through all of the repositories configured on the system, and
+looks for matches. Search of these repositories uses the metadata stored on
+the system.
+
+It will display the latest stable versions of the charts found. If you
+specify the --devel flag, the output will include pre-release versions.
+If you want to search using a version constraint, use --version.
+
+Examples:
+
+ # Search for stable release versions matching the keyword "nginx"
+ $ helm search repo nginx
+
+ # Search for release versions matching the keyword "nginx", including pre-release versions
+ $ helm search repo nginx --devel
+
+ # Search for the latest stable release for nginx-ingress with a major version of 1
+ $ helm search repo nginx-ingress --version ^1.0.0
+
+Repositories are managed with 'helm repo' commands.
+`
+
+// searchMaxScore suggests that any score higher than this is not considered a match.
+const searchMaxScore = 25
+
+type searchRepoOptions struct {
+ versions bool
+ regexp bool
+ devel bool
+ version string
+ maxColWidth uint
+ repoFile string
+ repoCacheDir string
+ outputFormat output.Format
+ failOnNoResult bool
+}
+
+func newSearchRepoCmd(out io.Writer) *cobra.Command {
+ o := &searchRepoOptions{}
+
+ cmd := &cobra.Command{
+ Use: "repo [keyword]",
+ Short: "search repositories for a keyword in charts",
+ Long: searchRepoDesc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ o.repoFile = settings.RepositoryConfig
+ o.repoCacheDir = settings.RepositoryCache
+ return o.run(out, args)
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVarP(&o.regexp, "regexp", "r", false, "use regular expressions for searching repositories you have added")
+ f.BoolVarP(&o.versions, "versions", "l", false, "show the long listing, with each version of each chart on its own line, for repositories you have added")
+ f.BoolVar(&o.devel, "devel", false, "use development versions (alpha, beta, and release candidate releases), too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
+ f.StringVar(&o.version, "version", "", "search using semantic versioning constraints on repositories you have added")
+ f.UintVar(&o.maxColWidth, "max-col-width", 50, "maximum column width for output table")
+ f.BoolVar(&o.failOnNoResult, "fail-on-no-result", false, "search fails if no results are found")
+
+ bindOutputFlag(cmd, &o.outputFormat)
+
+ return cmd
+}
+
+func (o *searchRepoOptions) run(out io.Writer, args []string) error {
+ o.setupSearchedVersion()
+
+ index, err := o.buildIndex()
+ if err != nil {
+ return err
+ }
+
+ var res []*search.Result
+ if len(args) == 0 {
+ res = index.All()
+ } else {
+ q := strings.Join(args, " ")
+ res, err = index.Search(q, searchMaxScore, o.regexp)
+ if err != nil {
+ return err
+ }
+ }
+
+ search.SortScore(res)
+ data, err := o.applyConstraint(res)
+ if err != nil {
+ return err
+ }
+
+ return o.outputFormat.Write(out, &repoSearchWriter{data, o.maxColWidth, o.failOnNoResult})
+}
+
+func (o *searchRepoOptions) setupSearchedVersion() {
+ slog.Debug("original chart version", "version", o.version)
+
+ if o.version != "" {
+ return
+ }
+
+ if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
+ slog.Debug("setting version to >0.0.0-0")
+ o.version = ">0.0.0-0"
+ } else { // search only for stable releases, prerelease versions will be skipped
+ slog.Debug("setting version to >0.0.0")
+ o.version = ">0.0.0"
+ }
+}
+
+func (o *searchRepoOptions) applyConstraint(res []*search.Result) ([]*search.Result, error) {
+ if o.version == "" {
+ return res, nil
+ }
+
+ constraint, err := semver.NewConstraint(o.version)
+ if err != nil {
+ return res, fmt.Errorf("an invalid version/constraint format: %w", err)
+ }
+
+ data := res[:0]
+ foundNames := map[string]bool{}
+ for _, r := range res {
+ // if not returning all versions and already have found a result,
+ // you're done!
+ if !o.versions && foundNames[r.Name] {
+ continue
+ }
+ v, err := semver.NewVersion(r.Chart.Version)
+ if err != nil {
+ continue
+ }
+ if constraint.Check(v) {
+ data = append(data, r)
+ foundNames[r.Name] = true
+ }
+ }
+
+ return data, nil
+}
+
+func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
+ // Load the repositories.yaml
+ rf, err := repo.LoadFile(o.repoFile)
+ if isNotExist(err) || len(rf.Repositories) == 0 {
+ return nil, errors.New("no repositories configured")
+ }
+
+ i := search.NewIndex()
+ for _, re := range rf.Repositories {
+ n := re.Name
+ f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
+ ind, err := repo.LoadIndexFile(f)
+ if err != nil {
+ slog.Warn("repo is corrupt or missing", slog.String("repo", n), slog.Any("error", err))
+ continue
+ }
+
+ i.AddRepo(n, ind, o.versions || len(o.version) > 0)
+ }
+ return i, nil
+}
+
+type repoChartElement struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ AppVersion string `json:"app_version"`
+ Description string `json:"description"`
+}
+
+type repoSearchWriter struct {
+ results []*search.Result
+ columnWidth uint
+ failOnNoResult bool
+}
+
+func (r *repoSearchWriter) WriteTable(out io.Writer) error {
+ if len(r.results) == 0 {
+ // Fail if no results found and --fail-on-no-result is enabled
+ if r.failOnNoResult {
+ return fmt.Errorf("no results found")
+ }
+
+ _, err := out.Write([]byte("No results found\n"))
+ if err != nil {
+ return fmt.Errorf("unable to write results: %s", err)
+ }
+ return nil
+ }
+ table := uitable.New()
+ table.MaxColWidth = r.columnWidth
+ table.AddRow("NAME", "CHART VERSION", "APP VERSION", "DESCRIPTION")
+ for _, r := range r.results {
+ table.AddRow(r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description)
+ }
+ return output.EncodeTable(out, table)
+}
+
+func (r *repoSearchWriter) WriteJSON(out io.Writer) error {
+ return r.encodeByFormat(out, output.JSON)
+}
+
+func (r *repoSearchWriter) WriteYAML(out io.Writer) error {
+ return r.encodeByFormat(out, output.YAML)
+}
+
+func (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) error {
+ // Fail if no results found and --fail-on-no-result is enabled
+ if len(r.results) == 0 && r.failOnNoResult {
+ return fmt.Errorf("no results found")
+ }
+
+ // Initialize the array so no results returns an empty array instead of null
+ chartList := make([]repoChartElement, 0, len(r.results))
+
+ for _, r := range r.results {
+ chartList = append(chartList, repoChartElement{r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description})
+ }
+
+ switch format {
+ case output.JSON:
+ return output.EncodeJSON(out, chartList)
+ case output.YAML:
+ return output.EncodeYAML(out, chartList)
+ default:
+ // Because this is a non-exported function and only called internally by
+ // WriteJSON and WriteYAML, we shouldn't get invalid types
+ return nil
+ }
+}
+
+// Provides the list of charts that are part of the specified repo, and that starts with 'prefix'.
+func compListChartsOfRepo(repoName string, prefix string) []string {
+ var charts []string
+
+ path := filepath.Join(settings.RepositoryCache, helmpath.CacheChartsFile(repoName))
+ content, err := os.ReadFile(path)
+ if err == nil {
+ scanner := bufio.NewScanner(bytes.NewReader(content))
+ for scanner.Scan() {
+ fullName := fmt.Sprintf("%s/%s", repoName, scanner.Text())
+ if strings.HasPrefix(fullName, prefix) {
+ charts = append(charts, fullName)
+ }
+ }
+ return charts
+ }
+
+ if isNotExist(err) {
+ // If there is no cached charts file, fallback to the full index file.
+ // This is much slower but can happen after the caching feature is first
+ // installed but before the user does a 'helm repo update' to generate the
+ // first cached charts file.
+ path = filepath.Join(settings.RepositoryCache, helmpath.CacheIndexFile(repoName))
+ if indexFile, err := repo.LoadIndexFile(path); err == nil {
+ for name := range indexFile.Entries {
+ fullName := fmt.Sprintf("%s/%s", repoName, name)
+ if strings.HasPrefix(fullName, prefix) {
+ charts = append(charts, fullName)
+ }
+ }
+ return charts
+ }
+ }
+
+ return []string{}
+}
+
+// Provide dynamic auto-completion for commands that operate on charts (e.g., helm show)
+// When true, the includeFiles argument indicates that completion should include local files (e.g., local charts)
+func compListCharts(toComplete string, includeFiles bool) ([]string, cobra.ShellCompDirective) {
+ cobra.CompDebugln(fmt.Sprintf("compListCharts with toComplete %s", toComplete), settings.Debug)
+
+ noSpace := false
+ noFile := false
+ var completions []string
+
+ // First check completions for repos
+ repos := compListRepos("", nil)
+ for _, repoInfo := range repos {
+ // Split name from description
+ repoInfo := strings.Split(repoInfo, "\t")
+ repo := repoInfo[0]
+ repoDesc := ""
+ if len(repoInfo) > 1 {
+ repoDesc = repoInfo[1]
+ }
+ repoWithSlash := fmt.Sprintf("%s/", repo)
+ if strings.HasPrefix(toComplete, repoWithSlash) {
+ // Must complete with charts within the specified repo.
+ // Don't filter on toComplete to allow for shell fuzzy matching
+ completions = append(completions, compListChartsOfRepo(repo, "")...)
+ noSpace = false
+ break
+ } else if strings.HasPrefix(repo, toComplete) {
+ // Must complete the repo name with the slash, followed by the description
+ completions = append(completions, fmt.Sprintf("%s\t%s", repoWithSlash, repoDesc))
+ noSpace = true
+ }
+ }
+ cobra.CompDebugln(fmt.Sprintf("Completions after repos: %v", completions), settings.Debug)
+
+ // Now handle completions for url prefixes
+ for _, url := range []string{"oci://\tChart OCI prefix", "https://\tChart URL prefix", "http://\tChart URL prefix", "file://\tChart local URL prefix"} {
+ if strings.HasPrefix(toComplete, url) {
+ // The user already put in the full url prefix; we don't have
+ // anything to add, but make sure the shell does not default
+ // to file completion since we could be returning an empty array.
+ noFile = true
+ noSpace = true
+ } else if strings.HasPrefix(url, toComplete) {
+ // We are completing a url prefix
+ completions = append(completions, url)
+ noSpace = true
+ }
+ }
+ cobra.CompDebugln(fmt.Sprintf("Completions after urls: %v", completions), settings.Debug)
+
+ // Finally, provide file completion if we need to.
+ // We only do this if:
+ // 1- There are other completions found (if there are no completions,
+ // the shell will do file completion itself)
+ // 2- If there is some input from the user (or else we will end up
+ // listing the entire content of the current directory which will
+ // be too many choices for the user to find the real repos)
+ if includeFiles && len(completions) > 0 && len(toComplete) > 0 {
+ if files, err := os.ReadDir("."); err == nil {
+ for _, file := range files {
+ if strings.HasPrefix(file.Name(), toComplete) {
+ // We are completing a file prefix
+ completions = append(completions, file.Name())
+ }
+ }
+ }
+ }
+ cobra.CompDebugln(fmt.Sprintf("Completions after files: %v", completions), settings.Debug)
+
+ // If the user didn't provide any input to completion,
+ // we provide a hint that a path can also be used
+ if includeFiles && len(toComplete) == 0 {
+ completions = append(completions, "./\tRelative path prefix to local chart", "/\tAbsolute path prefix to local chart")
+ }
+ cobra.CompDebugln(fmt.Sprintf("Completions after checking empty input: %v", completions), settings.Debug)
+
+ directive := cobra.ShellCompDirectiveDefault
+ if noFile {
+ directive = directive | cobra.ShellCompDirectiveNoFileComp
+ }
+ if noSpace {
+ directive = directive | cobra.ShellCompDirectiveNoSpace
+ }
+ if !includeFiles {
+ // If we should not include files in the completions,
+ // we should disable file completion
+ directive = directive | cobra.ShellCompDirectiveNoFileComp
+ }
+ return completions, directive
+}
diff --git a/helm/pkg/cmd/search_repo_test.go b/helm/pkg/cmd/search_repo_test.go
new file mode 100644
index 000000000..e7f104e05
--- /dev/null
+++ b/helm/pkg/cmd/search_repo_test.go
@@ -0,0 +1,107 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestSearchRepositoriesCmd(t *testing.T) {
+ repoFile := "testdata/helmhome/helm/repositories.yaml"
+ repoCache := "testdata/helmhome/helm/repository"
+
+ tests := []cmdTestCase{{
+ name: "search for 'alpine', expect one match with latest stable version",
+ cmd: "search repo alpine",
+ golden: "output/search-multiple-stable-release.txt",
+ }, {
+ name: "search for 'alpine', expect one match with newest development version",
+ cmd: "search repo alpine --devel",
+ golden: "output/search-multiple-devel-release.txt",
+ }, {
+ name: "search for 'alpine' with versions, expect three matches",
+ cmd: "search repo alpine --versions",
+ golden: "output/search-multiple-versions.txt",
+ }, {
+ name: "search for 'alpine' with version constraint, expect one match with version 0.1.0",
+ cmd: "search repo alpine --version '>= 0.1, < 0.2'",
+ golden: "output/search-constraint.txt",
+ }, {
+ name: "search for 'alpine' with version constraint, expect one match with version 0.1.0",
+ cmd: "search repo alpine --versions --version '>= 0.1, < 0.2'",
+ golden: "output/search-versions-constraint.txt",
+ }, {
+ name: "search for 'alpine' with version constraint, expect one match with version 0.2.0",
+ cmd: "search repo alpine --version '>= 0.1'",
+ golden: "output/search-constraint-single.txt",
+ }, {
+ name: "search for 'alpine' with version constraint and --versions, expect two matches",
+ cmd: "search repo alpine --versions --version '>= 0.1'",
+ golden: "output/search-multiple-versions-constraints.txt",
+ }, {
+ name: "search for 'syzygy', expect no matches",
+ cmd: "search repo syzygy",
+ golden: "output/search-not-found.txt",
+ }, {
+ name: "search for 'syzygy' with --fail-on-no-result, expect failure for no results",
+ cmd: "search repo syzygy --fail-on-no-result",
+ golden: "output/search-not-found-error.txt",
+ wantError: true,
+ }, {name: "search for 'syzygy' with json output and --fail-on-no-result, expect failure for no results",
+ cmd: "search repo syzygy --output json --fail-on-no-result",
+ golden: "output/search-not-found-error.txt",
+ wantError: true,
+ }, {
+ name: "search for 'syzygy' with yaml output --fail-on-no-result, expect failure for no results",
+ cmd: "search repo syzygy --output yaml --fail-on-no-result",
+ golden: "output/search-not-found-error.txt",
+ wantError: true,
+ }, {
+ name: "search for 'alp[a-z]+', expect two matches",
+ cmd: "search repo alp[a-z]+ --regexp",
+ golden: "output/search-regex.txt",
+ }, {
+ name: "search for 'alp[', expect failure to compile regexp",
+ cmd: "search repo alp[ --regexp",
+ wantError: true,
+ }, {
+ name: "search for 'maria', expect valid json output",
+ cmd: "search repo maria --output json",
+ golden: "output/search-output-json.txt",
+ }, {
+ name: "search for 'alpine', expect valid yaml output",
+ cmd: "search repo alpine --output yaml",
+ golden: "output/search-output-yaml.txt",
+ }}
+
+ settings.Debug = true
+ defer func() { settings.Debug = false }()
+
+ for i := range tests {
+ tests[i].cmd += " --repository-config " + repoFile
+ tests[i].cmd += " --repository-cache " + repoCache
+ }
+ runTestCmd(t, tests)
+}
+
+func TestSearchRepoOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "search repo")
+}
+
+func TestSearchRepoFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "search repo", true) // File completion may be useful when inputting a keyword
+}
diff --git a/helm/pkg/cmd/search_test.go b/helm/pkg/cmd/search_test.go
new file mode 100644
index 000000000..a0e5d84cb
--- /dev/null
+++ b/helm/pkg/cmd/search_test.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import "testing"
+
+func TestSearchFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "search", false)
+}
diff --git a/helm/pkg/cmd/show.go b/helm/pkg/cmd/show.go
new file mode 100644
index 000000000..d7249c3fe
--- /dev/null
+++ b/helm/pkg/cmd/show.go
@@ -0,0 +1,236 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "log/slog"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const showDesc = `
+This command consists of multiple subcommands to display information about a chart
+`
+
+const showAllDesc = `
+This command inspects a chart (directory, file, or URL) and displays all its content
+(values.yaml, Chart.yaml, README)
+`
+
+const showValuesDesc = `
+This command inspects a chart (directory, file, or URL) and displays the contents
+of the values.yaml file
+`
+
+const showChartDesc = `
+This command inspects a chart (directory, file, or URL) and displays the contents
+of the Chart.yaml file
+`
+
+const readmeChartDesc = `
+This command inspects a chart (directory, file, or URL) and displays the contents
+of the README file
+`
+
+const showCRDsDesc = `
+This command inspects a chart (directory, file, or URL) and displays the contents
+of the CustomResourceDefinition files
+`
+
+func newShowCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewShow(action.ShowAll, cfg)
+
+ showCommand := &cobra.Command{
+ Use: "show",
+ Short: "show information of a chart",
+ Aliases: []string{"inspect"},
+ Long: showDesc,
+ Args: require.NoArgs,
+ }
+
+ // Function providing dynamic auto-completion
+ validArgsFunc := func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListCharts(toComplete, true)
+ }
+
+ all := &cobra.Command{
+ Use: "all [CHART]",
+ Short: "show all information of the chart",
+ Long: showAllDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: validArgsFunc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ client.OutputFormat = action.ShowAll
+ err := addRegistryClient(client)
+ if err != nil {
+ return err
+ }
+ output, err := runShow(args, client)
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ return nil
+ },
+ }
+
+ valuesSubCmd := &cobra.Command{
+ Use: "values [CHART]",
+ Short: "show the chart's values",
+ Long: showValuesDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: validArgsFunc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ client.OutputFormat = action.ShowValues
+ err := addRegistryClient(client)
+ if err != nil {
+ return err
+ }
+ output, err := runShow(args, client)
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ return nil
+ },
+ }
+
+ chartSubCmd := &cobra.Command{
+ Use: "chart [CHART]",
+ Short: "show the chart's definition",
+ Long: showChartDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: validArgsFunc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ client.OutputFormat = action.ShowChart
+ err := addRegistryClient(client)
+ if err != nil {
+ return err
+ }
+ output, err := runShow(args, client)
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ return nil
+ },
+ }
+
+ readmeSubCmd := &cobra.Command{
+ Use: "readme [CHART]",
+ Short: "show the chart's README",
+ Long: readmeChartDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: validArgsFunc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ client.OutputFormat = action.ShowReadme
+ err := addRegistryClient(client)
+ if err != nil {
+ return err
+ }
+ output, err := runShow(args, client)
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ return nil
+ },
+ }
+
+ crdsSubCmd := &cobra.Command{
+ Use: "crds [CHART]",
+ Short: "show the chart's CRDs",
+ Long: showCRDsDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: validArgsFunc,
+ RunE: func(_ *cobra.Command, args []string) error {
+ client.OutputFormat = action.ShowCRDs
+ err := addRegistryClient(client)
+ if err != nil {
+ return err
+ }
+ output, err := runShow(args, client)
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(out, output)
+ return nil
+ },
+ }
+
+ cmds := []*cobra.Command{all, readmeSubCmd, valuesSubCmd, chartSubCmd, crdsSubCmd}
+ for _, subCmd := range cmds {
+ addShowFlags(subCmd, client)
+ showCommand.AddCommand(subCmd)
+ }
+
+ return showCommand
+}
+
+func addShowFlags(subCmd *cobra.Command, client *action.Show) {
+ f := subCmd.Flags()
+
+ f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
+ if subCmd.Name() == "values" {
+ f.StringVar(&client.JSONPathTemplate, "jsonpath", "", "supply a JSONPath expression to filter the output")
+ }
+ addChartPathOptionsFlags(f, &client.ChartPathOptions)
+
+ err := subCmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 1 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[0], toComplete)
+ })
+
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func runShow(args []string, client *action.Show) (string, error) {
+ slog.Debug("original chart version", "version", client.Version)
+ if client.Version == "" && client.Devel {
+ slog.Debug("setting version to >0.0.0-0")
+ client.Version = ">0.0.0-0"
+ }
+
+ cp, err := client.LocateChart(args[0], settings)
+ if err != nil {
+ return "", err
+ }
+ return client.Run(cp)
+}
+
+func addRegistryClient(client *action.Show) error {
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+ client.SetRegistryClient(registryClient)
+ return nil
+}
diff --git a/helm/pkg/cmd/show_test.go b/helm/pkg/cmd/show_test.go
new file mode 100644
index 000000000..ff3671dbc
--- /dev/null
+++ b/helm/pkg/cmd/show_test.go
@@ -0,0 +1,158 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestShowPreReleaseChart(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/testcharts/*.tgz*"),
+ )
+ defer srv.Stop()
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ tests := []struct {
+ name string
+ args string
+ flags string
+ fail bool
+ expectedErr string
+ }{
+ {
+ name: "show pre-release chart",
+ args: "test/pre-release-chart",
+ fail: true,
+ expectedErr: "chart \"pre-release-chart\" matching not found in test index. (try 'helm repo update'): no chart version found for pre-release-chart-",
+ },
+ {
+ name: "show pre-release chart",
+ args: "test/pre-release-chart",
+ fail: true,
+ flags: "--version 1.0.0",
+ expectedErr: "chart \"pre-release-chart\" matching 1.0.0 not found in test index. (try 'helm repo update'): no chart version found for pre-release-chart-1.0.0",
+ },
+ {
+ name: "show pre-release chart with 'devel' flag",
+ args: "test/pre-release-chart",
+ flags: "--devel",
+ fail: false,
+ },
+ }
+
+ contentTmp := t.TempDir()
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ outdir := srv.Root()
+ cmd := fmt.Sprintf("show all '%s' %s --repository-config %s --repository-cache %s --content-cache %s",
+ tt.args,
+ tt.flags,
+ filepath.Join(outdir, "repositories.yaml"),
+ outdir,
+ contentTmp,
+ )
+ //_, out, err := executeActionCommand(cmd)
+ _, _, err := executeActionCommand(cmd)
+ if err != nil {
+ if tt.fail {
+ if !strings.Contains(err.Error(), tt.expectedErr) {
+ t.Errorf("%q expected error: %s, got: %s", tt.name, tt.expectedErr, err.Error())
+ }
+ return
+ }
+ t.Errorf("%q reported error: %s", tt.name, err)
+ }
+ })
+ }
+}
+
+func TestShowVersionCompletion(t *testing.T) {
+ repoFile := "testdata/helmhome/helm/repositories.yaml"
+ repoCache := "testdata/helmhome/helm/repository"
+
+ repoSetup := fmt.Sprintf("--repository-config %s --repository-cache %s", repoFile, repoCache)
+
+ tests := []cmdTestCase{{
+ name: "completion for show version flag",
+ cmd: fmt.Sprintf("%s __complete show chart testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for show version flag, no filter",
+ cmd: fmt.Sprintf("%s __complete show chart testing/alpine --version 0.3", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for show version flag too few args",
+ cmd: fmt.Sprintf("%s __complete show chart --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for show version flag too many args",
+ cmd: fmt.Sprintf("%s __complete show chart testing/alpine badarg --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for show version flag invalid chart",
+ cmd: fmt.Sprintf("%s __complete show chart invalid/invalid --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for show version flag with all",
+ cmd: fmt.Sprintf("%s __complete show all testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for show version flag with readme",
+ cmd: fmt.Sprintf("%s __complete show readme testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for show version flag with values",
+ cmd: fmt.Sprintf("%s __complete show values testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestShowFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "show", false)
+}
+
+func TestShowAllFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "show all", true)
+}
+
+func TestShowChartFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "show chart", true)
+}
+
+func TestShowReadmeFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "show readme", true)
+}
+
+func TestShowValuesFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "show values", true)
+}
+
+func TestShowCRDsFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "show crds", true)
+}
diff --git a/helm/pkg/cmd/status.go b/helm/pkg/cmd/status.go
new file mode 100644
index 000000000..f68316c6c
--- /dev/null
+++ b/helm/pkg/cmd/status.go
@@ -0,0 +1,257 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "k8s.io/kubectl/pkg/cmd/get"
+
+ coloroutput "helm.sh/helm/v4/internal/cli/output"
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/release"
+ releasev1 "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// NOTE: Keep the list of statuses up-to-date with pkg/release/status.go.
+var statusHelp = `
+This command shows the status of a named release.
+The status consists of:
+- last deployment time
+- k8s namespace in which the release lives
+- state of the release (can be: unknown, deployed, uninstalled, superseded, failed, uninstalling, pending-install, pending-upgrade or pending-rollback)
+- revision of the release
+- description of the release (can be completion message or error message)
+- list of resources that this release consists of
+- details on last test suite run, if applicable
+- additional notes provided by the chart
+`
+
+func newStatusCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewStatus(cfg)
+ var outfmt output.Format
+
+ cmd := &cobra.Command{
+ Use: "status RELEASE_NAME",
+ Short: "display the status of the named release",
+ Long: statusHelp,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return noMoreArgsComp()
+ }
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ // When the output format is a table the resources should be fetched
+ // and displayed as a table. When YAML or JSON the resources will be
+ // returned. This mirrors the handling in kubectl.
+ if outfmt == output.Table {
+ client.ShowResourcesTable = true
+ }
+ reli, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+ rel, err := releaserToV1Release(reli)
+ if err != nil {
+ return err
+ }
+
+ // strip chart metadata from the output
+ rel.Chart = nil
+
+ return outfmt.Write(out, &statusPrinter{
+ release: rel,
+ debug: false,
+ showMetadata: false,
+ hideNotes: false,
+ noColor: settings.ShouldDisableColor(),
+ })
+ },
+ }
+
+ f := cmd.Flags()
+
+ f.IntVar(&client.Version, "revision", 0, "if set, display the status of the named release with revision")
+
+ err := cmd.RegisterFlagCompletionFunc("revision", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 1 {
+ return compListRevisions(toComplete, cfg, args[0])
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ bindOutputFlag(cmd, &outfmt)
+
+ return cmd
+}
+
+type statusPrinter struct {
+ release release.Releaser
+ debug bool
+ showMetadata bool
+ hideNotes bool
+ noColor bool
+}
+
+func (s statusPrinter) getV1Release() *releasev1.Release {
+ switch rel := s.release.(type) {
+ case releasev1.Release:
+ return &rel
+ case *releasev1.Release:
+ return rel
+ }
+ return &releasev1.Release{}
+}
+
+func (s statusPrinter) WriteJSON(out io.Writer) error {
+ return output.EncodeJSON(out, s.getV1Release())
+}
+
+func (s statusPrinter) WriteYAML(out io.Writer) error {
+ return output.EncodeYAML(out, s.getV1Release())
+}
+
+func (s statusPrinter) WriteTable(out io.Writer) error {
+ if s.release == nil {
+ return nil
+ }
+ rel := s.getV1Release()
+ _, _ = fmt.Fprintf(out, "NAME: %s\n", rel.Name)
+ if !rel.Info.LastDeployed.IsZero() {
+ _, _ = fmt.Fprintf(out, "LAST DEPLOYED: %s\n", rel.Info.LastDeployed.Format(time.ANSIC))
+ }
+ _, _ = fmt.Fprintf(out, "NAMESPACE: %s\n", coloroutput.ColorizeNamespace(rel.Namespace, s.noColor))
+ _, _ = fmt.Fprintf(out, "STATUS: %s\n", coloroutput.ColorizeStatus(rel.Info.Status, s.noColor))
+ _, _ = fmt.Fprintf(out, "REVISION: %d\n", rel.Version)
+ if s.showMetadata {
+ _, _ = fmt.Fprintf(out, "CHART: %s\n", rel.Chart.Metadata.Name)
+ _, _ = fmt.Fprintf(out, "VERSION: %s\n", rel.Chart.Metadata.Version)
+ _, _ = fmt.Fprintf(out, "APP_VERSION: %s\n", rel.Chart.Metadata.AppVersion)
+ }
+ _, _ = fmt.Fprintf(out, "DESCRIPTION: %s\n", rel.Info.Description)
+
+ if len(rel.Info.Resources) > 0 {
+ buf := new(bytes.Buffer)
+ printFlags := get.NewHumanPrintFlags()
+ typePrinter, _ := printFlags.ToPrinter("")
+ printer := &get.TablePrinter{Delegate: typePrinter}
+
+ var keys []string
+ for key := range rel.Info.Resources {
+ keys = append(keys, key)
+ }
+
+ for _, t := range keys {
+ _, _ = fmt.Fprintf(buf, "==> %s\n", t)
+
+ vk := rel.Info.Resources[t]
+ for _, resource := range vk {
+ if err := printer.PrintObj(resource, buf); err != nil {
+ _, _ = fmt.Fprintf(buf, "failed to print object type %s: %v\n", t, err)
+ }
+ }
+
+ buf.WriteString("\n")
+ }
+
+ _, _ = fmt.Fprintf(out, "RESOURCES:\n%s\n", buf.String())
+ }
+
+ executions := executionsByHookEvent(rel)
+ if tests, ok := executions[releasev1.HookTest]; !ok || len(tests) == 0 {
+ _, _ = fmt.Fprintln(out, "TEST SUITE: None")
+ } else {
+ for _, h := range tests {
+ // Don't print anything if hook has not been initiated
+ if h.LastRun.StartedAt.IsZero() {
+ continue
+ }
+ _, _ = fmt.Fprintf(out, "TEST SUITE: %s\n%s\n%s\n%s\n",
+ h.Name,
+ fmt.Sprintf("Last Started: %s", h.LastRun.StartedAt.Format(time.ANSIC)),
+ fmt.Sprintf("Last Completed: %s", h.LastRun.CompletedAt.Format(time.ANSIC)),
+ fmt.Sprintf("Phase: %s", h.LastRun.Phase),
+ )
+ }
+ }
+
+ if s.debug {
+ _, _ = fmt.Fprintln(out, "USER-SUPPLIED VALUES:")
+ err := output.EncodeYAML(out, rel.Config)
+ if err != nil {
+ return err
+ }
+ // Print an extra newline
+ _, _ = fmt.Fprintln(out)
+
+ cfg, err := util.CoalesceValues(rel.Chart, rel.Config)
+ if err != nil {
+ return err
+ }
+
+ _, _ = fmt.Fprintln(out, "COMPUTED VALUES:")
+ err = output.EncodeYAML(out, cfg.AsMap())
+ if err != nil {
+ return err
+ }
+ // Print an extra newline
+ _, _ = fmt.Fprintln(out)
+ }
+
+ if strings.EqualFold(rel.Info.Description, "Dry run complete") || s.debug {
+ _, _ = fmt.Fprintln(out, "HOOKS:")
+ for _, h := range rel.Hooks {
+ _, _ = fmt.Fprintf(out, "---\n# Source: %s\n%s\n", h.Path, h.Manifest)
+ }
+ _, _ = fmt.Fprintf(out, "MANIFEST:\n%s\n", rel.Manifest)
+ }
+
+ // Hide notes from output - option in install and upgrades
+ if !s.hideNotes && len(rel.Info.Notes) > 0 {
+ _, _ = fmt.Fprintf(out, "NOTES:\n%s\n", strings.TrimSpace(rel.Info.Notes))
+ }
+ return nil
+}
+
+func executionsByHookEvent(rel *releasev1.Release) map[releasev1.HookEvent][]*releasev1.Hook {
+ result := make(map[releasev1.HookEvent][]*releasev1.Hook)
+ for _, h := range rel.Hooks {
+ for _, e := range h.Events {
+ executions, ok := result[e]
+ if !ok {
+ executions = []*releasev1.Hook{}
+ }
+ result[e] = append(executions, h)
+ }
+ }
+ return result
+}
diff --git a/helm/pkg/cmd/status_test.go b/helm/pkg/cmd/status_test.go
new file mode 100644
index 000000000..b96a0d19a
--- /dev/null
+++ b/helm/pkg/cmd/status_test.go
@@ -0,0 +1,220 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+ "time"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestStatusCmd(t *testing.T) {
+ releasesMockWithStatus := func(info *release.Info, hooks ...*release.Hook) []*release.Release {
+ info.LastDeployed = time.Unix(1452902400, 0).UTC()
+ return []*release.Release{{
+ Name: "flummoxed-chickadee",
+ Namespace: "default",
+ Info: info,
+ Chart: &chart.Chart{Metadata: &chart.Metadata{Name: "name", Version: "1.2.3", AppVersion: "3.2.1"}},
+ Hooks: hooks,
+ }}
+ }
+
+ tests := []cmdTestCase{{
+ name: "get status of a deployed release",
+ cmd: "status flummoxed-chickadee",
+ golden: "output/status.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ }),
+ }, {
+ name: "get status of a deployed release, with desc",
+ cmd: "status flummoxed-chickadee",
+ golden: "output/status-with-desc.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ Description: "Mock description",
+ }),
+ }, {
+ name: "get status of a deployed release with notes",
+ cmd: "status flummoxed-chickadee",
+ golden: "output/status-with-notes.txt",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ Notes: "release notes",
+ }),
+ }, {
+ name: "get status of a deployed release with notes in json",
+ cmd: "status flummoxed-chickadee -o json",
+ golden: "output/status.json",
+ rels: releasesMockWithStatus(&release.Info{
+ Status: common.StatusDeployed,
+ Notes: "release notes",
+ }),
+ }, {
+ name: "get status of a deployed release with resources",
+ cmd: "status flummoxed-chickadee",
+ golden: "output/status-with-resources.txt",
+ rels: releasesMockWithStatus(
+ &release.Info{
+ Status: common.StatusDeployed,
+ },
+ ),
+ }, {
+ name: "get status of a deployed release with resources in json",
+ cmd: "status flummoxed-chickadee -o json",
+ golden: "output/status-with-resources.json",
+ rels: releasesMockWithStatus(
+ &release.Info{
+ Status: common.StatusDeployed,
+ },
+ ),
+ }, {
+ name: "get status of a deployed release with test suite",
+ cmd: "status flummoxed-chickadee",
+ golden: "output/status-with-test-suite.txt",
+ rels: releasesMockWithStatus(
+ &release.Info{
+ Status: common.StatusDeployed,
+ },
+ &release.Hook{
+ Name: "never-run-test",
+ Events: []release.HookEvent{release.HookTest},
+ },
+ &release.Hook{
+ Name: "passing-test",
+ Events: []release.HookEvent{release.HookTest},
+ LastRun: release.HookExecution{
+ StartedAt: mustParseTime("2006-01-02T15:04:05Z"),
+ CompletedAt: mustParseTime("2006-01-02T15:04:07Z"),
+ Phase: release.HookPhaseSucceeded,
+ },
+ },
+ &release.Hook{
+ Name: "failing-test",
+ Events: []release.HookEvent{release.HookTest},
+ LastRun: release.HookExecution{
+ StartedAt: mustParseTime("2006-01-02T15:10:05Z"),
+ CompletedAt: mustParseTime("2006-01-02T15:10:07Z"),
+ Phase: release.HookPhaseFailed,
+ },
+ },
+ &release.Hook{
+ Name: "passing-pre-install",
+ Events: []release.HookEvent{release.HookPreInstall},
+ LastRun: release.HookExecution{
+ StartedAt: mustParseTime("2006-01-02T15:00:05Z"),
+ CompletedAt: mustParseTime("2006-01-02T15:00:07Z"),
+ Phase: release.HookPhaseSucceeded,
+ },
+ },
+ ),
+ }}
+ runTestCmd(t, tests)
+}
+
+func mustParseTime(t string) time.Time {
+ res, _ := time.Parse(time.RFC3339, t)
+ return res
+}
+
+func TestStatusCompletion(t *testing.T) {
+ rels := []*release.Release{
+ {
+ Name: "athos",
+ Namespace: "default",
+ Info: &release.Info{
+ Status: common.StatusDeployed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "Athos-chart",
+ Version: "1.2.3",
+ },
+ },
+ }, {
+ Name: "porthos",
+ Namespace: "default",
+ Info: &release.Info{
+ Status: common.StatusFailed,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "Porthos-chart",
+ Version: "111.222.333",
+ },
+ },
+ }, {
+ Name: "aramis",
+ Namespace: "default",
+ Info: &release.Info{
+ Status: common.StatusUninstalled,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "Aramis-chart",
+ Version: "0.0.0",
+ },
+ },
+ }, {
+ Name: "dartagnan",
+ Namespace: "gascony",
+ Info: &release.Info{
+ Status: common.StatusUnknown,
+ },
+ Chart: &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "Dartagnan-chart",
+ Version: "1.2.3-prerelease",
+ },
+ },
+ }}
+
+ tests := []cmdTestCase{{
+ name: "completion for status",
+ cmd: "__complete status a",
+ golden: "output/status-comp.txt",
+ rels: rels,
+ }, {
+ name: "completion for status with too many arguments",
+ cmd: "__complete status dartagnan ''",
+ golden: "output/status-wrong-args-comp.txt",
+ rels: rels,
+ }, {
+ name: "completion for status with global flag",
+ cmd: "__complete status --debug a",
+ golden: "output/status-comp.txt",
+ rels: rels,
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestStatusRevisionCompletion(t *testing.T) {
+ revisionFlagCompletionTest(t, "status")
+}
+
+func TestStatusOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "status")
+}
+
+func TestStatusFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "status", false)
+ checkFileCompletion(t, "status myrelease", false)
+}
diff --git a/helm/pkg/cmd/template.go b/helm/pkg/cmd/template.go
new file mode 100644
index 000000000..14f85042b
--- /dev/null
+++ b/helm/pkg/cmd/template.go
@@ -0,0 +1,277 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "sort"
+ "strings"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/cli/values"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+)
+
+const templateDesc = `
+Render chart templates locally and display the output.
+
+Any values that would normally be looked up or retrieved in-cluster will be
+faked locally. Additionally, none of the server-side testing of chart validity
+(e.g. whether an API is supported) is done.
+
+To specify the Kubernetes API versions used for Capabilities.APIVersions, use
+the '--api-versions' flag. This flag can be specified multiple times or as a
+comma-separated list:
+
+ $ helm template --api-versions networking.k8s.io/v1 --api-versions cert-manager.io/v1 mychart ./mychart
+
+or
+
+ $ helm template --api-versions networking.k8s.io/v1,cert-manager.io/v1 mychart ./mychart
+`
+
+func newTemplateCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ var validate bool
+ var includeCrds bool
+ var skipTests bool
+ client := action.NewInstall(cfg)
+ valueOpts := &values.Options{}
+ var kubeVersion string
+ var extraAPIs []string
+ var showFiles []string
+
+ cmd := &cobra.Command{
+ Use: "template [NAME] [CHART]",
+ Short: "locally render templates",
+ Long: templateDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compInstall(args, toComplete, client)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if kubeVersion != "" {
+ parsedKubeVersion, err := common.ParseKubeVersion(kubeVersion)
+ if err != nil {
+ return fmt.Errorf("invalid kube version '%s': %s", kubeVersion, err)
+ }
+ client.KubeVersion = parsedKubeVersion
+ }
+
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+ client.SetRegistryClient(registryClient)
+
+ dryRunStrategy, err := cmdGetDryRunFlagStrategy(cmd, true)
+ if err != nil {
+ return err
+ }
+ if validate {
+ // Mimic deprecated --validate flag behavior by enabling server dry run
+ dryRunStrategy = action.DryRunServer
+ }
+ client.DryRunStrategy = dryRunStrategy
+ client.ReleaseName = "release-name"
+ client.Replace = true // Skip the name check
+ client.APIVersions = common.VersionSet(extraAPIs)
+ client.IncludeCRDs = includeCrds
+ rel, err := runInstall(args, client, valueOpts, out)
+
+ if err != nil && !settings.Debug {
+ if rel != nil {
+ return fmt.Errorf("%w\n\nUse --debug flag to render out invalid YAML", err)
+ }
+ return err
+ }
+
+ // We ignore a potential error here because, when the --debug flag was specified,
+ // we always want to print the YAML, even if it is not valid. The error is still returned afterwards.
+ if rel != nil {
+ var manifests bytes.Buffer
+ fmt.Fprintln(&manifests, strings.TrimSpace(rel.Manifest))
+ if !client.DisableHooks {
+ fileWritten := make(map[string]bool)
+ for _, m := range rel.Hooks {
+ if skipTests && isTestHook(m) {
+ continue
+ }
+ if client.OutputDir == "" {
+ fmt.Fprintf(&manifests, "---\n# Source: %s\n%s\n", m.Path, m.Manifest)
+ } else {
+ newDir := client.OutputDir
+ if client.UseReleaseName {
+ newDir = filepath.Join(client.OutputDir, client.ReleaseName)
+ }
+ _, err := os.Stat(filepath.Join(newDir, m.Path))
+ if err == nil {
+ fileWritten[m.Path] = true
+ }
+
+ err = writeToFile(newDir, m.Path, m.Manifest, fileWritten[m.Path])
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ }
+
+ // if we have a list of files to render, then check that each of the
+ // provided files exists in the chart.
+ if len(showFiles) > 0 {
+ // This is necessary to ensure consistent manifest ordering when using --show-only
+ // with globs or directory names.
+ splitManifests := releaseutil.SplitManifests(manifests.String())
+ manifestsKeys := make([]string, 0, len(splitManifests))
+ for k := range splitManifests {
+ manifestsKeys = append(manifestsKeys, k)
+ }
+ sort.Sort(releaseutil.BySplitManifestsOrder(manifestsKeys))
+
+ manifestNameRegex := regexp.MustCompile("# Source: [^/]+/(.+)")
+ var manifestsToRender []string
+ for _, f := range showFiles {
+ missing := true
+ // Use linux-style filepath separators to unify user's input path
+ f = filepath.ToSlash(f)
+ for _, manifestKey := range manifestsKeys {
+ manifest := splitManifests[manifestKey]
+ submatch := manifestNameRegex.FindStringSubmatch(manifest)
+ if len(submatch) == 0 {
+ continue
+ }
+ manifestName := submatch[1]
+ // manifest.Name is rendered using linux-style filepath separators on Windows as
+ // well as macOS/linux.
+ manifestPathSplit := strings.Split(manifestName, "/")
+ // manifest.Path is connected using linux-style filepath separators on Windows as
+ // well as macOS/linux
+ manifestPath := strings.Join(manifestPathSplit, "/")
+
+ // if the filepath provided matches a manifest path in the
+ // chart, render that manifest
+ if matched, _ := filepath.Match(f, manifestPath); !matched {
+ continue
+ }
+ manifestsToRender = append(manifestsToRender, manifest)
+ missing = false
+ }
+ if missing {
+ return fmt.Errorf("could not find template %s in chart", f)
+ }
+ }
+ for _, m := range manifestsToRender {
+ fmt.Fprintf(out, "---\n%s\n", m)
+ }
+ } else {
+ fmt.Fprintf(out, "%s", manifests.String())
+ }
+ }
+
+ return err
+ },
+ }
+
+ f := cmd.Flags()
+ addInstallFlags(cmd, f, client, valueOpts)
+ f.StringArrayVarP(&showFiles, "show-only", "s", []string{}, "only show manifests rendered from the given templates")
+ f.StringVar(&client.OutputDir, "output-dir", "", "writes the executed templates to files in output-dir instead of stdout")
+ f.BoolVar(&validate, "validate", false, "deprecated")
+ f.MarkDeprecated("validate", "use '--dry-run=server' instead")
+ f.BoolVar(&includeCrds, "include-crds", false, "include CRDs in the templated output")
+ f.BoolVar(&skipTests, "skip-tests", false, "skip tests from templated output")
+ f.BoolVar(&client.IsUpgrade, "is-upgrade", false, "set .Release.IsUpgrade instead of .Release.IsInstall")
+ f.StringVar(&kubeVersion, "kube-version", "", "Kubernetes version used for Capabilities.KubeVersion")
+ f.StringSliceVarP(&extraAPIs, "api-versions", "a", []string{}, "Kubernetes api versions used for Capabilities.APIVersions (multiple can be specified)")
+ f.BoolVar(&client.UseReleaseName, "release-name", false, "use release name in the output-dir path.")
+ f.String(
+ "dry-run",
+ "client",
+ `simulates the operation either client-side or server-side. Must be either: "client", or "server". '--dry-run=client simulates the operation client-side only and avoids cluster connections. '--dry-run=server' simulates/validates the operation on the server, requiring cluster connectivity.`)
+ f.Lookup("dry-run").NoOptDefVal = "unset"
+ bindPostRenderFlag(cmd, &client.PostRenderer, settings)
+ cmd.MarkFlagsMutuallyExclusive("validate", "dry-run")
+
+ return cmd
+}
+
+func isTestHook(h *release.Hook) bool {
+ return slices.Contains(h.Events, release.HookTest)
+}
+
+// The following functions (writeToFile, createOrOpenFile, and ensureDirectoryForFile)
+// are copied from the actions package. This is part of a change to correct a
+// bug introduced by #8156. As part of the todo to refactor renderResources
+// this duplicate code should be removed. It is added here so that the API
+// surface area is as minimally impacted as possible in fixing the issue.
+func writeToFile(outputDir string, name string, data string, appendData bool) error {
+ outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+
+ err := ensureDirectoryForFile(outfileName)
+ if err != nil {
+ return err
+ }
+
+ f, err := createOrOpenFile(outfileName, appendData)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data)
+
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("wrote %s\n", outfileName)
+ return nil
+}
+
+func createOrOpenFile(filename string, appendData bool) (*os.File, error) {
+ if appendData {
+ return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+ }
+ return os.Create(filename)
+}
+
+func ensureDirectoryForFile(file string) error {
+ baseDir := filepath.Dir(file)
+ _, err := os.Stat(baseDir)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return err
+ }
+
+ return os.MkdirAll(baseDir, 0755)
+}
diff --git a/helm/pkg/cmd/template_test.go b/helm/pkg/cmd/template_test.go
new file mode 100644
index 000000000..5bcccf5d0
--- /dev/null
+++ b/helm/pkg/cmd/template_test.go
@@ -0,0 +1,208 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "path/filepath"
+ "testing"
+)
+
+var chartPath = "testdata/testcharts/subchart"
+
+func TestTemplateCmd(t *testing.T) {
+ deletevalchart := "testdata/testcharts/issue-9027"
+
+ tests := []cmdTestCase{
+ {
+ name: "check name",
+ cmd: fmt.Sprintf("template '%s'", chartPath),
+ golden: "output/template.txt",
+ },
+ {
+ name: "check set name",
+ cmd: fmt.Sprintf("template '%s' --set service.name=apache", chartPath),
+ golden: "output/template-set.txt",
+ },
+ {
+ name: "check values files",
+ cmd: fmt.Sprintf("template '%s' --values '%s'", chartPath, filepath.Join(chartPath, "/charts/subchartA/values.yaml")),
+ golden: "output/template-values-files.txt",
+ },
+ {
+ name: "check name template",
+ cmd: fmt.Sprintf(`template '%s' --name-template='foobar-{{ b64enc "abc" | lower }}-baz'`, chartPath),
+ golden: "output/template-name-template.txt",
+ },
+ {
+ name: "check no args",
+ cmd: "template",
+ wantError: true,
+ golden: "output/template-no-args.txt",
+ },
+ {
+ name: "check library chart",
+ cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/lib-chart"),
+ wantError: true,
+ golden: "output/template-lib-chart.txt",
+ },
+ {
+ name: "check chart bad type",
+ cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/chart-bad-type"),
+ wantError: true,
+ golden: "output/template-chart-bad-type.txt",
+ },
+ {
+ name: "check chart with dependency which is an app chart acting as a library chart",
+ cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/chart-with-template-lib-dep"),
+ golden: "output/template-chart-with-template-lib-dep.txt",
+ },
+ {
+ name: "check chart with dependency which is an app chart archive acting as a library chart",
+ cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/chart-with-template-lib-archive-dep"),
+ golden: "output/template-chart-with-template-lib-archive-dep.txt",
+ },
+ {
+ name: "check kube version",
+ cmd: fmt.Sprintf("template --kube-version 1.16.0 '%s'", chartPath),
+ golden: "output/template-with-kube-version.txt",
+ },
+ {
+ name: "check kube api versions",
+ cmd: fmt.Sprintf("template --api-versions helm.k8s.io/test,helm.k8s.io/test2 '%s'", chartPath),
+ golden: "output/template-with-api-version.txt",
+ },
+ {
+ name: "check kube api versions",
+ cmd: fmt.Sprintf("template --api-versions helm.k8s.io/test --api-versions helm.k8s.io/test2 '%s'", chartPath),
+ golden: "output/template-with-api-version.txt",
+ },
+ {
+ name: "template with CRDs",
+ cmd: fmt.Sprintf("template '%s' --include-crds", chartPath),
+ golden: "output/template-with-crds.txt",
+ },
+ {
+ name: "template with show-only one",
+ cmd: fmt.Sprintf("template '%s' --show-only templates/service.yaml", chartPath),
+ golden: "output/template-show-only-one.txt",
+ },
+ {
+ name: "template with show-only multiple",
+ cmd: fmt.Sprintf("template '%s' --show-only templates/service.yaml --show-only charts/subcharta/templates/service.yaml", chartPath),
+ golden: "output/template-show-only-multiple.txt",
+ },
+ {
+ name: "template with show-only glob",
+ cmd: fmt.Sprintf("template '%s' --show-only templates/subdir/role*", chartPath),
+ golden: "output/template-show-only-glob.txt",
+ // Repeat to ensure manifest ordering regressions are caught
+ repeat: 10,
+ },
+ {
+ name: "sorted output of manifests (order of filenames, then order of objects within each YAML file)",
+ cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/object-order"),
+ golden: "output/object-order.txt",
+ // Helm previously used random file order. Repeat the test so we
+ // don't accidentally get the expected result.
+ repeat: 10,
+ },
+ {
+ name: "chart with template with invalid yaml",
+ cmd: fmt.Sprintf("template '%s'", "testdata/testcharts/chart-with-template-with-invalid-yaml"),
+ wantError: true,
+ golden: "output/template-with-invalid-yaml.txt",
+ },
+ {
+ name: "chart with template with invalid yaml (--debug)",
+ cmd: fmt.Sprintf("template '%s' --debug", "testdata/testcharts/chart-with-template-with-invalid-yaml"),
+ wantError: true,
+ golden: "output/template-with-invalid-yaml-debug.txt",
+ },
+ {
+ name: "template skip-tests",
+ cmd: fmt.Sprintf(`template '%s' --skip-tests`, chartPath),
+ golden: "output/template-skip-tests.txt",
+ },
+ {
+ // This test case is to ensure the case where specified dependencies
+ // in the Chart.yaml and those where the Chart.yaml don't have them
+ // specified are the same.
+ name: "ensure nil/null values pass to subcharts delete values",
+ cmd: fmt.Sprintf("template '%s'", deletevalchart),
+ golden: "output/issue-9027.txt",
+ },
+ {
+ // Ensure that parent chart values take precedence over imported values
+ name: "template with imported subchart values ensuring import",
+ cmd: fmt.Sprintf("template '%s' --set configmap.enabled=true --set subchartb.enabled=true", chartPath),
+ golden: "output/template-subchart-cm.txt",
+ },
+ {
+ // Ensure that user input values take precedence over imported
+ // values from sub-charts.
+ name: "template with imported subchart values set with --set",
+ cmd: fmt.Sprintf("template '%s' --set configmap.enabled=true --set subchartb.enabled=true --set configmap.value=baz", chartPath),
+ golden: "output/template-subchart-cm-set.txt",
+ },
+ {
+ // Ensure that user input values take precedence over imported
+ // values from sub-charts when passed by file
+ name: "template with imported subchart values set with --set",
+ cmd: fmt.Sprintf("template '%s' -f %s/extra_values.yaml", chartPath, chartPath),
+ golden: "output/template-subchart-cm-set-file.txt",
+ },
+ }
+ runTestCmd(t, tests)
+}
+
+func TestTemplateVersionCompletion(t *testing.T) {
+ repoFile := "testdata/helmhome/helm/repositories.yaml"
+ repoCache := "testdata/helmhome/helm/repository"
+
+ repoSetup := fmt.Sprintf("--repository-config %s --repository-cache %s", repoFile, repoCache)
+
+ tests := []cmdTestCase{{
+ name: "completion for template version flag with release name",
+ cmd: fmt.Sprintf("%s __complete template releasename testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for template version flag with generate-name",
+ cmd: fmt.Sprintf("%s __complete template --generate-name testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for template version flag too few args",
+ cmd: fmt.Sprintf("%s __complete template testing/alpine --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for template version flag too many args",
+ cmd: fmt.Sprintf("%s __complete template releasename testing/alpine badarg --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for template version flag invalid chart",
+ cmd: fmt.Sprintf("%s __complete template releasename invalid/invalid --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestTemplateFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "template", false)
+ checkFileCompletion(t, "template --generate-name", true)
+ checkFileCompletion(t, "template myname", true)
+ checkFileCompletion(t, "template myname mychart", false)
+}
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/completion.yaml b/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/completion.yaml
new file mode 100644
index 000000000..e0b161c69
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/completion.yaml
@@ -0,0 +1,19 @@
+name: wrongname
+commands:
+ - name: empty
+ - name: full
+ commands:
+ - name: more
+ validArgs:
+ - one
+ - two
+ flags:
+ - b
+ - ball
+ - name: less
+ flags:
+ - a
+ - all
+flags:
+- z
+- q
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/fullenv.sh b/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/fullenv.sh
new file mode 100755
index 000000000..2efad9b3c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/fullenv.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+echo $HELM_PLUGIN_NAME
+echo $HELM_PLUGIN_DIR
+echo $HELM_PLUGINS
+echo $HELM_REPOSITORY_CONFIG
+echo $HELM_REPOSITORY_CACHE
+echo $HELM_BIN
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml b/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml
new file mode 100644
index 000000000..a58544b03
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helm home with space/helm/plugins/fullenv/plugin.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+name: fullenv
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "show env vars"
+ longHelp: "show all env vars"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/fullenv.sh"
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/repositories.yaml b/helm/pkg/cmd/testdata/helm home with space/helm/repositories.yaml
new file mode 100644
index 000000000..e9de487d6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helm home with space/helm/repositories.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+generated: 2016-10-03T16:03:10.640376913-06:00
+repositories:
+ - cache: testing-index.yaml
+ name: testing
+ url: http://example.com/charts
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/repository/test-name-charts.txt b/helm/pkg/cmd/testdata/helm home with space/helm/repository/test-name-charts.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/repository/test-name-index.yaml b/helm/pkg/cmd/testdata/helm home with space/helm/repository/test-name-index.yaml
new file mode 100644
index 000000000..d5ab620ad
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helm home with space/helm/repository/test-name-index.yaml
@@ -0,0 +1,3 @@
+apiVersion: v1
+entries: {}
+generated: "2020-09-09T19:50:50.198347916-04:00"
diff --git a/helm/pkg/cmd/testdata/helm home with space/helm/repository/testing-index.yaml b/helm/pkg/cmd/testdata/helm home with space/helm/repository/testing-index.yaml
new file mode 100644
index 000000000..91e4d463f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helm home with space/helm/repository/testing-index.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ url: https://charts.helm.sh/stable/alpine-0.1.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ created: "2018-06-27T10:00:18.230700509Z"
+ deprecated: true
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.1.0
+ appVersion: 1.2.3
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ url: https://charts.helm.sh/stable/alpine-0.2.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ created: "2018-07-09T11:34:37.797864902Z"
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.2.0
+ appVersion: 2.3.4
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ url: https://charts.helm.sh/stable/alpine-0.3.0-rc.1.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ created: "2020-11-12T08:44:58.872726222Z"
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.3.0-rc.1
+ appVersion: 3.0.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ mariadb:
+ - name: mariadb
+ url: https://charts.helm.sh/stable/mariadb-0.3.0.tgz
+ checksum: 65229f6de44a2be9f215d11dbff311673fc8ba56
+ created: "2018-04-23T08:20:27.160959131Z"
+ home: https://mariadb.org
+ sources:
+ - https://github.com/bitnami/bitnami-docker-mariadb
+ version: 0.3.0
+ description: Chart for MariaDB
+ keywords:
+ - mariadb
+ - mysql
+ - database
+ - sql
+ maintainers:
+ - name: Bitnami
+ email: containers@bitnami.com
+ icon: ""
+ apiVersion: v2
diff --git a/helm/pkg/cmd/testdata/helm-test-key.pub b/helm/pkg/cmd/testdata/helm-test-key.pub
new file mode 100644
index 000000000..38714f25a
Binary files /dev/null and b/helm/pkg/cmd/testdata/helm-test-key.pub differ
diff --git a/helm/pkg/cmd/testdata/helm-test-key.secret b/helm/pkg/cmd/testdata/helm-test-key.secret
new file mode 100644
index 000000000..a966aef93
Binary files /dev/null and b/helm/pkg/cmd/testdata/helm-test-key.secret differ
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh b/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh
new file mode 100755
index 000000000..6c62be8b9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/args.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env sh
+echo "$@"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.complete b/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.complete
new file mode 100755
index 000000000..2b00c2281
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.complete
@@ -0,0 +1,13 @@
+#!/usr/bin/env sh
+
+echo "plugin.complete was called"
+echo "Namespace: ${HELM_NAMESPACE:-NO_NS}"
+echo "Num args received: ${#}"
+echo "Args received: ${@}"
+
+# Final printout is the optional completion directive of the form :
+if [ "$HELM_NAMESPACE" = "default" ]; then
+ echo ":4"
+else
+ echo ":2"
+fi
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml
new file mode 100644
index 000000000..4156e7f17
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/args/plugin.yaml
@@ -0,0 +1,11 @@
+name: args
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "echo args"
+ longHelp: "This echos args"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/args.sh"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/completion.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/completion.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.complete b/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.complete
new file mode 100755
index 000000000..63569aada
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.complete
@@ -0,0 +1,13 @@
+#!/usr/bin/env sh
+
+echo "echo plugin.complete was called"
+echo "Namespace: ${HELM_NAMESPACE:-NO_NS}"
+echo "Num args received: ${#}"
+echo "Args received: ${@}"
+
+# Final printout is the optional completion directive of the form :
+if [ "$HELM_NAMESPACE" = "default" ]; then
+ echo ":0"
+# else
+ # Don't include the directive, to test it is really optional
+fi
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml
new file mode 100644
index 000000000..a0a0b5255
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/echo/plugin.yaml
@@ -0,0 +1,11 @@
+name: echo
+type: cli/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ shortHelp: "echo stuff"
+ longHelp: "This echos stuff"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "echo hello"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/completion.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/completion.yaml
new file mode 100644
index 000000000..e5bf440f6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/completion.yaml
@@ -0,0 +1,5 @@
+commands:
+ - name: code
+ flags:
+ - a
+ - b
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh b/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh
new file mode 100755
index 000000000..9cf68da68
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/exitwith.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env sh
+exit "$1"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml
new file mode 100644
index 000000000..ba9508255
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/exitwith/plugin.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+name: exitwith
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "exitwith code"
+ longHelp: "This exits with the specified exit code"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/exitwith.sh"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/completion.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/completion.yaml
new file mode 100644
index 000000000..e0b161c69
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/completion.yaml
@@ -0,0 +1,19 @@
+name: wrongname
+commands:
+ - name: empty
+ - name: full
+ commands:
+ - name: more
+ validArgs:
+ - one
+ - two
+ flags:
+ - b
+ - ball
+ - name: less
+ flags:
+ - a
+ - all
+flags:
+- z
+- q
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh b/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh
new file mode 100755
index 000000000..cc0c64a6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/fullenv.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+echo HELM_PLUGIN_NAME=${HELM_PLUGIN_NAME}
+echo HELM_PLUGIN_DIR=${HELM_PLUGIN_DIR}
+echo HELM_PLUGINS=${HELM_PLUGINS}
+echo HELM_REPOSITORY_CONFIG=${HELM_REPOSITORY_CONFIG}
+echo HELM_REPOSITORY_CACHE=${HELM_REPOSITORY_CACHE}
+echo HELM_BIN=${HELM_BIN}
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml
new file mode 100644
index 000000000..a58544b03
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/fullenv/plugin.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+name: fullenv
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "show env vars"
+ longHelp: "show all env vars"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "$HELM_PLUGIN_DIR/fullenv.sh"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml
new file mode 100644
index 000000000..b6e8afa57
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/plugin.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+name: "postrenderer-v1"
+version: "1.2.3"
+type: postrenderer/v1
+runtime: subprocess
+runtimeConfig:
+ platformCommand:
+ - command: "${HELM_PLUGIN_DIR}/sed-test.sh"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh b/helm/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh
new file mode 100755
index 000000000..a016e398f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/postrenderer-v1/sed-test.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+if [ $# -eq 0 ]; then
+ sed s/FOOTEST/BARTEST/g <&0
+else
+ sed s/FOOTEST/"$*"/g <&0
+fi
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml
new file mode 100644
index 000000000..027573ed4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml
@@ -0,0 +1,13 @@
+name: shortenv
+commands:
+ - name: list
+ flags:
+ - a
+ - all
+ - log
+ - name: remove
+ validArgs:
+ - all
+ - one
+flags:
+- global
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh b/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh
new file mode 100755
index 000000000..9e823ac13
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env sh
+
+echo HELM_PLUGIN_NAME=${HELM_PLUGIN_NAME}
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml b/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml
new file mode 100644
index 000000000..5fe053ed0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+name: shortenv
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "env stuff"
+ longHelp: "show the env"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: ${HELM_PLUGIN_DIR}/plugin-name.sh
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/repositories.yaml b/helm/pkg/cmd/testdata/helmhome/helm/repositories.yaml
new file mode 100644
index 000000000..3835aaa5a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/repositories.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+generated: 2016-10-03T16:03:10.640376913-06:00
+repositories:
+- cache: testing-index.yaml
+ name: testing
+ url: http://example.com/charts
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/repository/test-name-charts.txt b/helm/pkg/cmd/testdata/helmhome/helm/repository/test-name-charts.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/repository/test-name-index.yaml b/helm/pkg/cmd/testdata/helmhome/helm/repository/test-name-index.yaml
new file mode 100644
index 000000000..d5ab620ad
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/repository/test-name-index.yaml
@@ -0,0 +1,3 @@
+apiVersion: v1
+entries: {}
+generated: "2020-09-09T19:50:50.198347916-04:00"
diff --git a/helm/pkg/cmd/testdata/helmhome/helm/repository/testing-index.yaml b/helm/pkg/cmd/testdata/helmhome/helm/repository/testing-index.yaml
new file mode 100644
index 000000000..91e4d463f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/helmhome/helm/repository/testing-index.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ url: https://charts.helm.sh/stable/alpine-0.1.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ created: "2018-06-27T10:00:18.230700509Z"
+ deprecated: true
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.1.0
+ appVersion: 1.2.3
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ url: https://charts.helm.sh/stable/alpine-0.2.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ created: "2018-07-09T11:34:37.797864902Z"
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.2.0
+ appVersion: 2.3.4
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ url: https://charts.helm.sh/stable/alpine-0.3.0-rc.1.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ created: "2020-11-12T08:44:58.872726222Z"
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.3.0-rc.1
+ appVersion: 3.0.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ mariadb:
+ - name: mariadb
+ url: https://charts.helm.sh/stable/mariadb-0.3.0.tgz
+ checksum: 65229f6de44a2be9f215d11dbff311673fc8ba56
+ created: "2018-04-23T08:20:27.160959131Z"
+ home: https://mariadb.org
+ sources:
+ - https://github.com/bitnami/bitnami-docker-mariadb
+ version: 0.3.0
+ description: Chart for MariaDB
+ keywords:
+ - mariadb
+ - mysql
+ - database
+ - sql
+ maintainers:
+ - name: Bitnami
+ email: containers@bitnami.com
+ icon: ""
+ apiVersion: v2
diff --git a/helm/pkg/cmd/testdata/output/chart-with-subchart-update.txt b/helm/pkg/cmd/testdata/output/chart-with-subchart-update.txt
new file mode 100644
index 000000000..5b2083e1d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/chart-with-subchart-update.txt
@@ -0,0 +1,9 @@
+NAME: updeps
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
+NOTES:
+PARENT NOTES
diff --git a/helm/pkg/cmd/testdata/output/dependency-list-archive.txt b/helm/pkg/cmd/testdata/output/dependency-list-archive.txt
new file mode 100644
index 000000000..ffd4542b0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/dependency-list-archive.txt
@@ -0,0 +1,5 @@
+NAME VERSION REPOSITORY STATUS
+reqsubchart 0.1.0 https://example.com/charts unpacked
+reqsubchart2 0.2.0 https://example.com/charts unpacked
+reqsubchart3 >=0.1.0 https://example.com/charts unpacked
+
diff --git a/helm/pkg/cmd/testdata/output/dependency-list-no-chart-linux.txt b/helm/pkg/cmd/testdata/output/dependency-list-no-chart-linux.txt
new file mode 100644
index 000000000..8fab8f8eb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/dependency-list-no-chart-linux.txt
@@ -0,0 +1 @@
+Error: stat /no/such/chart: no such file or directory
diff --git a/helm/pkg/cmd/testdata/output/dependency-list-no-requirements-linux.txt b/helm/pkg/cmd/testdata/output/dependency-list-no-requirements-linux.txt
new file mode 100644
index 000000000..35fe1d2e3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/dependency-list-no-requirements-linux.txt
@@ -0,0 +1 @@
+WARNING: no dependencies at testdata/testcharts/alpine/charts
diff --git a/helm/pkg/cmd/testdata/output/dependency-list.txt b/helm/pkg/cmd/testdata/output/dependency-list.txt
new file mode 100644
index 000000000..b57c21a21
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/dependency-list.txt
@@ -0,0 +1,5 @@
+NAME VERSION REPOSITORY STATUS
+reqsubchart 0.1.0 https://example.com/charts unpacked
+reqsubchart2 0.2.0 https://example.com/charts unpacked
+reqsubchart3 >=0.1.0 https://example.com/charts ok
+
diff --git a/helm/pkg/cmd/testdata/output/deprecated-chart.txt b/helm/pkg/cmd/testdata/output/deprecated-chart.txt
new file mode 100644
index 000000000..fcf5cc0ef
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/deprecated-chart.txt
@@ -0,0 +1,7 @@
+NAME: aeneas
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/docs-type-comp.txt b/helm/pkg/cmd/testdata/output/docs-type-comp.txt
new file mode 100644
index 000000000..69494f87d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/docs-type-comp.txt
@@ -0,0 +1,5 @@
+bash
+man
+markdown
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/empty_default_comp.txt b/helm/pkg/cmd/testdata/output/empty_default_comp.txt
new file mode 100644
index 000000000..879d50d0e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/empty_default_comp.txt
@@ -0,0 +1,2 @@
+:0
+Completion ended with directive: ShellCompDirectiveDefault
diff --git a/helm/pkg/cmd/testdata/output/empty_nofile_comp.txt b/helm/pkg/cmd/testdata/output/empty_nofile_comp.txt
new file mode 100644
index 000000000..3c537283e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/empty_nofile_comp.txt
@@ -0,0 +1,3 @@
+_activeHelp_ This command does not take any more arguments (but may accept flags).
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/env-comp.txt b/helm/pkg/cmd/testdata/output/env-comp.txt
new file mode 100644
index 000000000..9d38ee464
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/env-comp.txt
@@ -0,0 +1,24 @@
+HELM_BIN
+HELM_BURST_LIMIT
+HELM_CACHE_HOME
+HELM_CONFIG_HOME
+HELM_CONTENT_CACHE
+HELM_DATA_HOME
+HELM_DEBUG
+HELM_KUBEAPISERVER
+HELM_KUBEASGROUPS
+HELM_KUBEASUSER
+HELM_KUBECAFILE
+HELM_KUBECONTEXT
+HELM_KUBEINSECURE_SKIP_TLS_VERIFY
+HELM_KUBETLS_SERVER_NAME
+HELM_KUBETOKEN
+HELM_MAX_HISTORY
+HELM_NAMESPACE
+HELM_PLUGINS
+HELM_QPS
+HELM_REGISTRY_CONFIG
+HELM_REPOSITORY_CACHE
+HELM_REPOSITORY_CONFIG
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/get-all-no-args.txt b/helm/pkg/cmd/testdata/output/get-all-no-args.txt
new file mode 100644
index 000000000..cc3fc2ad1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-all-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm get all" requires 1 argument
+
+Usage: helm get all RELEASE_NAME [flags]
diff --git a/helm/pkg/cmd/testdata/output/get-hooks-no-args.txt b/helm/pkg/cmd/testdata/output/get-hooks-no-args.txt
new file mode 100644
index 000000000..2911fdb88
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-hooks-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm get hooks" requires 1 argument
+
+Usage: helm get hooks RELEASE_NAME [flags]
diff --git a/helm/pkg/cmd/testdata/output/get-hooks.txt b/helm/pkg/cmd/testdata/output/get-hooks.txt
new file mode 100644
index 000000000..81e87b1f1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-hooks.txt
@@ -0,0 +1,8 @@
+---
+# Source: pre-install-hook.yaml
+apiVersion: v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": pre-install
+
diff --git a/helm/pkg/cmd/testdata/output/get-manifest-no-args.txt b/helm/pkg/cmd/testdata/output/get-manifest-no-args.txt
new file mode 100644
index 000000000..df7aa5b04
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-manifest-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm get manifest" requires 1 argument
+
+Usage: helm get manifest RELEASE_NAME [flags]
diff --git a/helm/pkg/cmd/testdata/output/get-manifest.txt b/helm/pkg/cmd/testdata/output/get-manifest.txt
new file mode 100644
index 000000000..88937e089
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-manifest.txt
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: fixture
+
diff --git a/helm/pkg/cmd/testdata/output/get-metadata-args.txt b/helm/pkg/cmd/testdata/output/get-metadata-args.txt
new file mode 100644
index 000000000..acd3f4c15
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-metadata-args.txt
@@ -0,0 +1,3 @@
+Error: "helm get metadata" requires 1 argument
+
+Usage: helm get metadata RELEASE_NAME [flags]
diff --git a/helm/pkg/cmd/testdata/output/get-metadata.json b/helm/pkg/cmd/testdata/output/get-metadata.json
new file mode 100644
index 000000000..9166f87ac
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-metadata.json
@@ -0,0 +1 @@
+{"name":"thomas-guide","chart":"foo","version":"0.1.0-beta.1","appVersion":"1.0","annotations":{"category":"web-apps","supported":"true"},"labels":{"key1":"value1"},"dependencies":[{"name":"cool-plugin","version":"1.0.0","repository":"https://coolplugin.io/charts","condition":"coolPlugin.enabled","enabled":true},{"name":"crds","version":"2.7.1","repository":"","condition":"crds.enabled"}],"namespace":"default","revision":1,"status":"deployed","deployedAt":"1977-09-02T22:04:05Z"}
diff --git a/helm/pkg/cmd/testdata/output/get-metadata.txt b/helm/pkg/cmd/testdata/output/get-metadata.txt
new file mode 100644
index 000000000..b3cb73ee2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-metadata.txt
@@ -0,0 +1,12 @@
+NAME: thomas-guide
+CHART: foo
+VERSION: 0.1.0-beta.1
+APP_VERSION: 1.0
+ANNOTATIONS: category=web-apps,supported=true
+LABELS: key1=value1
+DEPENDENCIES: cool-plugin,crds
+NAMESPACE: default
+REVISION: 1
+STATUS: deployed
+DEPLOYED_AT: 1977-09-02T22:04:05Z
+APPLY_METHOD: client-side apply (defaulted)
diff --git a/helm/pkg/cmd/testdata/output/get-metadata.yaml b/helm/pkg/cmd/testdata/output/get-metadata.yaml
new file mode 100644
index 000000000..98f567837
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-metadata.yaml
@@ -0,0 +1,23 @@
+annotations:
+ category: web-apps
+ supported: "true"
+appVersion: "1.0"
+chart: foo
+dependencies:
+- condition: coolPlugin.enabled
+ enabled: true
+ name: cool-plugin
+ repository: https://coolplugin.io/charts
+ version: 1.0.0
+- condition: crds.enabled
+ name: crds
+ repository: ""
+ version: 2.7.1
+deployedAt: "1977-09-02T22:04:05Z"
+labels:
+ key1: value1
+name: thomas-guide
+namespace: default
+revision: 1
+status: deployed
+version: 0.1.0-beta.1
diff --git a/helm/pkg/cmd/testdata/output/get-notes-no-args.txt b/helm/pkg/cmd/testdata/output/get-notes-no-args.txt
new file mode 100644
index 000000000..1a0c20caa
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-notes-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm get notes" requires 1 argument
+
+Usage: helm get notes RELEASE_NAME [flags]
diff --git a/helm/pkg/cmd/testdata/output/get-notes.txt b/helm/pkg/cmd/testdata/output/get-notes.txt
new file mode 100644
index 000000000..e710c7801
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-notes.txt
@@ -0,0 +1,2 @@
+NOTES:
+Some mock release notes!
diff --git a/helm/pkg/cmd/testdata/output/get-release-template.txt b/helm/pkg/cmd/testdata/output/get-release-template.txt
new file mode 100644
index 000000000..02d44fb01
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-release-template.txt
@@ -0,0 +1 @@
+0.1.0-beta.1
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/output/get-release.txt b/helm/pkg/cmd/testdata/output/get-release.txt
new file mode 100644
index 000000000..dbca662c5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-release.txt
@@ -0,0 +1,33 @@
+NAME: thomas-guide
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+CHART: foo
+VERSION: 0.1.0-beta.1
+APP_VERSION: 1.0
+DESCRIPTION: Release mock
+TEST SUITE: None
+USER-SUPPLIED VALUES:
+name: value
+
+COMPUTED VALUES:
+name: value
+
+HOOKS:
+---
+# Source: pre-install-hook.yaml
+apiVersion: v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": pre-install
+
+MANIFEST:
+apiVersion: v1
+kind: Secret
+metadata:
+ name: fixture
+
+NOTES:
+Some mock release notes!
diff --git a/helm/pkg/cmd/testdata/output/get-values-all.txt b/helm/pkg/cmd/testdata/output/get-values-all.txt
new file mode 100644
index 000000000..b7e9696bc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-values-all.txt
@@ -0,0 +1,2 @@
+COMPUTED VALUES:
+name: value
diff --git a/helm/pkg/cmd/testdata/output/get-values-args.txt b/helm/pkg/cmd/testdata/output/get-values-args.txt
new file mode 100644
index 000000000..c8a65e7f3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-values-args.txt
@@ -0,0 +1,3 @@
+Error: "helm get values" requires 1 argument
+
+Usage: helm get values RELEASE_NAME [flags]
diff --git a/helm/pkg/cmd/testdata/output/get-values.txt b/helm/pkg/cmd/testdata/output/get-values.txt
new file mode 100644
index 000000000..b7d146b15
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/get-values.txt
@@ -0,0 +1,2 @@
+USER-SUPPLIED VALUES:
+name: value
diff --git a/helm/pkg/cmd/testdata/output/history-limit.txt b/helm/pkg/cmd/testdata/output/history-limit.txt
new file mode 100644
index 000000000..aee0fadb2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/history-limit.txt
@@ -0,0 +1,3 @@
+REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
+3 Fri Sep 2 22:04:05 1977 superseded foo-0.1.0-beta.1 1.0 Release mock
+4 Fri Sep 2 22:04:05 1977 deployed foo-0.1.0-beta.1 1.0 Release mock
diff --git a/helm/pkg/cmd/testdata/output/history.json b/helm/pkg/cmd/testdata/output/history.json
new file mode 100644
index 000000000..35311d3ce
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/history.json
@@ -0,0 +1 @@
+[{"revision":3,"updated":"1977-09-02T22:04:05Z","status":"superseded","chart":"foo-0.1.0-beta.1","app_version":"1.0","description":"Release mock"},{"revision":4,"updated":"1977-09-02T22:04:05Z","status":"deployed","chart":"foo-0.1.0-beta.1","app_version":"1.0","description":"Release mock"}]
diff --git a/helm/pkg/cmd/testdata/output/history.txt b/helm/pkg/cmd/testdata/output/history.txt
new file mode 100644
index 000000000..2a5d69c11
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/history.txt
@@ -0,0 +1,5 @@
+REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION
+1 Fri Sep 2 22:04:05 1977 superseded foo-0.1.0-beta.1 1.0 Release mock
+2 Fri Sep 2 22:04:05 1977 superseded foo-0.1.0-beta.1 1.0 Release mock
+3 Fri Sep 2 22:04:05 1977 superseded foo-0.1.0-beta.1 1.0 Release mock
+4 Fri Sep 2 22:04:05 1977 deployed foo-0.1.0-beta.1 1.0 Release mock
diff --git a/helm/pkg/cmd/testdata/output/history.yaml b/helm/pkg/cmd/testdata/output/history.yaml
new file mode 100644
index 000000000..b7ae03be7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/history.yaml
@@ -0,0 +1,12 @@
+- app_version: "1.0"
+ chart: foo-0.1.0-beta.1
+ description: Release mock
+ revision: 3
+ status: superseded
+ updated: "1977-09-02T22:04:05Z"
+- app_version: "1.0"
+ chart: foo-0.1.0-beta.1
+ description: Release mock
+ revision: 4
+ status: deployed
+ updated: "1977-09-02T22:04:05Z"
diff --git a/helm/pkg/cmd/testdata/output/install-and-replace.txt b/helm/pkg/cmd/testdata/output/install-and-replace.txt
new file mode 100644
index 000000000..fcf5cc0ef
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-and-replace.txt
@@ -0,0 +1,7 @@
+NAME: aeneas
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-and-take-ownership.txt b/helm/pkg/cmd/testdata/output/install-and-take-ownership.txt
new file mode 100644
index 000000000..413329ae1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-and-take-ownership.txt
@@ -0,0 +1,7 @@
+NAME: aeneas-take-ownership
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-chart-bad-type.txt b/helm/pkg/cmd/testdata/output/install-chart-bad-type.txt
new file mode 100644
index 000000000..c482a793d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-chart-bad-type.txt
@@ -0,0 +1 @@
+Error: INSTALLATION FAILED: validation: chart.metadata.type must be application or library
diff --git a/helm/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt b/helm/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt
new file mode 100644
index 000000000..eb770967f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-dry-run-with-secret-hidden.txt
@@ -0,0 +1,21 @@
+NAME: secrets
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: pending-install
+REVISION: 1
+DESCRIPTION: Dry run complete
+TEST SUITE: None
+HOOKS:
+MANIFEST:
+---
+# Source: chart-with-secret/templates/secret.yaml
+# HIDDEN: The Secret output has been suppressed
+---
+# Source: chart-with-secret/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-configmap
+data:
+ foo: bar
+
diff --git a/helm/pkg/cmd/testdata/output/install-dry-run-with-secret.txt b/helm/pkg/cmd/testdata/output/install-dry-run-with-secret.txt
new file mode 100644
index 000000000..d22c1437f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-dry-run-with-secret.txt
@@ -0,0 +1,26 @@
+NAME: secrets
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: pending-install
+REVISION: 1
+DESCRIPTION: Dry run complete
+TEST SUITE: None
+HOOKS:
+MANIFEST:
+---
+# Source: chart-with-secret/templates/secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+stringData:
+ foo: bar
+---
+# Source: chart-with-secret/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-configmap
+data:
+ foo: bar
+
diff --git a/helm/pkg/cmd/testdata/output/install-hide-secret.txt b/helm/pkg/cmd/testdata/output/install-hide-secret.txt
new file mode 100644
index 000000000..165f14f73
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-hide-secret.txt
@@ -0,0 +1 @@
+Error: INSTALLATION FAILED: hiding Kubernetes secrets requires a dry-run mode
diff --git a/helm/pkg/cmd/testdata/output/install-lib-chart.txt b/helm/pkg/cmd/testdata/output/install-lib-chart.txt
new file mode 100644
index 000000000..c482a793d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-lib-chart.txt
@@ -0,0 +1 @@
+Error: INSTALLATION FAILED: validation: chart.metadata.type must be application or library
diff --git a/helm/pkg/cmd/testdata/output/install-name-template.txt b/helm/pkg/cmd/testdata/output/install-name-template.txt
new file mode 100644
index 000000000..bcc5f87ba
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-name-template.txt
@@ -0,0 +1,7 @@
+NAME: foobar
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-no-args.txt b/helm/pkg/cmd/testdata/output/install-no-args.txt
new file mode 100644
index 000000000..47f010ab8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm install" requires at least 1 argument
+
+Usage: helm install [NAME] [CHART] [flags]
diff --git a/helm/pkg/cmd/testdata/output/install-no-hooks.txt b/helm/pkg/cmd/testdata/output/install-no-hooks.txt
new file mode 100644
index 000000000..fcf5cc0ef
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-no-hooks.txt
@@ -0,0 +1,7 @@
+NAME: aeneas
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-multiple-values-files.txt b/helm/pkg/cmd/testdata/output/install-with-multiple-values-files.txt
new file mode 100644
index 000000000..1116cb907
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-multiple-values-files.txt
@@ -0,0 +1,7 @@
+NAME: virgil
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-multiple-values.txt b/helm/pkg/cmd/testdata/output/install-with-multiple-values.txt
new file mode 100644
index 000000000..1116cb907
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-multiple-values.txt
@@ -0,0 +1,7 @@
+NAME: virgil
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-timeout.txt b/helm/pkg/cmd/testdata/output/install-with-timeout.txt
new file mode 100644
index 000000000..bcc5f87ba
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-timeout.txt
@@ -0,0 +1,7 @@
+NAME: foobar
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-values-file.txt b/helm/pkg/cmd/testdata/output/install-with-values-file.txt
new file mode 100644
index 000000000..1116cb907
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-values-file.txt
@@ -0,0 +1,7 @@
+NAME: virgil
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-values.txt b/helm/pkg/cmd/testdata/output/install-with-values.txt
new file mode 100644
index 000000000..1116cb907
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-values.txt
@@ -0,0 +1,7 @@
+NAME: virgil
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-wait-for-jobs.txt b/helm/pkg/cmd/testdata/output/install-with-wait-for-jobs.txt
new file mode 100644
index 000000000..c5676c610
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-wait-for-jobs.txt
@@ -0,0 +1,7 @@
+NAME: apollo
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install-with-wait.txt b/helm/pkg/cmd/testdata/output/install-with-wait.txt
new file mode 100644
index 000000000..c5676c610
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install-with-wait.txt
@@ -0,0 +1,7 @@
+NAME: apollo
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/install.txt b/helm/pkg/cmd/testdata/output/install.txt
new file mode 100644
index 000000000..fcf5cc0ef
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/install.txt
@@ -0,0 +1,7 @@
+NAME: aeneas
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/issue-9027.txt b/helm/pkg/cmd/testdata/output/issue-9027.txt
new file mode 100644
index 000000000..eb19fc383
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/issue-9027.txt
@@ -0,0 +1,32 @@
+---
+# Source: issue-9027/charts/subchart/templates/values.yaml
+global:
+ hash:
+ key3: 13
+ key4: 4
+ key5: 5
+ key6: 6
+hash:
+ key3: 13
+ key4: 4
+ key5: 5
+ key6: 6
+---
+# Source: issue-9027/templates/values.yaml
+global:
+ hash:
+ key1: null
+ key2: null
+ key3: 13
+subchart:
+ global:
+ hash:
+ key3: 13
+ key4: 4
+ key5: 5
+ key6: 6
+ hash:
+ key3: 13
+ key4: 4
+ key5: 5
+ key6: 6
diff --git a/helm/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt b/helm/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt
new file mode 100644
index 000000000..67ed58ec3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts-with-subcharts.txt
@@ -0,0 +1,21 @@
+==> Linting testdata/testcharts/chart-with-bad-subcharts
+[INFO] Chart.yaml: icon is recommended
+[WARNING] templates/: directory does not exist
+[ERROR] : unable to load chart
+ error unpacking subchart bad-subchart in chart-with-bad-subcharts: validation: chart.metadata.name is required
+
+==> Linting testdata/testcharts/chart-with-bad-subcharts/charts/bad-subchart
+[ERROR] Chart.yaml: name is required
+[ERROR] Chart.yaml: apiVersion is required. The value must be either "v1" or "v2"
+[ERROR] Chart.yaml: version is required
+[INFO] Chart.yaml: icon is recommended
+[WARNING] Chart.yaml: version '' is not a valid SemVerV2
+[WARNING] templates/: directory does not exist
+[ERROR] : unable to load chart
+ validation: chart.metadata.name is required
+
+==> Linting testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart
+[INFO] Chart.yaml: icon is recommended
+[WARNING] templates/: directory does not exist
+
+Error: 3 chart(s) linted, 2 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt b/helm/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt
new file mode 100644
index 000000000..5a1c388bb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-chart-with-bad-subcharts.txt
@@ -0,0 +1,7 @@
+==> Linting testdata/testcharts/chart-with-bad-subcharts
+[INFO] Chart.yaml: icon is recommended
+[WARNING] templates/: directory does not exist
+[ERROR] : unable to load chart
+ error unpacking subchart bad-subchart in chart-with-bad-subcharts: validation: chart.metadata.name is required
+
+Error: 1 chart(s) linted, 1 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-old-k8s.txt b/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-old-k8s.txt
new file mode 100644
index 000000000..bd0d70000
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-old-k8s.txt
@@ -0,0 +1,4 @@
+==> Linting testdata/testcharts/chart-with-deprecated-api
+[INFO] Chart.yaml: icon is recommended
+
+1 chart(s) linted, 0 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt b/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt
new file mode 100644
index 000000000..a1ec4394e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api-strict.txt
@@ -0,0 +1,5 @@
+==> Linting testdata/testcharts/chart-with-deprecated-api
+[INFO] Chart.yaml: icon is recommended
+[WARNING] templates/horizontalpodautoscaler.yaml: autoscaling/v2beta1 HorizontalPodAutoscaler is deprecated in v1.22+, unavailable in v1.25+; use autoscaling/v2 HorizontalPodAutoscaler
+
+Error: 1 chart(s) linted, 1 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt b/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt
new file mode 100644
index 000000000..dac54620c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-chart-with-deprecated-api.txt
@@ -0,0 +1,5 @@
+==> Linting testdata/testcharts/chart-with-deprecated-api
+[INFO] Chart.yaml: icon is recommended
+[WARNING] templates/horizontalpodautoscaler.yaml: autoscaling/v2beta1 HorizontalPodAutoscaler is deprecated in v1.22+, unavailable in v1.25+; use autoscaling/v2 HorizontalPodAutoscaler
+
+1 chart(s) linted, 0 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-quiet-with-error.txt b/helm/pkg/cmd/testdata/output/lint-quiet-with-error.txt
new file mode 100644
index 000000000..0731a07d1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-quiet-with-error.txt
@@ -0,0 +1,8 @@
+==> Linting testdata/testcharts/chart-bad-requirements
+[ERROR] Chart.yaml: unable to parse YAML
+ error converting YAML to JSON: yaml: line 6: did not find expected '-' indicator
+[WARNING] templates/: directory does not exist
+[ERROR] : unable to load chart
+ cannot load Chart.yaml: error converting YAML to JSON: yaml: line 6: did not find expected '-' indicator
+
+Error: 2 chart(s) linted, 1 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-quiet-with-warning.txt b/helm/pkg/cmd/testdata/output/lint-quiet-with-warning.txt
new file mode 100644
index 000000000..ebf6c1989
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/lint-quiet-with-warning.txt
@@ -0,0 +1,4 @@
+==> Linting testdata/testcharts/chart-with-only-crds
+[WARNING] templates/: directory does not exist
+
+1 chart(s) linted, 0 chart(s) failed
diff --git a/helm/pkg/cmd/testdata/output/lint-quiet.txt b/helm/pkg/cmd/testdata/output/lint-quiet.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/output/list-all-date-reversed.txt b/helm/pkg/cmd/testdata/output/list-all-date-reversed.txt
new file mode 100644
index 000000000..d185334a2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-date-reversed.txt
@@ -0,0 +1,9 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-all-date.txt b/helm/pkg/cmd/testdata/output/list-all-date.txt
new file mode 100644
index 000000000..5e5f9efee
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-date.txt
@@ -0,0 +1,9 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-all-max.txt b/helm/pkg/cmd/testdata/output/list-all-max.txt
new file mode 100644
index 000000000..922896391
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-max.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-all-no-headers.txt b/helm/pkg/cmd/testdata/output/list-all-no-headers.txt
new file mode 100644
index 000000000..33581d8c5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-no-headers.txt
@@ -0,0 +1,8 @@
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-all-offset.txt b/helm/pkg/cmd/testdata/output/list-all-offset.txt
new file mode 100644
index 000000000..e17fd7b00
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-offset.txt
@@ -0,0 +1,8 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-all-reverse.txt b/helm/pkg/cmd/testdata/output/list-all-reverse.txt
new file mode 100644
index 000000000..31bb3de96
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-reverse.txt
@@ -0,0 +1,9 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-all-short-json.txt b/helm/pkg/cmd/testdata/output/list-all-short-json.txt
new file mode 100644
index 000000000..6dac52c43
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-short-json.txt
@@ -0,0 +1 @@
+["drax","gamora","groot","hummingbird","iguana","rocket","starlord","thanos"]
diff --git a/helm/pkg/cmd/testdata/output/list-all-short-yaml.txt b/helm/pkg/cmd/testdata/output/list-all-short-yaml.txt
new file mode 100644
index 000000000..2ae0e88ad
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-short-yaml.txt
@@ -0,0 +1,8 @@
+- drax
+- gamora
+- groot
+- hummingbird
+- iguana
+- rocket
+- starlord
+- thanos
diff --git a/helm/pkg/cmd/testdata/output/list-all-short.txt b/helm/pkg/cmd/testdata/output/list-all-short.txt
new file mode 100644
index 000000000..52871d8b4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all-short.txt
@@ -0,0 +1,8 @@
+drax
+gamora
+groot
+hummingbird
+iguana
+rocket
+starlord
+thanos
diff --git a/helm/pkg/cmd/testdata/output/list-all.txt b/helm/pkg/cmd/testdata/output/list-all.txt
new file mode 100644
index 000000000..ef6d44cd5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-all.txt
@@ -0,0 +1,9 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-date-reversed.txt b/helm/pkg/cmd/testdata/output/list-date-reversed.txt
new file mode 100644
index 000000000..8b4e71a38
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-date-reversed.txt
@@ -0,0 +1,5 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-date.txt b/helm/pkg/cmd/testdata/output/list-date.txt
new file mode 100644
index 000000000..3d2b27ad8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-date.txt
@@ -0,0 +1,5 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-failed.txt b/helm/pkg/cmd/testdata/output/list-failed.txt
new file mode 100644
index 000000000..a8ec3e132
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-failed.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-filter.txt b/helm/pkg/cmd/testdata/output/list-filter.txt
new file mode 100644
index 000000000..0a820922b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-filter.txt
@@ -0,0 +1,5 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-json.txt b/helm/pkg/cmd/testdata/output/list-json.txt
new file mode 100644
index 000000000..89e4d9dcf
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-json.txt
@@ -0,0 +1 @@
+[{"name":"test-release","namespace":"default","revision":"1","updated":"2016-01-16 00:00:00 +0000 UTC","status":"deployed","chart":"test-chart-1.0.0","app_version":"0.0.1"}]
diff --git a/helm/pkg/cmd/testdata/output/list-max.txt b/helm/pkg/cmd/testdata/output/list-max.txt
new file mode 100644
index 000000000..a909322b4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-max.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-namespace.txt b/helm/pkg/cmd/testdata/output/list-namespace.txt
new file mode 100644
index 000000000..9382327d6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-namespace.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+starlord milano 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-no-headers.txt b/helm/pkg/cmd/testdata/output/list-no-headers.txt
new file mode 100644
index 000000000..9d11d0caf
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-no-headers.txt
@@ -0,0 +1,4 @@
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-offset.txt b/helm/pkg/cmd/testdata/output/list-offset.txt
new file mode 100644
index 000000000..36e963ca5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-offset.txt
@@ -0,0 +1,4 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-pending.txt b/helm/pkg/cmd/testdata/output/list-pending.txt
new file mode 100644
index 000000000..f3d7aa03b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-pending.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+thanos default 1 2016-01-16 00:00:01 +0000 UTC pending-install chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-reverse.txt b/helm/pkg/cmd/testdata/output/list-reverse.txt
new file mode 100644
index 000000000..da178b2c3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-reverse.txt
@@ -0,0 +1,5 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-short-json.txt b/helm/pkg/cmd/testdata/output/list-short-json.txt
new file mode 100644
index 000000000..acbf1e44d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-short-json.txt
@@ -0,0 +1 @@
+["hummingbird","iguana","rocket","starlord"]
diff --git a/helm/pkg/cmd/testdata/output/list-short-yaml.txt b/helm/pkg/cmd/testdata/output/list-short-yaml.txt
new file mode 100644
index 000000000..86fb3d670
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-short-yaml.txt
@@ -0,0 +1,4 @@
+- hummingbird
+- iguana
+- rocket
+- starlord
diff --git a/helm/pkg/cmd/testdata/output/list-short.txt b/helm/pkg/cmd/testdata/output/list-short.txt
new file mode 100644
index 000000000..0a63be990
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-short.txt
@@ -0,0 +1,4 @@
+hummingbird
+iguana
+rocket
+starlord
diff --git a/helm/pkg/cmd/testdata/output/list-superseded.txt b/helm/pkg/cmd/testdata/output/list-superseded.txt
new file mode 100644
index 000000000..50b435874
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-superseded.txt
@@ -0,0 +1,3 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+gamora default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
+starlord default 1 2016-01-16 00:00:01 +0000 UTC superseded chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-time-format.txt b/helm/pkg/cmd/testdata/output/list-time-format.txt
new file mode 100644
index 000000000..4d493da7c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-time-format.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+test-release default 1 2016-01-16 00:00:00 deployed test-chart-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-uninstalled.txt b/helm/pkg/cmd/testdata/output/list-uninstalled.txt
new file mode 100644
index 000000000..430cf32fb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-uninstalled.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+groot default 1 2016-01-16 00:00:01 +0000 UTC uninstalled chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-uninstalling.txt b/helm/pkg/cmd/testdata/output/list-uninstalling.txt
new file mode 100644
index 000000000..922896391
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-uninstalling.txt
@@ -0,0 +1,2 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+drax default 1 2016-01-16 00:00:01 +0000 UTC uninstalling chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/list-yaml.txt b/helm/pkg/cmd/testdata/output/list-yaml.txt
new file mode 100644
index 000000000..9e1d41f30
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list-yaml.txt
@@ -0,0 +1,7 @@
+- app_version: 0.0.1
+ chart: test-chart-1.0.0
+ name: test-release
+ namespace: default
+ revision: "1"
+ status: deployed
+ updated: 2016-01-16 00:00:00 +0000 UTC
diff --git a/helm/pkg/cmd/testdata/output/list.txt b/helm/pkg/cmd/testdata/output/list.txt
new file mode 100644
index 000000000..0a820922b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/list.txt
@@ -0,0 +1,5 @@
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+hummingbird default 1 2016-01-16 00:00:03 +0000 UTC deployed chickadee-1.0.0 0.0.1
+iguana default 2 2016-01-16 00:00:04 +0000 UTC deployed chickadee-1.0.0 0.0.1
+rocket default 1 2016-01-16 00:00:02 +0000 UTC failed chickadee-1.0.0 0.0.1
+starlord default 2 2016-01-16 00:00:01 +0000 UTC deployed chickadee-1.0.0 0.0.1
diff --git a/helm/pkg/cmd/testdata/output/object-order.txt b/helm/pkg/cmd/testdata/output/object-order.txt
new file mode 100644
index 000000000..307f928f2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/object-order.txt
@@ -0,0 +1,191 @@
+---
+# Source: object-order/templates/01-a.yml
+# 1
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: first
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/01-a.yml
+# 2
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: second
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/01-a.yml
+# 3
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: third
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 5
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fifth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 7
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: seventh
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 8
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: eighth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 9
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: ninth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 10
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 11
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: eleventh
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 12
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: twelfth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 13
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: thirteenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 14
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fourteenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/02-b.yml
+# 15 (11th object within 02-b.yml, in order to test `SplitManifests` which assigns `manifest-10`
+# to this object which should then come *after* `manifest-9`)
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fifteenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+# Source: object-order/templates/01-a.yml
+# 4 (Deployment should come after all NetworkPolicy manifests, since 'helm template' outputs in install order)
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: fourth
+spec:
+ selector:
+ matchLabels:
+ pod: fourth
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ pod: fourth
+ spec:
+ containers:
+ - name: hello-world
+ image: gcr.io/google-samples/node-hello:1.0
+---
+# Source: object-order/templates/02-b.yml
+# 6 (implementation detail: currently, 'helm template' outputs hook manifests last; and yes, NetworkPolicy won't make a reasonable hook, this is just a dummy unit test manifest)
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ annotations:
+ "helm.sh/hook": pre-install
+ name: sixth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
diff --git a/helm/pkg/cmd/testdata/output/output-comp.txt b/helm/pkg/cmd/testdata/output/output-comp.txt
new file mode 100644
index 000000000..6232b2928
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/output-comp.txt
@@ -0,0 +1,5 @@
+json Output result in JSON format
+table Output result in human-readable format
+yaml Output result in YAML format
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/plugin_args_comp.txt b/helm/pkg/cmd/testdata/output/plugin_args_comp.txt
new file mode 100644
index 000000000..4070cb1e6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_args_comp.txt
@@ -0,0 +1,6 @@
+plugin.complete was called
+Namespace: default
+Num args received: 1
+Args received:
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/plugin_args_flag_comp.txt b/helm/pkg/cmd/testdata/output/plugin_args_flag_comp.txt
new file mode 100644
index 000000000..87300fa97
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_args_flag_comp.txt
@@ -0,0 +1,6 @@
+plugin.complete was called
+Namespace: default
+Num args received: 2
+Args received: --myflag
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/plugin_args_many_args_comp.txt b/helm/pkg/cmd/testdata/output/plugin_args_many_args_comp.txt
new file mode 100644
index 000000000..f3c386b6d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_args_many_args_comp.txt
@@ -0,0 +1,6 @@
+plugin.complete was called
+Namespace: mynamespace
+Num args received: 2
+Args received: --myflag start
+:2
+Completion ended with directive: ShellCompDirectiveNoSpace
diff --git a/helm/pkg/cmd/testdata/output/plugin_args_ns_comp.txt b/helm/pkg/cmd/testdata/output/plugin_args_ns_comp.txt
new file mode 100644
index 000000000..13bfcd3f4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_args_ns_comp.txt
@@ -0,0 +1,6 @@
+plugin.complete was called
+Namespace: mynamespace
+Num args received: 1
+Args received:
+:2
+Completion ended with directive: ShellCompDirectiveNoSpace
diff --git a/helm/pkg/cmd/testdata/output/plugin_echo_no_directive.txt b/helm/pkg/cmd/testdata/output/plugin_echo_no_directive.txt
new file mode 100644
index 000000000..99cc47c13
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_echo_no_directive.txt
@@ -0,0 +1,6 @@
+echo plugin.complete was called
+Namespace: mynamespace
+Num args received: 1
+Args received:
+:0
+Completion ended with directive: ShellCompDirectiveDefault
diff --git a/helm/pkg/cmd/testdata/output/plugin_list_comp.txt b/helm/pkg/cmd/testdata/output/plugin_list_comp.txt
new file mode 100644
index 000000000..1dff43551
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_list_comp.txt
@@ -0,0 +1,7 @@
+args echo args
+echo echo stuff
+exitwith exitwith code
+fullenv show env vars
+shortenv env stuff
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/plugin_repeat_comp.txt b/helm/pkg/cmd/testdata/output/plugin_repeat_comp.txt
new file mode 100644
index 000000000..b46c1b7d4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/plugin_repeat_comp.txt
@@ -0,0 +1,6 @@
+echo echo stuff
+exitwith exitwith code
+fullenv show env vars
+shortenv env stuff
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/release_list_comp.txt b/helm/pkg/cmd/testdata/output/release_list_comp.txt
new file mode 100644
index 000000000..226c378a9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/release_list_comp.txt
@@ -0,0 +1,5 @@
+aramis foo-0.1.0-beta.1 -> deployed
+athos foo-0.1.0-beta.1 -> deployed
+porthos foo-0.1.0-beta.1 -> deployed
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/release_list_repeat_comp.txt b/helm/pkg/cmd/testdata/output/release_list_repeat_comp.txt
new file mode 100644
index 000000000..aa330f47f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/release_list_repeat_comp.txt
@@ -0,0 +1,4 @@
+aramis foo-0.1.0-beta.1 -> deployed
+athos foo-0.1.0-beta.1 -> deployed
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/repo-add.txt b/helm/pkg/cmd/testdata/output/repo-add.txt
new file mode 100644
index 000000000..e8882321e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo-add.txt
@@ -0,0 +1 @@
+"test-name" has been added to your repositories
diff --git a/helm/pkg/cmd/testdata/output/repo-add2.txt b/helm/pkg/cmd/testdata/output/repo-add2.txt
new file mode 100644
index 000000000..263ffa9e4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo-add2.txt
@@ -0,0 +1 @@
+"test-name" already exists with the same configuration, skipping
diff --git a/helm/pkg/cmd/testdata/output/repo-list-empty.txt b/helm/pkg/cmd/testdata/output/repo-list-empty.txt
new file mode 100644
index 000000000..c6edb659a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo-list-empty.txt
@@ -0,0 +1 @@
+no repositories to show
diff --git a/helm/pkg/cmd/testdata/output/repo-list-no-headers.txt b/helm/pkg/cmd/testdata/output/repo-list-no-headers.txt
new file mode 100644
index 000000000..13491aeb2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo-list-no-headers.txt
@@ -0,0 +1,3 @@
+charts https://charts.helm.sh/stable
+firstexample http://firstexample.com
+secondexample http://secondexample.com
diff --git a/helm/pkg/cmd/testdata/output/repo-list.txt b/helm/pkg/cmd/testdata/output/repo-list.txt
new file mode 100644
index 000000000..edbd0ecc1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo-list.txt
@@ -0,0 +1,4 @@
+NAME URL
+charts https://charts.helm.sh/stable
+firstexample http://firstexample.com
+secondexample http://secondexample.com
diff --git a/helm/pkg/cmd/testdata/output/repo_list_comp.txt b/helm/pkg/cmd/testdata/output/repo_list_comp.txt
new file mode 100644
index 000000000..289e0d2e1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo_list_comp.txt
@@ -0,0 +1,5 @@
+foo
+bar
+baz
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/repo_repeat_comp.txt b/helm/pkg/cmd/testdata/output/repo_repeat_comp.txt
new file mode 100644
index 000000000..ed8ed89fa
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/repo_repeat_comp.txt
@@ -0,0 +1,4 @@
+bar
+baz
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/revision-comp.txt b/helm/pkg/cmd/testdata/output/revision-comp.txt
new file mode 100644
index 000000000..fe9faf1f1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/revision-comp.txt
@@ -0,0 +1,6 @@
+8 App: 1.0, Chart: foo-0.1.0-beta.1
+9 App: 1.0, Chart: foo-0.1.0-beta.1
+10 App: 1.0, Chart: foo-0.1.0-beta.1
+11 App: 1.0, Chart: foo-0.1.0-beta.1
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/revision-wrong-args-comp.txt b/helm/pkg/cmd/testdata/output/revision-wrong-args-comp.txt
new file mode 100644
index 000000000..8d9fad576
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/revision-wrong-args-comp.txt
@@ -0,0 +1,2 @@
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/rollback-comp.txt b/helm/pkg/cmd/testdata/output/rollback-comp.txt
new file mode 100644
index 000000000..2cfeed1f9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-comp.txt
@@ -0,0 +1,4 @@
+carabins foo-0.1.0-beta.1 -> superseded
+musketeers foo-0.1.0-beta.1 -> deployed
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/rollback-no-args.txt b/helm/pkg/cmd/testdata/output/rollback-no-args.txt
new file mode 100644
index 000000000..a1bc30b7a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm rollback" requires at least 1 argument
+
+Usage: helm rollback [REVISION] [flags]
diff --git a/helm/pkg/cmd/testdata/output/rollback-no-revision.txt b/helm/pkg/cmd/testdata/output/rollback-no-revision.txt
new file mode 100644
index 000000000..ae3c6f1c4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-no-revision.txt
@@ -0,0 +1 @@
+Rollback was a success! Happy Helming!
diff --git a/helm/pkg/cmd/testdata/output/rollback-non-existent-version.txt b/helm/pkg/cmd/testdata/output/rollback-non-existent-version.txt
new file mode 100644
index 000000000..9c2e10e17
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-non-existent-version.txt
@@ -0,0 +1 @@
+Error: release has no 3 version
diff --git a/helm/pkg/cmd/testdata/output/rollback-timeout.txt b/helm/pkg/cmd/testdata/output/rollback-timeout.txt
new file mode 100644
index 000000000..ae3c6f1c4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-timeout.txt
@@ -0,0 +1 @@
+Rollback was a success! Happy Helming!
diff --git a/helm/pkg/cmd/testdata/output/rollback-wait-for-jobs.txt b/helm/pkg/cmd/testdata/output/rollback-wait-for-jobs.txt
new file mode 100644
index 000000000..ae3c6f1c4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-wait-for-jobs.txt
@@ -0,0 +1 @@
+Rollback was a success! Happy Helming!
diff --git a/helm/pkg/cmd/testdata/output/rollback-wait.txt b/helm/pkg/cmd/testdata/output/rollback-wait.txt
new file mode 100644
index 000000000..ae3c6f1c4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-wait.txt
@@ -0,0 +1 @@
+Rollback was a success! Happy Helming!
diff --git a/helm/pkg/cmd/testdata/output/rollback-wrong-args-comp.txt b/helm/pkg/cmd/testdata/output/rollback-wrong-args-comp.txt
new file mode 100644
index 000000000..3c537283e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback-wrong-args-comp.txt
@@ -0,0 +1,3 @@
+_activeHelp_ This command does not take any more arguments (but may accept flags).
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/rollback.txt b/helm/pkg/cmd/testdata/output/rollback.txt
new file mode 100644
index 000000000..ae3c6f1c4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/rollback.txt
@@ -0,0 +1 @@
+Rollback was a success! Happy Helming!
diff --git a/helm/pkg/cmd/testdata/output/schema-negative-cli.txt b/helm/pkg/cmd/testdata/output/schema-negative-cli.txt
new file mode 100644
index 000000000..12bcc5103
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/schema-negative-cli.txt
@@ -0,0 +1,4 @@
+Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
+empty:
+- at '/age': minimum: got -5, want 0
+
diff --git a/helm/pkg/cmd/testdata/output/schema-negative.txt b/helm/pkg/cmd/testdata/output/schema-negative.txt
new file mode 100644
index 000000000..daf132635
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/schema-negative.txt
@@ -0,0 +1,5 @@
+Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
+empty:
+- at '': missing property 'employmentInfo'
+- at '/age': minimum: got -5, want 0
+
diff --git a/helm/pkg/cmd/testdata/output/schema.txt b/helm/pkg/cmd/testdata/output/schema.txt
new file mode 100644
index 000000000..f5fe63768
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/schema.txt
@@ -0,0 +1,7 @@
+NAME: schema
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/search-constraint-single.txt b/helm/pkg/cmd/testdata/output/search-constraint-single.txt
new file mode 100644
index 000000000..a1f75099f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-constraint-single.txt
@@ -0,0 +1,2 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.2.0 2.3.4 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-constraint.txt b/helm/pkg/cmd/testdata/output/search-constraint.txt
new file mode 100644
index 000000000..9fb22fe76
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-constraint.txt
@@ -0,0 +1,2 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.1.0 1.2.3 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-multiple-devel-release.txt b/helm/pkg/cmd/testdata/output/search-multiple-devel-release.txt
new file mode 100644
index 000000000..7e29a8f7e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-multiple-devel-release.txt
@@ -0,0 +1,2 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.3.0-rc.1 3.0.0 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-multiple-stable-release.txt b/helm/pkg/cmd/testdata/output/search-multiple-stable-release.txt
new file mode 100644
index 000000000..a1f75099f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-multiple-stable-release.txt
@@ -0,0 +1,2 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.2.0 2.3.4 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-multiple-versions-constraints.txt b/helm/pkg/cmd/testdata/output/search-multiple-versions-constraints.txt
new file mode 100644
index 000000000..a6a388858
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-multiple-versions-constraints.txt
@@ -0,0 +1,3 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.2.0 2.3.4 Deploy a basic Alpine Linux pod
+testing/alpine 0.1.0 1.2.3 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-multiple-versions.txt b/helm/pkg/cmd/testdata/output/search-multiple-versions.txt
new file mode 100644
index 000000000..a6a388858
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-multiple-versions.txt
@@ -0,0 +1,3 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.2.0 2.3.4 Deploy a basic Alpine Linux pod
+testing/alpine 0.1.0 1.2.3 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-not-found-error.txt b/helm/pkg/cmd/testdata/output/search-not-found-error.txt
new file mode 100644
index 000000000..8b586bea3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-not-found-error.txt
@@ -0,0 +1 @@
+Error: no results found
diff --git a/helm/pkg/cmd/testdata/output/search-not-found.txt b/helm/pkg/cmd/testdata/output/search-not-found.txt
new file mode 100644
index 000000000..4f2a9fd07
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-not-found.txt
@@ -0,0 +1 @@
+No results found
diff --git a/helm/pkg/cmd/testdata/output/search-output-json.txt b/helm/pkg/cmd/testdata/output/search-output-json.txt
new file mode 100644
index 000000000..9b211e1b5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-output-json.txt
@@ -0,0 +1 @@
+[{"name":"testing/mariadb","version":"0.3.0","app_version":"","description":"Chart for MariaDB"}]
diff --git a/helm/pkg/cmd/testdata/output/search-output-yaml.txt b/helm/pkg/cmd/testdata/output/search-output-yaml.txt
new file mode 100644
index 000000000..122b7f345
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-output-yaml.txt
@@ -0,0 +1,4 @@
+- app_version: 2.3.4
+ description: Deploy a basic Alpine Linux pod
+ name: testing/alpine
+ version: 0.2.0
diff --git a/helm/pkg/cmd/testdata/output/search-regex.txt b/helm/pkg/cmd/testdata/output/search-regex.txt
new file mode 100644
index 000000000..a1f75099f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-regex.txt
@@ -0,0 +1,2 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.2.0 2.3.4 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/search-versions-constraint.txt b/helm/pkg/cmd/testdata/output/search-versions-constraint.txt
new file mode 100644
index 000000000..9fb22fe76
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/search-versions-constraint.txt
@@ -0,0 +1,2 @@
+NAME CHART VERSION APP VERSION DESCRIPTION
+testing/alpine 0.1.0 1.2.3 Deploy a basic Alpine Linux pod
diff --git a/helm/pkg/cmd/testdata/output/status-comp.txt b/helm/pkg/cmd/testdata/output/status-comp.txt
new file mode 100644
index 000000000..4c408c974
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-comp.txt
@@ -0,0 +1,5 @@
+aramis Aramis-chart-0.0.0 -> uninstalled
+athos Athos-chart-1.2.3 -> deployed
+porthos Porthos-chart-111.222.333 -> failed
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/status-with-desc.txt b/helm/pkg/cmd/testdata/output/status-with-desc.txt
new file mode 100644
index 000000000..c681fe3ec
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-with-desc.txt
@@ -0,0 +1,7 @@
+NAME: flummoxed-chickadee
+LAST DEPLOYED: Sat Jan 16 00:00:00 2016
+NAMESPACE: default
+STATUS: deployed
+REVISION: 0
+DESCRIPTION: Mock description
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/status-with-notes.txt b/helm/pkg/cmd/testdata/output/status-with-notes.txt
new file mode 100644
index 000000000..f05be6c18
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-with-notes.txt
@@ -0,0 +1,9 @@
+NAME: flummoxed-chickadee
+LAST DEPLOYED: Sat Jan 16 00:00:00 2016
+NAMESPACE: default
+STATUS: deployed
+REVISION: 0
+DESCRIPTION:
+TEST SUITE: None
+NOTES:
+release notes
diff --git a/helm/pkg/cmd/testdata/output/status-with-resources.json b/helm/pkg/cmd/testdata/output/status-with-resources.json
new file mode 100644
index 000000000..af512bfd1
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-with-resources.json
@@ -0,0 +1 @@
+{"name":"flummoxed-chickadee","info":{"last_deployed":"2016-01-16T00:00:00Z","status":"deployed"},"namespace":"default"}
diff --git a/helm/pkg/cmd/testdata/output/status-with-resources.txt b/helm/pkg/cmd/testdata/output/status-with-resources.txt
new file mode 100644
index 000000000..20763acda
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-with-resources.txt
@@ -0,0 +1,7 @@
+NAME: flummoxed-chickadee
+LAST DEPLOYED: Sat Jan 16 00:00:00 2016
+NAMESPACE: default
+STATUS: deployed
+REVISION: 0
+DESCRIPTION:
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/status-with-test-suite.txt b/helm/pkg/cmd/testdata/output/status-with-test-suite.txt
new file mode 100644
index 000000000..7c1ade450
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-with-test-suite.txt
@@ -0,0 +1,14 @@
+NAME: flummoxed-chickadee
+LAST DEPLOYED: Sat Jan 16 00:00:00 2016
+NAMESPACE: default
+STATUS: deployed
+REVISION: 0
+DESCRIPTION:
+TEST SUITE: passing-test
+Last Started: Mon Jan 2 15:04:05 2006
+Last Completed: Mon Jan 2 15:04:07 2006
+Phase: Succeeded
+TEST SUITE: failing-test
+Last Started: Mon Jan 2 15:10:05 2006
+Last Completed: Mon Jan 2 15:10:07 2006
+Phase: Failed
diff --git a/helm/pkg/cmd/testdata/output/status-wrong-args-comp.txt b/helm/pkg/cmd/testdata/output/status-wrong-args-comp.txt
new file mode 100644
index 000000000..3c537283e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status-wrong-args-comp.txt
@@ -0,0 +1,3 @@
+_activeHelp_ This command does not take any more arguments (but may accept flags).
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/status.json b/helm/pkg/cmd/testdata/output/status.json
new file mode 100644
index 000000000..4727dd100
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status.json
@@ -0,0 +1 @@
+{"name":"flummoxed-chickadee","info":{"last_deployed":"2016-01-16T00:00:00Z","status":"deployed","notes":"release notes"},"namespace":"default"}
diff --git a/helm/pkg/cmd/testdata/output/status.txt b/helm/pkg/cmd/testdata/output/status.txt
new file mode 100644
index 000000000..20763acda
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/status.txt
@@ -0,0 +1,7 @@
+NAME: flummoxed-chickadee
+LAST DEPLOYED: Sat Jan 16 00:00:00 2016
+NAMESPACE: default
+STATUS: deployed
+REVISION: 0
+DESCRIPTION:
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt b/helm/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt
new file mode 100644
index 000000000..179550f69
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/subchart-schema-cli-negative.txt
@@ -0,0 +1,4 @@
+Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
+subchart-with-schema:
+- at '/age': minimum: got -25, want 0
+
diff --git a/helm/pkg/cmd/testdata/output/subchart-schema-cli.txt b/helm/pkg/cmd/testdata/output/subchart-schema-cli.txt
new file mode 100644
index 000000000..f5fe63768
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/subchart-schema-cli.txt
@@ -0,0 +1,7 @@
+NAME: schema
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 1
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/subchart-schema-negative.txt b/helm/pkg/cmd/testdata/output/subchart-schema-negative.txt
new file mode 100644
index 000000000..7522ef3e4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/subchart-schema-negative.txt
@@ -0,0 +1,6 @@
+Error: INSTALLATION FAILED: values don't meet the specifications of the schema(s) in the following chart(s):
+chart-without-schema:
+- at '': missing property 'lastname'
+subchart-with-schema:
+- at '': missing property 'age'
+
diff --git a/helm/pkg/cmd/testdata/output/template-chart-bad-type.txt b/helm/pkg/cmd/testdata/output/template-chart-bad-type.txt
new file mode 100644
index 000000000..d8a3bf275
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-chart-bad-type.txt
@@ -0,0 +1 @@
+Error: validation: chart.metadata.type must be application or library
diff --git a/helm/pkg/cmd/testdata/output/template-chart-with-template-lib-archive-dep.txt b/helm/pkg/cmd/testdata/output/template-chart-with-template-lib-archive-dep.txt
new file mode 100644
index 000000000..c954b8e14
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-chart-with-template-lib-archive-dep.txt
@@ -0,0 +1,61 @@
+---
+# Source: chart-with-template-lib-archive-dep/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: chart-with-template-lib-archive-dep
+ chart: chart-with-template-lib-archive-dep-0.1.0
+ heritage: Helm
+ release: release-name
+ name: release-name-chart-with-template-lib-archive-dep
+spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ selector:
+ app: chart-with-template-lib-archive-dep
+ release: release-name
+ type: ClusterIP
+---
+# Source: chart-with-template-lib-archive-dep/templates/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: release-name-chart-with-template-lib-archive-dep
+ labels:
+ app: chart-with-template-lib-archive-dep
+ chart: chart-with-template-lib-archive-dep-0.1.0
+ release: release-name
+ heritage: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: chart-with-template-lib-archive-dep
+ release: release-name
+ template:
+ metadata:
+ labels:
+ app: chart-with-template-lib-archive-dep
+ release: release-name
+ spec:
+ containers:
+ - name: chart-with-template-lib-archive-dep
+ image: "nginx:stable"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {}
diff --git a/helm/pkg/cmd/testdata/output/template-chart-with-template-lib-dep.txt b/helm/pkg/cmd/testdata/output/template-chart-with-template-lib-dep.txt
new file mode 100644
index 000000000..74a2a2df8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-chart-with-template-lib-dep.txt
@@ -0,0 +1,61 @@
+---
+# Source: chart-with-template-lib-dep/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: chart-with-template-lib-dep
+ chart: chart-with-template-lib-dep-0.1.0
+ heritage: Helm
+ release: release-name
+ name: release-name-chart-with-template-lib-dep
+spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ selector:
+ app: chart-with-template-lib-dep
+ release: release-name
+ type: ClusterIP
+---
+# Source: chart-with-template-lib-dep/templates/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: release-name-chart-with-template-lib-dep
+ labels:
+ app: chart-with-template-lib-dep
+ chart: chart-with-template-lib-dep-0.1.0
+ release: release-name
+ heritage: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: chart-with-template-lib-dep
+ release: release-name
+ template:
+ metadata:
+ labels:
+ app: chart-with-template-lib-dep
+ release: release-name
+ spec:
+ containers:
+ - name: chart-with-template-lib-dep
+ image: "nginx:stable"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {}
diff --git a/helm/pkg/cmd/testdata/output/template-lib-chart.txt b/helm/pkg/cmd/testdata/output/template-lib-chart.txt
new file mode 100644
index 000000000..d8a3bf275
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-lib-chart.txt
@@ -0,0 +1 @@
+Error: validation: chart.metadata.type must be application or library
diff --git a/helm/pkg/cmd/testdata/output/template-name-template.txt b/helm/pkg/cmd/testdata/output/template-name-template.txt
new file mode 100644
index 000000000..9406048dd
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-name-template.txt
@@ -0,0 +1,114 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "foobar-ywjj-baz"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "foobar-ywjj-baz-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "foobar-ywjj-baz-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "foobar-ywjj-baz-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-no-args.txt b/helm/pkg/cmd/testdata/output/template-no-args.txt
new file mode 100644
index 000000000..f72f2b8cf
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm template" requires at least 1 argument
+
+Usage: helm template [NAME] [CHART] [flags]
diff --git a/helm/pkg/cmd/testdata/output/template-set.txt b/helm/pkg/cmd/testdata/output/template-set.txt
new file mode 100644
index 000000000..4040991cf
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-set.txt
@@ -0,0 +1,114 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-show-only-glob.txt b/helm/pkg/cmd/testdata/output/template-show-only-glob.txt
new file mode 100644
index 000000000..b2d2b1c2d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-show-only-glob.txt
@@ -0,0 +1,24 @@
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
diff --git a/helm/pkg/cmd/testdata/output/template-show-only-multiple.txt b/helm/pkg/cmd/testdata/output/template-show-only-multiple.txt
new file mode 100644
index 000000000..1aac3081a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-show-only-multiple.txt
@@ -0,0 +1,38 @@
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
diff --git a/helm/pkg/cmd/testdata/output/template-show-only-one.txt b/helm/pkg/cmd/testdata/output/template-show-only-one.txt
new file mode 100644
index 000000000..9cc34f515
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-show-only-one.txt
@@ -0,0 +1,21 @@
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
diff --git a/helm/pkg/cmd/testdata/output/template-skip-tests.txt b/helm/pkg/cmd/testdata/output/template-skip-tests.txt
new file mode 100644
index 000000000..5c907b563
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-skip-tests.txt
@@ -0,0 +1,85 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
diff --git a/helm/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt b/helm/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt
new file mode 100644
index 000000000..56844e292
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-subchart-cm-set-file.txt
@@ -0,0 +1,122 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: subchart-cm
+data:
+ value: qux
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-subchart-cm-set.txt b/helm/pkg/cmd/testdata/output/template-subchart-cm-set.txt
new file mode 100644
index 000000000..e52f7c234
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-subchart-cm-set.txt
@@ -0,0 +1,122 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: subchart-cm
+data:
+ value: baz
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-subchart-cm.txt b/helm/pkg/cmd/testdata/output/template-subchart-cm.txt
new file mode 100644
index 000000000..9cc9e2296
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-subchart-cm.txt
@@ -0,0 +1,122 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: subchart-cm
+data:
+ value: foo
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-values-files.txt b/helm/pkg/cmd/testdata/output/template-values-files.txt
new file mode 100644
index 000000000..4040991cf
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-values-files.txt
@@ -0,0 +1,114 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-with-api-version.txt b/helm/pkg/cmd/testdata/output/template-with-api-version.txt
new file mode 100644
index 000000000..8b6074cdb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-with-api-version.txt
@@ -0,0 +1,116 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+ kube-api-version/test: v1
+ kube-api-version/test2: v2
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-with-crds.txt b/helm/pkg/cmd/testdata/output/template-with-crds.txt
new file mode 100644
index 000000000..256fc7c3b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-with-crds.txt
@@ -0,0 +1,131 @@
+---
+# Source: subchart/crds/crdA.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: testcrds.testcrdgroups.example.com
+spec:
+ group: testcrdgroups.example.com
+ version: v1alpha1
+ names:
+ kind: TestCRD
+ listKind: TestCRDList
+ plural: testcrds
+ shortNames:
+ - tc
+ singular: authconfig
+
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template-with-invalid-yaml-debug.txt b/helm/pkg/cmd/testdata/output/template-with-invalid-yaml-debug.txt
new file mode 100644
index 000000000..909c543d3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-with-invalid-yaml-debug.txt
@@ -0,0 +1,13 @@
+---
+# Source: chart-with-template-with-invalid-yaml/templates/alpine-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-my-alpine"
+spec:
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
+invalid
+Error: YAML parse error on chart-with-template-with-invalid-yaml/templates/alpine-pod.yaml: error converting YAML to JSON: yaml: line 11: could not find expected ':'
diff --git a/helm/pkg/cmd/testdata/output/template-with-invalid-yaml.txt b/helm/pkg/cmd/testdata/output/template-with-invalid-yaml.txt
new file mode 100644
index 000000000..687227b90
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-with-invalid-yaml.txt
@@ -0,0 +1,3 @@
+Error: YAML parse error on chart-with-template-with-invalid-yaml/templates/alpine-pod.yaml: error converting YAML to JSON: yaml: line 11: could not find expected ':'
+
+Use --debug flag to render out invalid YAML
diff --git a/helm/pkg/cmd/testdata/output/template-with-kube-version.txt b/helm/pkg/cmd/testdata/output/template-with-kube-version.txt
new file mode 100644
index 000000000..9d326f328
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template-with-kube-version.txt
@@ -0,0 +1,114 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "16"
+ kube-version/version: "v1.16.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/template.txt b/helm/pkg/cmd/testdata/output/template.txt
new file mode 100644
index 000000000..58c480b47
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/template.txt
@@ -0,0 +1,114 @@
+---
+# Source: subchart/templates/subdir/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: subchart-sa
+---
+# Source: subchart/templates/subdir/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: subchart-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
+---
+# Source: subchart/templates/subdir/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: subchart-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: subchart-role
+subjects:
+- kind: ServiceAccount
+ name: subchart-sa
+ namespace: default
+---
+# Source: subchart/charts/subcharta/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subcharta
+ labels:
+ helm.sh/chart: "subcharta-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: apache
+ selector:
+ app.kubernetes.io/name: subcharta
+---
+# Source: subchart/charts/subchartb/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchartb
+ labels:
+ helm.sh/chart: "subchartb-0.1.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchartb
+---
+# Source: subchart/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: subchart
+ labels:
+ helm.sh/chart: "subchart-0.1.0"
+ app.kubernetes.io/instance: "release-name"
+ kube-version/major: "1"
+ kube-version/minor: "20"
+ kube-version/version: "v1.20.0"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: nginx
+ selector:
+ app.kubernetes.io/name: subchart
+---
+# Source: subchart/templates/tests/test-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "release-name-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
+---
+# Source: subchart/templates/tests/test-nothing.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "release-name-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "release-name-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/output/uninstall-keep-history-earlier-deployed.txt b/helm/pkg/cmd/testdata/output/uninstall-keep-history-earlier-deployed.txt
new file mode 100644
index 000000000..f5454b88d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-keep-history-earlier-deployed.txt
@@ -0,0 +1 @@
+release "aeneas" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/uninstall-keep-history.txt b/helm/pkg/cmd/testdata/output/uninstall-keep-history.txt
new file mode 100644
index 000000000..f5454b88d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-keep-history.txt
@@ -0,0 +1 @@
+release "aeneas" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/uninstall-multiple.txt b/helm/pkg/cmd/testdata/output/uninstall-multiple.txt
new file mode 100644
index 000000000..ee1c67d2f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-multiple.txt
@@ -0,0 +1,2 @@
+release "aeneas" uninstalled
+release "aeneas2" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/uninstall-no-args.txt b/helm/pkg/cmd/testdata/output/uninstall-no-args.txt
new file mode 100644
index 000000000..fc01a75b9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-no-args.txt
@@ -0,0 +1,3 @@
+Error: "helm uninstall" requires at least 1 argument
+
+Usage: helm uninstall RELEASE_NAME [...] [flags]
diff --git a/helm/pkg/cmd/testdata/output/uninstall-no-hooks.txt b/helm/pkg/cmd/testdata/output/uninstall-no-hooks.txt
new file mode 100644
index 000000000..f5454b88d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-no-hooks.txt
@@ -0,0 +1 @@
+release "aeneas" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/uninstall-timeout.txt b/helm/pkg/cmd/testdata/output/uninstall-timeout.txt
new file mode 100644
index 000000000..f5454b88d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-timeout.txt
@@ -0,0 +1 @@
+release "aeneas" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/uninstall-wait.txt b/helm/pkg/cmd/testdata/output/uninstall-wait.txt
new file mode 100644
index 000000000..f5454b88d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall-wait.txt
@@ -0,0 +1 @@
+release "aeneas" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/uninstall.txt b/helm/pkg/cmd/testdata/output/uninstall.txt
new file mode 100644
index 000000000..f5454b88d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/uninstall.txt
@@ -0,0 +1 @@
+release "aeneas" uninstalled
diff --git a/helm/pkg/cmd/testdata/output/upgrade-and-take-ownership.txt b/helm/pkg/cmd/testdata/output/upgrade-and-take-ownership.txt
new file mode 100644
index 000000000..59267651f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-and-take-ownership.txt
@@ -0,0 +1,8 @@
+Release "funny-bunny" has been upgraded. Happy Helming!
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 3
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-uninstalled-with-keep-history.txt b/helm/pkg/cmd/testdata/output/upgrade-uninstalled-with-keep-history.txt
new file mode 100644
index 000000000..d5c42d15c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-uninstalled-with-keep-history.txt
@@ -0,0 +1,8 @@
+Release "funny-bunny" does not exist. Installing it now.
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 3
+DESCRIPTION: Install complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-bad-dependencies.txt b/helm/pkg/cmd/testdata/output/upgrade-with-bad-dependencies.txt
new file mode 100644
index 000000000..6dddc7344
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-bad-dependencies.txt
@@ -0,0 +1 @@
+Error: cannot load Chart.yaml: error converting YAML to JSON: yaml: line 6: did not find expected '-' indicator
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-bad-or-missing-existing-release.txt b/helm/pkg/cmd/testdata/output/upgrade-with-bad-or-missing-existing-release.txt
new file mode 100644
index 000000000..8f24574a6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-bad-or-missing-existing-release.txt
@@ -0,0 +1 @@
+Error: UPGRADE FAILED: "funny-bunny" has no deployed releases
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-dependency-update.txt b/helm/pkg/cmd/testdata/output/upgrade-with-dependency-update.txt
new file mode 100644
index 000000000..d1517a686
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-dependency-update.txt
@@ -0,0 +1,10 @@
+Release "funny-bunny" has been upgraded. Happy Helming!
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 3
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
+NOTES:
+PARENT NOTES
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-install-timeout.txt b/helm/pkg/cmd/testdata/output/upgrade-with-install-timeout.txt
new file mode 100644
index 000000000..b159dc3bc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-install-timeout.txt
@@ -0,0 +1,8 @@
+Release "crazy-bunny" has been upgraded. Happy Helming!
+NAME: crazy-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 2
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-install.txt b/helm/pkg/cmd/testdata/output/upgrade-with-install.txt
new file mode 100644
index 000000000..7dc2fce69
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-install.txt
@@ -0,0 +1,8 @@
+Release "zany-bunny" has been upgraded. Happy Helming!
+NAME: zany-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 2
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt b/helm/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt
new file mode 100644
index 000000000..b2c154a80
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-missing-dependencies.txt
@@ -0,0 +1 @@
+Error: an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: found in Chart.yaml, but missing in charts/ directory: reqsubchart2
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-pending-install.txt b/helm/pkg/cmd/testdata/output/upgrade-with-pending-install.txt
new file mode 100644
index 000000000..57a8e7873
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-pending-install.txt
@@ -0,0 +1 @@
+Error: UPGRADE FAILED: another operation (install/upgrade/rollback) is in progress
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-reset-values.txt b/helm/pkg/cmd/testdata/output/upgrade-with-reset-values.txt
new file mode 100644
index 000000000..d02993a5c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-reset-values.txt
@@ -0,0 +1,8 @@
+Release "funny-bunny" has been upgraded. Happy Helming!
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 5
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-reset-values2.txt b/helm/pkg/cmd/testdata/output/upgrade-with-reset-values2.txt
new file mode 100644
index 000000000..7780c4fdc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-reset-values2.txt
@@ -0,0 +1,8 @@
+Release "funny-bunny" has been upgraded. Happy Helming!
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 6
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-timeout.txt b/helm/pkg/cmd/testdata/output/upgrade-with-timeout.txt
new file mode 100644
index 000000000..b1edac3af
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-timeout.txt
@@ -0,0 +1,8 @@
+Release "funny-bunny" has been upgraded. Happy Helming!
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 4
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-wait-for-jobs.txt b/helm/pkg/cmd/testdata/output/upgrade-with-wait-for-jobs.txt
new file mode 100644
index 000000000..21784413c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-wait-for-jobs.txt
@@ -0,0 +1,8 @@
+Release "crazy-bunny" has been upgraded. Happy Helming!
+NAME: crazy-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 3
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade-with-wait.txt b/helm/pkg/cmd/testdata/output/upgrade-with-wait.txt
new file mode 100644
index 000000000..21784413c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade-with-wait.txt
@@ -0,0 +1,8 @@
+Release "crazy-bunny" has been upgraded. Happy Helming!
+NAME: crazy-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 3
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/upgrade.txt b/helm/pkg/cmd/testdata/output/upgrade.txt
new file mode 100644
index 000000000..59267651f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/upgrade.txt
@@ -0,0 +1,8 @@
+Release "funny-bunny" has been upgraded. Happy Helming!
+NAME: funny-bunny
+LAST DEPLOYED: Fri Sep 2 22:04:05 1977
+NAMESPACE: default
+STATUS: deployed
+REVISION: 3
+DESCRIPTION: Upgrade complete
+TEST SUITE: None
diff --git a/helm/pkg/cmd/testdata/output/values.json b/helm/pkg/cmd/testdata/output/values.json
new file mode 100644
index 000000000..ea8308627
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/values.json
@@ -0,0 +1 @@
+{"name":"value"}
diff --git a/helm/pkg/cmd/testdata/output/values.yaml b/helm/pkg/cmd/testdata/output/values.yaml
new file mode 100644
index 000000000..54ab03c93
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/values.yaml
@@ -0,0 +1 @@
+name: value
diff --git a/helm/pkg/cmd/testdata/output/version-comp.txt b/helm/pkg/cmd/testdata/output/version-comp.txt
new file mode 100644
index 000000000..5b0556cf5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/version-comp.txt
@@ -0,0 +1,5 @@
+0.3.0-rc.1 App: 3.0.0, Created: November 12, 2020
+0.2.0 App: 2.3.4, Created: July 9, 2018
+0.1.0 App: 1.2.3, Created: June 27, 2018 (deprecated)
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/version-invalid-comp.txt b/helm/pkg/cmd/testdata/output/version-invalid-comp.txt
new file mode 100644
index 000000000..8d9fad576
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/version-invalid-comp.txt
@@ -0,0 +1,2 @@
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp
diff --git a/helm/pkg/cmd/testdata/output/version-short.txt b/helm/pkg/cmd/testdata/output/version-short.txt
new file mode 100644
index 000000000..8cf4318fb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/version-short.txt
@@ -0,0 +1 @@
+v4.1
diff --git a/helm/pkg/cmd/testdata/output/version-template.txt b/helm/pkg/cmd/testdata/output/version-template.txt
new file mode 100644
index 000000000..8fd8b4962
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/version-template.txt
@@ -0,0 +1 @@
+Version: v4.1
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/output/version.txt b/helm/pkg/cmd/testdata/output/version.txt
new file mode 100644
index 000000000..1f4cf4d4a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/output/version.txt
@@ -0,0 +1 @@
+version.BuildInfo{Version:"v4.1", GitCommit:"", GitTreeState:"", GoVersion:"", KubeClientVersion:"v1.20"}
diff --git a/helm/pkg/cmd/testdata/password b/helm/pkg/cmd/testdata/password
new file mode 100644
index 000000000..f3097ab13
--- /dev/null
+++ b/helm/pkg/cmd/testdata/password
@@ -0,0 +1 @@
+password
diff --git a/helm/pkg/cmd/testdata/plugins.yaml b/helm/pkg/cmd/testdata/plugins.yaml
new file mode 100644
index 000000000..69086973e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/plugins.yaml
@@ -0,0 +1,3 @@
+plugins:
+- name: testplugin
+ url: testdata/testplugin
diff --git a/helm/pkg/cmd/testdata/repositories.yaml b/helm/pkg/cmd/testdata/repositories.yaml
new file mode 100644
index 000000000..6be26b771
--- /dev/null
+++ b/helm/pkg/cmd/testdata/repositories.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+repositories:
+ - name: charts
+ url: "https://charts.helm.sh/stable"
+ - name: firstexample
+ url: "http://firstexample.com"
+ - name: secondexample
+ url: "http://secondexample.com"
+
diff --git a/helm/pkg/cmd/testdata/testcharts/alpine/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/alpine/Chart.yaml
new file mode 100644
index 000000000..1d6bad825
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/alpine/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+appVersion: "3.9"
+description: Deploy a basic Alpine Linux pod
+home: https://helm.sh/helm
+name: alpine
+sources:
+- https://github.com/helm/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/alpine/README.md b/helm/pkg/cmd/testdata/testcharts/alpine/README.md
new file mode 100644
index 000000000..05d39dbbc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/alpine/README.md
@@ -0,0 +1,13 @@
+# Alpine: A simple Helm chart
+
+Run a single pod of Alpine Linux.
+
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.yaml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/cmd/testdata/testcharts/alpine/extra_values.yaml b/helm/pkg/cmd/testdata/testcharts/alpine/extra_values.yaml
new file mode 100644
index 000000000..468bbacbc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/alpine/extra_values.yaml
@@ -0,0 +1,2 @@
+test:
+ Name: extra-values
diff --git a/helm/pkg/cmd/testdata/testcharts/alpine/more_values.yaml b/helm/pkg/cmd/testdata/testcharts/alpine/more_values.yaml
new file mode 100644
index 000000000..3d21e1fed
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/alpine/more_values.yaml
@@ -0,0 +1,2 @@
+test:
+ Name: more-values
diff --git a/helm/pkg/cmd/testdata/testcharts/alpine/templates/alpine-pod.yaml b/helm/pkg/cmd/testdata/testcharts/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..a1a44e53f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{.Release.Name}}-{{.Values.Name}}"
+ labels:
+ # The "app.kubernetes.io/managed-by" label is used to track which tool
+ # deployed a given chart. It is useful for admins who want to see what
+ # releases a particular tool is responsible for.
+ app.kubernetes.io/managed-by: {{.Release.Service | quote }}
+ # The "app.kubernetes.io/instance" convention makes it easy to tie a release
+ # to all of the Kubernetes resources that were created as part of that
+ # release.
+ app.kubernetes.io/instance: {{.Release.Name | quote }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ # This makes it easy to audit chart usage.
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+ values: {{.Values.Name}}
+spec:
+ # This shows how to use a simple value. This will look for a passed-in value
+ # called restartPolicy. If it is not found, it will use the default value.
+ # {{default "Never" .restartPolicy}} is a slightly optimized version of the
+ # more conventional syntax: {{.restartPolicy | default "Never"}}
+ restartPolicy: {{default "Never" .Values.restartPolicy}}
+ containers:
+ - name: waiter
+ image: "alpine:{{ .Chart.AppVersion }}"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/cmd/testdata/testcharts/alpine/values.yaml b/helm/pkg/cmd/testdata/testcharts/alpine/values.yaml
new file mode 100644
index 000000000..807e12aea
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/alpine/values.yaml
@@ -0,0 +1 @@
+Name: my-alpine
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/Chart.yaml
new file mode 100644
index 000000000..1f445ee11
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: chart-missing-deps
+version: 0.1.0
+dependencies:
+ - name: reqsubchart
+ version: 0.1.0
+ repository: "https://example.com/charts"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/Chart.yaml
new file mode 100644
index 000000000..356135537
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: reqsubchart
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/values.yaml
new file mode 100644
index 000000000..0f0b63f2a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/charts/reqsubchart/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqsubchart.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/values.yaml
new file mode 100644
index 000000000..d57f76b07
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-requirements/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqtest.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-type/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/Chart.yaml
new file mode 100644
index 000000000..e77b5afaa
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Deploy a basic Alpine Linux pod
+home: https://helm.sh/helm
+name: chart-bad-type
+sources:
+ - https://github.com/helm/helm
+version: 0.1.0
+type: foobar
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-type/README.md b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/README.md
new file mode 100644
index 000000000..fcf7ee017
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/README.md
@@ -0,0 +1,13 @@
+#Alpine: A simple Helm chart
+
+Run a single pod of Alpine Linux.
+
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.yaml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-type/extra_values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/extra_values.yaml
new file mode 100644
index 000000000..468bbacbc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/extra_values.yaml
@@ -0,0 +1,2 @@
+test:
+ Name: extra-values
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-type/more_values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/more_values.yaml
new file mode 100644
index 000000000..3d21e1fed
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/more_values.yaml
@@ -0,0 +1,2 @@
+test:
+ Name: more-values
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-type/templates/alpine-pod.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/templates/alpine-pod.yaml
new file mode 100644
index 000000000..a40ae32d7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/templates/alpine-pod.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{.Release.Name}}-{{.Values.Name}}"
+ labels:
+ # The "app.kubernetes.io/managed-by" label is used to track which tool
+ # deployed a given chart. It is useful for admins who want to see what
+ # releases a particular tool is responsible for.
+ app.kubernetes.io/managed-by: {{.Release.Service | quote }}
+ # The "release" convention makes it easy to tie a release to all of the
+ # Kubernetes resources that were created as part of that release.
+ app.kubernetes.io/instance: {{.Release.Name | quote }}
+ # This makes it easy to audit chart usage.
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+ values: {{.Values.test.Name}}
+spec:
+ # This shows how to use a simple value. This will look for a passed-in value
+ # called restartPolicy. If it is not found, it will use the default value.
+ # {{default "Never" .restartPolicy}} is a slightly optimized version of the
+ # more conventional syntax: {{.restartPolicy | default "Never"}}
+ restartPolicy: {{default "Never" .Values.restartPolicy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-bad-type/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/values.yaml
new file mode 100644
index 000000000..807e12aea
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-bad-type/values.yaml
@@ -0,0 +1 @@
+Name: my-alpine
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/Chart.yaml
new file mode 100644
index 000000000..9605636db
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: chart-missing-deps
+version: 0.1.0
+dependencies:
+ - name: reqsubchart
+ version: 0.1.0
+ repository: "https://example.com/charts"
+ - name: reqsubchart2
+ version: 0.2.0
+ repository: "https://example.com/charts"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/Chart.yaml
new file mode 100644
index 000000000..356135537
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: reqsubchart
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/values.yaml
new file mode 100644
index 000000000..0f0b63f2a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/charts/reqsubchart/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqsubchart.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/values.yaml
new file mode 100644
index 000000000..d57f76b07
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-missing-deps/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqtest.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/Chart.yaml
new file mode 100644
index 000000000..a575aa9f8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: Chart with bad subcharts
+name: chart-with-bad-subcharts
+version: 0.0.1
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/bad-subchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/bad-subchart/Chart.yaml
new file mode 100644
index 000000000..a6754b24f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/bad-subchart/Chart.yaml
@@ -0,0 +1 @@
+description: Bad subchart
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/bad-subchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/bad-subchart/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart/Chart.yaml
new file mode 100644
index 000000000..895433e31
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: Good subchart
+name: good-subchart
+version: 0.0.1
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/charts/good-subchart/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/requirements.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/requirements.yaml
new file mode 100644
index 000000000..de2fbb4dd
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/requirements.yaml
@@ -0,0 +1,5 @@
+dependencies:
+ - name: good-subchart
+ version: 0.0.1
+ - name: bad-subchart
+ version: 0.0.1
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-bad-subcharts/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/Chart.yaml
new file mode 100644
index 000000000..3a6e99952
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+appVersion: "1.0.0"
+description: A Helm chart for Kubernetes
+name: chart-with-deprecated-api
+type: application
+version: 1.0.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/horizontalpodautoscaler.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/horizontalpodautoscaler.yaml
new file mode 100644
index 000000000..b77a4beeb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/templates/horizontalpodautoscaler.yaml
@@ -0,0 +1,9 @@
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+ name: deprecated
+spec:
+ scaleTargetRef:
+ kind: Pod
+ name: pod
+ maxReplicas: 3
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-deprecated-api/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/Chart.yaml
new file mode 100644
index 000000000..773cc9f32
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: chart-with-lib-dep
+type: application
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/charts/common-0.0.5.tgz b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/charts/common-0.0.5.tgz
new file mode 100644
index 000000000..ca0a64ae3
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/charts/common-0.0.5.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/NOTES.txt
new file mode 100644
index 000000000..a758b7971
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/NOTES.txt
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chart-with-lib-dep.fullname" . }})
+ export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of by running 'kubectl get svc -w {{ template "chart-with-lib-dep.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc {{ template "chart-with-lib-dep.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods -l "app={{ template "chart-with-lib-dep.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:80
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/_helpers.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/_helpers.tpl
new file mode 100644
index 000000000..b8be8cad6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "chart-with-lib-dep.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "chart-with-lib-dep.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "chart-with-lib-dep.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/deployment.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/deployment.yaml
new file mode 100644
index 000000000..521fa5972
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "chart-with-lib-dep.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ template "chart-with-lib-dep.name" . }}
+ helm.sh/chart: {{ template "chart-with-lib-dep.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ template "chart-with-lib-dep.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ template "chart-with-lib-dep.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/ingress.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/ingress.yaml
new file mode 100644
index 000000000..42afd0879
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/ingress.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "chart-with-lib-dep.fullname" . -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app.kubernetes.io/name: {{ template "chart-with-lib-dep.name" . }}
+ helm.sh/chart: {{ template "chart-with-lib-dep.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Values.ingress.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: http
+ {{- end }}
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/service.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/service.yaml
new file mode 100644
index 000000000..4c2b91a5a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/templates/service.yaml
@@ -0,0 +1,10 @@
+{{- template "common.service" (list . "mychart.service") -}}
+{{- define "mychart.service" -}}
+## Define overrides for your Service resource here, e.g.
+# metadata:
+# labels:
+# custom: label
+# spec:
+# ports:
+# - port: 8080
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/values.yaml
new file mode 100644
index 000000000..a0cc07e9e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-lib-dep/values.yaml
@@ -0,0 +1,48 @@
+# Default values for chart-with-lib-dep.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ path: /
+ hosts:
+ - chart-example.local
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/Chart.yaml
new file mode 100644
index 000000000..ec3497670
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: crd-test
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application and it is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/crds/test-crd.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/crds/test-crd.yaml
new file mode 100644
index 000000000..1d7350f1d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-only-crds/crds/test-crd.yaml
@@ -0,0 +1,19 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: tests.test.io
+spec:
+ group: test.io
+ names:
+ kind: Test
+ listKind: TestList
+ plural: tests
+ singular: test
+ scope: Namespaced
+ versions:
+ - name : v1alpha2
+ served: true
+ storage: true
+ - name : v1alpha1
+ served: true
+ storage: false
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/Chart.yaml
new file mode 100644
index 000000000..4e24c2ebb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+name: chart-without-schema
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/Chart.yaml
new file mode 100644
index 000000000..b5a77c5db
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+name: subchart-with-schema
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/templates/empty.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/values.schema.json b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/values.schema.json
new file mode 100644
index 000000000..4ff791844
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/values.schema.json
@@ -0,0 +1,15 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Values",
+ "type": "object",
+ "properties": {
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ }
+ },
+ "required": [
+ "age"
+ ]
+}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/charts/subchart-with-schema/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/templates/empty.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/values.schema.json b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/values.schema.json
new file mode 100644
index 000000000..f30948038
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/values.schema.json
@@ -0,0 +1,18 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Values",
+ "type": "object",
+ "properties": {
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname"
+ ]
+}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/values.yaml
new file mode 100644
index 000000000..c9deafc00
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-and-subchart/values.yaml
@@ -0,0 +1 @@
+firstname: "John"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/Chart.yaml
new file mode 100644
index 000000000..395d24f6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Empty testing chart
+home: https://k8s.io/helm
+name: empty
+sources:
+- https://github.com/kubernetes/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/templates/empty.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/values.schema.json b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/values.yaml
new file mode 100644
index 000000000..5a1250bff
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative-skip-validation/values.yaml
@@ -0,0 +1,14 @@
+firstname: John
+lastname: Doe
+age: -5
+likesCoffee: true
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/Chart.yaml
new file mode 100644
index 000000000..395d24f6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Empty testing chart
+home: https://k8s.io/helm
+name: empty
+sources:
+- https://github.com/kubernetes/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/templates/empty.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/values.schema.json b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/values.yaml
new file mode 100644
index 000000000..5a1250bff
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema-negative/values.yaml
@@ -0,0 +1,14 @@
+firstname: John
+lastname: Doe
+age: -5
+likesCoffee: true
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/Chart.yaml
new file mode 100644
index 000000000..395d24f6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Empty testing chart
+home: https://k8s.io/helm
+name: empty
+sources:
+- https://github.com/kubernetes/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema/extra-values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/extra-values.yaml
new file mode 100644
index 000000000..76c290c4f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/extra-values.yaml
@@ -0,0 +1,2 @@
+age: -5
+employmentInfo: null
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema/templates/empty.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema/values.schema.json b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/values.schema.json
new file mode 100644
index 000000000..4df89bbe8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/values.schema.json
@@ -0,0 +1,67 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "properties": {
+ "addresses": {
+ "description": "List of addresses",
+ "items": {
+ "properties": {
+ "city": {
+ "type": "string"
+ },
+ "number": {
+ "type": "number"
+ },
+ "street": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "type": "array"
+ },
+ "age": {
+ "description": "Age",
+ "minimum": 0,
+ "type": "integer"
+ },
+ "employmentInfo": {
+ "properties": {
+ "salary": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "title": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "salary"
+ ],
+ "type": "object"
+ },
+ "firstname": {
+ "description": "First name",
+ "type": "string"
+ },
+ "lastname": {
+ "type": "string"
+ },
+ "likesCoffee": {
+ "type": "boolean"
+ },
+ "phoneNumbers": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "firstname",
+ "lastname",
+ "addresses",
+ "employmentInfo"
+ ],
+ "title": "Values",
+ "type": "object"
+}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-schema/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/values.yaml
new file mode 100644
index 000000000..042dea664
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-schema/values.yaml
@@ -0,0 +1,17 @@
+firstname: John
+lastname: Doe
+age: 25
+likesCoffee: true
+employmentInfo:
+ title: Software Developer
+ salary: 100000
+addresses:
+ - city: Springfield
+ street: Main
+ number: 12345
+ - city: New York
+ street: Broadway
+ number: 67890
+phoneNumbers:
+ - "(888) 888-8888"
+ - "(555) 555-5555"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-secret/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-secret/Chart.yaml
new file mode 100644
index 000000000..46d069e1c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-secret/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+description: Chart with Kubernetes Secret
+name: chart-with-secret
+version: 0.0.1
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-secret/templates/configmap.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-secret/templates/configmap.yaml
new file mode 100644
index 000000000..ce9c27d56
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-secret/templates/configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-configmap
+data:
+ foo: bar
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-secret/templates/secret.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-secret/templates/secret.yaml
new file mode 100644
index 000000000..b1e1cff56
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-secret/templates/secret.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: test-secret
+stringData:
+ foo: bar
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/Chart.yaml
new file mode 100644
index 000000000..90545a6a3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v2
+description: Chart with subchart notes
+name: chart-with-subchart-notes
+version: 0.0.1
+dependencies:
+ - name: subchart-with-notes
+ version: 0.0.1
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/charts/subchart-with-notes/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/charts/subchart-with-notes/Chart.yaml
new file mode 100644
index 000000000..f0fead9ee
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/charts/subchart-with-notes/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+description: Subchart with notes
+name: subchart-with-notes
+version: 0.0.1
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/charts/subchart-with-notes/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/charts/subchart-with-notes/templates/NOTES.txt
new file mode 100644
index 000000000..1f61a294e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/charts/subchart-with-notes/templates/NOTES.txt
@@ -0,0 +1 @@
+SUBCHART NOTES
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/templates/NOTES.txt
new file mode 100644
index 000000000..9e166d370
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-notes/templates/NOTES.txt
@@ -0,0 +1 @@
+PARENT NOTES
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/Chart.lock b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/Chart.lock
new file mode 100644
index 000000000..31cda6bd6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: subchart-with-notes
+ repository: file://../chart-with-subchart-notes/charts/subchart-with-notes
+ version: 0.0.1
+digest: sha256:8ca45f73ae3f6170a09b64a967006e98e13cd91eb51e5ab0599bb87296c7df0a
+generated: "2021-05-02T15:07:22.1099921+02:00"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/Chart.yaml
new file mode 100644
index 000000000..1bc230200
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v2
+description: Chart with subchart that needs to be fetched
+name: chart-with-subchart-update
+version: 0.0.1
+dependencies:
+ - name: subchart-with-notes
+ version: 0.0.1
+ repository: file://../chart-with-subchart-notes/charts
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/charts/subchart-with-notes/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/charts/subchart-with-notes/Chart.yaml
new file mode 100644
index 000000000..f0fead9ee
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/charts/subchart-with-notes/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v2
+description: Subchart with notes
+name: subchart-with-notes
+version: 0.0.1
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/charts/subchart-with-notes/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/charts/subchart-with-notes/templates/NOTES.txt
new file mode 100644
index 000000000..1f61a294e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/charts/subchart-with-notes/templates/NOTES.txt
@@ -0,0 +1 @@
+SUBCHART NOTES
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/templates/NOTES.txt
new file mode 100644
index 000000000..9e166d370
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-subchart-update/templates/NOTES.txt
@@ -0,0 +1 @@
+PARENT NOTES
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/Chart.yaml
new file mode 100644
index 000000000..de53ce5e3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: chart-with-template-lib-archive-dep
+type: application
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/charts/common-0.0.5.tgz b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/charts/common-0.0.5.tgz
new file mode 100644
index 000000000..465517824
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/charts/common-0.0.5.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/NOTES.txt
new file mode 100644
index 000000000..5c53ac03d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/NOTES.txt
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chart-with-template-lib-archive-dep.fullname" . }})
+ export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of by running 'kubectl get svc -w {{ template "chart-with-template-lib-archive-dep.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc {{ template "chart-with-template-lib-archive-dep.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods -l "app={{ template "chart-with-template-lib-archive-dep.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:80
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/_helpers.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/_helpers.tpl
new file mode 100644
index 000000000..76ca56b81
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "chart-with-template-lib-archive-dep.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "chart-with-template-lib-archive-dep.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "chart-with-template-lib-archive-dep.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/deployment.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/deployment.yaml
new file mode 100644
index 000000000..a49572f4a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "chart-with-template-lib-archive-dep.fullname" . }}
+ labels:
+ app: {{ template "chart-with-template-lib-archive-dep.name" . }}
+ chart: {{ template "chart-with-template-lib-archive-dep.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ template "chart-with-template-lib-archive-dep.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "chart-with-template-lib-archive-dep.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/ingress.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/ingress.yaml
new file mode 100644
index 000000000..d3325cf18
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/ingress.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "chart-with-template-lib-archive-dep.fullname" . -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app: {{ template "chart-with-template-lib-archive-dep.name" . }}
+ chart: {{ template "chart-with-template-lib-archive-dep.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.ingress.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: http
+ {{- end }}
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/service.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/service.yaml
new file mode 100644
index 000000000..bfcb080b4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/templates/service.yaml
@@ -0,0 +1,10 @@
+{{- template "common.service" (list . "chart-with-template-lib-archive-dep.service") -}}
+{{- define "chart-with-template-lib-archive-dep.service" -}}
+## Define overrides for your Service resource here, e.g.
+# metadata:
+# labels:
+# custom: label
+# spec:
+# ports:
+# - port: 8080
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/values.yaml
new file mode 100644
index 000000000..b5474cbbd
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-archive-dep/values.yaml
@@ -0,0 +1,48 @@
+# Default values for chart-with-template-lib-archive-dep.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ path: /
+ hosts:
+ - chart-example.local
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/Chart.yaml
new file mode 100644
index 000000000..cf6fc390b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: chart-with-template-lib-dep
+type: application
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/.helmignore b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/.helmignore
new file mode 100755
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/Chart.yaml
new file mode 100755
index 000000000..ba14ca089
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+appVersion: 0.0.5
+description: Common chartbuilding components and helpers
+home: https://helm.sh
+maintainers:
+- email: technosophos@gmail.com
+ name: technosophos
+- email: adnan@bitnami.com
+ name: prydonius
+name: common
+version: 0.0.5
+type: library
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/README.md b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/README.md
new file mode 100755
index 000000000..cafadcd72
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/README.md
@@ -0,0 +1,831 @@
+# Common: The Helm Helper Chart
+
+This chart is designed to make it easier for you to build and maintain Helm
+charts.
+
+It provides utilities that reflect best practices of Kubernetes chart development,
+making it faster for you to write charts.
+
+## Tips
+
+A few tips for working with Common:
+
+- Be careful when using functions that generate random data (like `common.fullname.unique`).
+ They may trigger unwanted upgrades or have other side effects.
+
+In this document, we use `release-name` as the name of the release.
+
+## Resource Kinds
+
+Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`.
+We define some of the most common kinds in a way that lets you easily work with
+them.
+
+The resource kind templates are designed to make it much faster for you to
+define _basic_ versions of these resources. They allow you to extend and modify
+just what you need, without having to copy around lots of boilerplate.
+
+To make use of these templates you must define a template that will extend the
+base template (though it can be empty). The name of this template is then passed
+to the base template, for example:
+
+```yaml
+{{- template "common.service" (list . "mychart.service") -}}
+{{- define "mychart.service" -}}
+## Define overrides for your Service resource here, e.g.
+# metadata:
+# labels:
+# custom: label
+# spec:
+# ports:
+# - port: 8080
+{{- end -}}
+```
+
+Note that the `common.service` template defines two parameters:
+
+ - The root context (usually `.`)
+ - A template name containing the service definition overrides
+
+A limitation of the Go template library is that a template can only take a
+single argument. The `list` function is used to work around this by constructing
+a list or array of arguments that is passed to the template.
+
+The `common.service` template is responsible for rendering the templates with
+the root context and merging any overrides. As you can see, this makes it very
+easy to create a basic `Service` resource without having to copy around the
+standard metadata and labels.
+
+Each implemented base resource is described in greater detail below.
+
+### `common.service`
+
+The `common.service` template creates a basic `Service` resource with the
+following defaults:
+
+- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type`
+- Named port `http` configured on port 80
+- Selector set to `app: {{ template "common.name" }}, release: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource
+
+Example template:
+
+```yaml
+{{- template "common.service" (list . "mychart.mail.service") -}}
+{{- define "mychart.mail.service" -}}
+metadata:
+ name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix
+ labels: # appended to the labels section
+ protocol: mail
+spec:
+ ports: # composes the `ports` section of the service definition.
+ - name: smtp
+ port: 25
+ targetPort: 25
+ - name: imaps
+ port: 993
+ targetPort: 993
+ selector: # this is appended to the default selector
+ protocol: mail
+{{- end -}}
+---
+{{ template "common.service" (list . "mychart.web.service") -}}
+{{- define "mychart.web.service" -}}
+metadata:
+ name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix
+ labels: # appended to the labels section
+ protocol: www
+spec:
+ ports: # composes the `ports` section of the service definition.
+ - name: www
+ port: 80
+ targetPort: 8080
+{{- end -}}
+```
+
+The above template defines _two_ services: a web service and a mail service.
+
+The most important part of a service definition is the `ports` object, which
+defines the ports that this service will listen on. Most of the time,
+`selector` is computed for you. But you can replace it or add to it.
+
+The output of the example above is:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: service
+ chart: service-0.1.0
+ heritage: Tiller
+ protocol: mail
+ release: release-name
+ name: release-name-service-mail
+spec:
+ ports:
+ - name: smtp
+ port: 25
+ targetPort: 25
+ - name: imaps
+ port: 993
+ targetPort: 993
+ selector:
+ app: service
+ release: release-name
+ protocol: mail
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: service
+ chart: service-0.1.0
+ heritage: Tiller
+ protocol: www
+ release: release-name
+ name: release-name-service-www
+spec:
+ ports:
+ - name: www
+ port: 80
+ targetPort: 8080
+ type: ClusterIP
+```
+
+## `common.deployment`
+
+The `common.deployment` template defines a basic `Deployment`. Underneath the
+hood, it uses `common.container` (see next section).
+
+By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}`
+and `release: {{ .Release.Name | quote }` as this is also used as the selector. The
+standard set of labels are not used as some of these can change during upgrades,
+which causes the replica sets and pods to not correctly match.
+
+Example use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+## Define overrides for your Deployment resource here, e.g.
+spec:
+ replicas: {{ .Values.replicaCount }}
+{{- end -}}
+```
+
+## `common.container`
+
+The `common.container` template creates a basic `Container` spec to be used
+within a `Deployment` or `ReplicaSet`. It holds the following defaults:
+
+- The name is set to the chart name
+- Uses `.Values.image` to describe the image to run, with the following spec:
+ ```yaml
+ image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+ ```
+- Exposes the named port `http` as port 80
+- Lays out the compute resources using `.Values.resources`
+
+Example use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+## Define overrides for your Deployment resource here, e.g.
+spec:
+ template:
+ spec:
+ containers:
+ - {{ template "common.container" (list . "mychart.deployment.container") }}
+{{- end -}}
+{{- define "mychart.deployment.container" -}}
+## Define overrides for your Container here, e.g.
+livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+{{- end -}}
+```
+
+The above example creates a `Deployment` resource which makes use of the
+`common.container` template to populate the PodSpec's container list. The usage
+of this template is similar to the other resources, you must define and
+reference a template that contains overrides for the container object.
+
+The most important part of a container definition is the image you want to run.
+As mentioned above, this is derived from `.Values.image` by default. It is a
+best practice to define the image, tag and pull policy in your charts' values as
+this makes it easy for an operator to change the image registry, or use a
+specific tag or version. Another example of configuration that should be exposed
+to chart operators is the container's required compute resources, as this is
+also very specific to an operators environment. An example `values.yaml` for
+your chart could look like:
+
+```yaml
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+```
+
+The output of running the above values through the earlier template is:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: deployment
+ chart: deployment-0.1.0
+ heritage: Tiller
+ release: release-name
+ name: release-name-deployment
+spec:
+ template:
+ metadata:
+ labels:
+ app: deployment
+ spec:
+ containers:
+ - image: nginx:stable
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ name: deployment
+ ports:
+ - containerPort: 80
+ name: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+```
+
+## `common.configmap`
+
+The `common.configmap` template creates an empty `ConfigMap` resource that you
+can override with your configuration.
+
+Example use:
+
+```yaml
+{{- template "common.configmap" (list . "mychart.configmap") -}}
+{{- define "mychart.configmap" -}}
+data:
+ zeus: cat
+ athena: cat
+ julius: cat
+ one: |-
+ {{ .Files.Get "file1.txt" }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: v1
+data:
+ athena: cat
+ julius: cat
+ one: This is a file.
+ zeus: cat
+kind: ConfigMap
+metadata:
+ labels:
+ app: configmap
+ chart: configmap-0.1.0
+ heritage: Tiller
+ release: release-name
+ name: release-name-configmap
+```
+
+## `common.secret`
+
+The `common.secret` template creates an empty `Secret` resource that you
+can override with your secrets.
+
+Example use:
+
+```yaml
+{{- template "common.secret" (list . "mychart.secret") -}}
+{{- define "mychart.secret" -}}
+data:
+ zeus: {{ print "cat" | b64enc }}
+ athena: {{ print "cat" | b64enc }}
+ julius: {{ print "cat" | b64enc }}
+ one: |-
+ {{ .Files.Get "file1.txt" | b64enc }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: v1
+data:
+ athena: Y2F0
+ julius: Y2F0
+ one: VGhpcyBpcyBhIGZpbGUuCg==
+ zeus: Y2F0
+kind: Secret
+metadata:
+ labels:
+ app: secret
+ chart: secret-0.1.0
+ heritage: Tiller
+ release: release-name
+ name: release-name-secret
+type: Opaque
+```
+
+## `common.ingress`
+
+The `common.ingress` template is designed to give you a well-defined `Ingress`
+resource, that can be configured using `.Values.ingress`. An example values file
+that can be used to configure the `Ingress` resource is:
+
+```yaml
+ingress:
+ hosts:
+ - chart-example.local
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ kubernetes.io/tls-acme: "true"
+ tls:
+ - secretName: chart-example-tls
+ hosts:
+ - chart-example.local
+```
+
+Example use:
+
+```yaml
+{{- template "common.ingress" (list . "mychart.ingress") -}}
+{{- define "mychart.ingress" -}}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ kubernetes.io/tls-acme: "true"
+ labels:
+ app: ingress
+ chart: ingress-0.1.0
+ heritage: Tiller
+ release: release-name
+ name: release-name-ingress
+spec:
+ rules:
+ - host: chart-example.local
+ http:
+ paths:
+ - backend:
+ serviceName: release-name-ingress
+ servicePort: 80
+ path: /
+ tls:
+ - hosts:
+ - chart-example.local
+ secretName: chart-example-tls
+```
+
+## `common.persistentvolumeclaim`
+
+`common.persistentvolumeclaim` can be used to easily add a
+`PersistentVolumeClaim` resource to your chart that can be configured using
+`.Values.persistence`:
+
+| Value | Description |
+| ------------------------- | ------------------------------------------------------------------------------------------------------- |
+| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead |
+| persistence.storageClass | `StorageClass` name |
+| persistence.accessMode | Access mode for persistent volume |
+| persistence.size | Size of persistent volume |
+| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim |
+
+An example values file that can be used to configure the
+`PersistentVolumeClaim` resource is:
+
+```yaml
+persistence:
+ enabled: true
+ storageClass: fast
+ accessMode: ReadWriteOnce
+ size: 8Gi
+```
+
+Example use:
+
+```yaml
+{{- template "common.persistentvolumeclaim" (list . "mychart.persistentvolumeclaim") -}}
+{{- define "mychart.persistentvolumeclaim" -}}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ labels:
+ app: persistentvolumeclaim
+ chart: persistentvolumeclaim-0.1.0
+ heritage: Tiller
+ release: release-name
+ name: release-name-persistentvolumeclaim
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 8Gi
+ storageClassName: "fast"
+```
+
+## Partial API Objects
+
+When writing Kubernetes resources, you may find the following helpers useful to
+construct parts of the spec.
+
+### EnvVar
+
+Use the EnvVar helpers within a container spec to simplify specifying key-value
+environment variables or referencing secrets as values.
+
+Example Use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+spec:
+ template:
+ spec:
+ containers:
+ - {{ template "common.container" (list . "mychart.deployment.container") }}
+{{- end -}}
+{{- define "mychart.deployment.container" -}}
+{{- $fullname := include "common.fullname" . -}}
+env:
+- {{ template "common.envvar.value" (list "ZEUS" "cat") }}
+- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+...
+ spec:
+ containers:
+ - env:
+ - name: ZEUS
+ value: cat
+ - name: ATHENA
+ valueFrom:
+ secretKeyRef:
+ key: athena
+ name: secret-name
+...
+```
+
+### Volume
+
+Use the Volume helpers within a `Deployment` spec to help define ConfigMap and
+PersistentVolumeClaim volumes.
+
+Example Use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+spec:
+ template:
+ spec:
+ volumes:
+ - {{ template "common.volume.configMap" (list "config" "configmap-name") }}
+ - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+...
+ spec:
+ volumes:
+ - configMap:
+ name: configmap-name
+ name: config
+ - name: data
+ persistentVolumeClaim:
+ claimName: pvc-name
+...
+```
+
+The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object:
+
+| Value | Description |
+| ------------------------- | ----------------------------------------------------- |
+| persistence.enabled | If false, creates an `emptyDir` instead |
+| persistence.existingClaim | If set, uses this instead of the passed in claim name |
+
+## Utilities
+
+### `common.fullname`
+
+The `common.fullname` template generates a name suitable for the `name:` field
+in Kubernetes metadata. It is used like this:
+
+```yaml
+name: {{ template "common.fullname" . }}
+```
+
+The following different values can influence it:
+
+```yaml
+# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This
+# overrides that and uses the given string instead.
+fullnameOverride: "some-name"
+
+# This adds a prefix
+fullnamePrefix: "pre-"
+# This appends a suffix
+fullnameSuffix: "-suf"
+
+# Global versions of the above
+global:
+ fullnamePrefix: "pp-"
+ fullnameSuffix: "-ps"
+```
+
+Example output:
+
+```yaml
+---
+# with the values above
+name: pp-pre-some-name-suf-ps
+
+---
+# the default, for release "happy-panda" and chart "wordpress"
+name: happy-panda-wordpress
+```
+
+Output of this function is truncated at 54 characters, which leaves 9 additional
+characters for customized overriding. Thus you can easily extend this name
+in your own charts:
+
+```yaml
+{{- define "my.fullname" -}}
+ {{ template "common.fullname" . }}-my-stuff
+{{- end -}}
+```
+
+### `common.fullname.unique`
+
+The `common.fullname.unique` variant of fullname appends a unique seven-character
+sequence to the end of the common name field.
+
+This takes all of the same parameters as `common.fullname`
+
+Example template:
+
+```yaml
+uniqueName: {{ template "common.fullname.unique" . }}
+```
+
+Example output:
+
+```yaml
+uniqueName: release-name-fullname-jl0dbwx
+```
+
+It is also impacted by the prefix and suffix definitions, as well as by
+`.Values.fullnameOverride`
+
+Note that the effective maximum length of this function is 63 characters, not 54.
+
+### `common.name`
+
+The `common.name` template generates a name suitable for the `app` label. It is used like this:
+
+```yaml
+app: {{ template "common.name" . }}
+```
+
+The following different values can influence it:
+
+```yaml
+# By default, name uses '{{ .Chart.Name }}'. This
+# overrides that and uses the given string instead.
+nameOverride: "some-name"
+
+# This adds a prefix
+namePrefix: "pre-"
+# This appends a suffix
+nameSuffix: "-suf"
+
+# Global versions of the above
+global:
+ namePrefix: "pp-"
+ nameSuffix: "-ps"
+```
+
+Example output:
+
+```yaml
+---
+# with the values above
+name: pp-pre-some-name-suf-ps
+
+---
+# the default, for chart "wordpress"
+name: wordpress
+```
+
+Output of this function is truncated at 54 characters, which leaves 9 additional
+characters for customized overriding. Thus you can easily extend this name
+in your own charts:
+
+```yaml
+{{- define "my.name" -}}
+ {{ template "common.name" . }}-my-stuff
+{{- end -}}
+```
+
+### `common.metadata`
+
+The `common.metadata` helper generates the `metadata:` section of a Kubernetes
+resource.
+
+This takes three objects:
+ - .top: top context
+ - .fullnameOverride: override the fullname with this name
+ - .metadata
+ - .labels: key/value list of labels
+ - .annotations: key/value list of annotations
+ - .hook: name(s) of hook(s)
+
+It generates standard labels, annotations, hooks, and a name field.
+
+Example template:
+
+```yaml
+{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }}
+---
+{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }}
+```
+
+Example values:
+
+```yaml
+bio:
+ name: example
+ labels:
+ first: matt
+ last: butcher
+ nick: technosophos
+ annotations:
+ format: bio
+ destination: archive
+ hook: pre-install
+
+pet:
+ fullnameOverride: Zeus
+
+```
+
+Example output:
+
+```yaml
+metadata:
+ name: release-name-metadata
+ labels:
+ app: metadata
+ heritage: "Tiller"
+ release: "release-name"
+ chart: metadata-0.1.0
+ first: "matt"
+ last: "butcher"
+ nick: "technosophos"
+ annotations:
+ "destination": "archive"
+ "format": "bio"
+ "helm.sh/hook": "pre-install"
+---
+metadata:
+ name: Zeus
+ labels:
+ app: metadata
+ heritage: "Tiller"
+ release: "release-name"
+ chart: metadata-0.1.0
+ annotations:
+```
+
+Most of the common templates that define a resource type (e.g. `common.configmap`
+or `common.job`) use this to generate the metadata, which means they inherit
+the same `labels`, `annotations`, `nameOverride`, and `hook` fields.
+
+### `common.labelize`
+
+`common.labelize` turns a map into a set of labels.
+
+Example template:
+
+```yaml
+{{- $map := dict "first" "1" "second" "2" "third" "3" -}}
+{{- template "common.labelize" $map -}}
+```
+
+Example output:
+
+```yaml
+first: "1"
+second: "2"
+third: "3"
+```
+
+### `common.labels.standard`
+
+`common.labels.standard` prints the standard set of labels.
+
+Example usage:
+
+```
+{{ template "common.labels.standard" . }}
+```
+
+Example output:
+
+```yaml
+app: labelizer
+heritage: "Tiller"
+release: "release-name"
+chart: labelizer-0.1.0
+```
+
+### `common.hook`
+
+The `common.hook` template is a convenience for defining hooks.
+
+Example template:
+
+```yaml
+{{ template "common.hook" "pre-install,post-install" }}
+```
+
+Example output:
+
+```yaml
+"helm.sh/hook": "pre-install,post-install"
+```
+
+### `common.chartref`
+
+The `common.chartref` helper prints the chart name and version, escaped to be
+legal in a Kubernetes label field.
+
+Example template:
+
+```yaml
+chartref: {{ template "common.chartref" . }}
+```
+
+For the chart `foo` with version `1.2.3-beta.55+1234`, this will render:
+
+```yaml
+chartref: foo-1.2.3-beta.55_1234
+```
+
+(Note that `+` is an illegal character in label values)
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_chartref.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_chartref.tpl
new file mode 100755
index 000000000..e6c14866f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_chartref.tpl
@@ -0,0 +1,14 @@
+{{- /*
+common.chartref prints a chart name and version.
+
+It does minimal escaping for use in Kubernetes labels.
+
+Example output:
+
+ zookeeper-1.2.3
+ wordpress-3.2.1_20170219
+
+*/ -}}
+{{- define "common.chartref" -}}
+ {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_configmap.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_configmap.yaml
new file mode 100755
index 000000000..03dbbf858
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_configmap.yaml
@@ -0,0 +1,9 @@
+{{- define "common.configmap.tpl" -}}
+apiVersion: v1
+kind: ConfigMap
+{{ template "common.metadata" . }}
+data: {}
+{{- end -}}
+{{- define "common.configmap" -}}
+{{- template "common.util.merge" (append . "common.configmap.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_container.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_container.yaml
new file mode 100755
index 000000000..540eb0e6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_container.yaml
@@ -0,0 +1,15 @@
+{{- define "common.container.tpl" -}}
+name: {{ .Chart.Name }}
+image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+imagePullPolicy: {{ .Values.image.pullPolicy }}
+ports:
+- name: http
+ containerPort: 80
+resources:
+{{ toYaml .Values.resources | indent 2 }}
+{{- end -}}
+{{- define "common.container" -}}
+{{- /* clear new line so indentation works correctly */ -}}
+{{- println "" -}}
+{{- include "common.util.merge" (append . "common.container.tpl") | indent 8 -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_deployment.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_deployment.yaml
new file mode 100755
index 000000000..c49dae3eb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_deployment.yaml
@@ -0,0 +1,18 @@
+{{- define "common.deployment.tpl" -}}
+apiVersion: extensions/v1beta1
+kind: Deployment
+{{ template "common.metadata" . }}
+spec:
+ template:
+ metadata:
+ labels:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name | quote }}
+ spec:
+ containers:
+ -
+{{ include "common.container.tpl" . | indent 8 }}
+{{- end -}}
+{{- define "common.deployment" -}}
+{{- template "common.util.merge" (append . "common.deployment.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_envvar.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_envvar.tpl
new file mode 100755
index 000000000..709251f8f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_envvar.tpl
@@ -0,0 +1,31 @@
+{{- define "common.envvar.value" -}}
+ {{- $name := index . 0 -}}
+ {{- $value := index . 1 -}}
+
+ name: {{ $name }}
+ value: {{ default "" $value | quote }}
+{{- end -}}
+
+{{- define "common.envvar.configmap" -}}
+ {{- $name := index . 0 -}}
+ {{- $configMapName := index . 1 -}}
+ {{- $configMapKey := index . 2 -}}
+
+ name: {{ $name }}
+ valueFrom:
+ configMapKeyRef:
+ name: {{ $configMapName }}
+ key: {{ $configMapKey }}
+{{- end -}}
+
+{{- define "common.envvar.secret" -}}
+ {{- $name := index . 0 -}}
+ {{- $secretName := index . 1 -}}
+ {{- $secretKey := index . 2 -}}
+
+ name: {{ $name }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ $secretName }}
+ key: {{ $secretKey }}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_fullname.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_fullname.tpl
new file mode 100755
index 000000000..2da6cdf18
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_fullname.tpl
@@ -0,0 +1,39 @@
+{{- /*
+fullname defines a suitably unique name for a resource by combining
+the release name and the chart name.
+
+The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
+not exceed 63 characters.
+
+Parameters:
+
+- .Values.fullnameOverride: Replaces the computed name with this given name
+- .Values.fullnamePrefix: Prefix
+- .Values.global.fullnamePrefix: Global prefix
+- .Values.fullnameSuffix: Suffix
+- .Values.global.fullnameSuffix: Global suffix
+
+The applied order is: "global prefix + prefix + name + suffix + global suffix"
+
+Usage: 'name: "{{- template "common.fullname" . -}}"'
+*/ -}}
+{{- define "common.fullname"}}
+ {{- $global := default (dict) .Values.global -}}
+ {{- $base := default (printf "%s-%s" .Release.Name .Chart.Name) .Values.fullnameOverride -}}
+ {{- $gpre := default "" $global.fullnamePrefix -}}
+ {{- $pre := default "" .Values.fullnamePrefix -}}
+ {{- $suf := default "" .Values.fullnameSuffix -}}
+ {{- $gsuf := default "" $global.fullnameSuffix -}}
+ {{- $name := print $gpre $pre $base $suf $gsuf -}}
+ {{- $name | lower | trunc 54 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- /*
+common.fullname.unique adds a random suffix to the unique name.
+
+This takes the same parameters as common.fullname
+
+*/ -}}
+{{- define "common.fullname.unique" -}}
+ {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }}
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_ingress.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_ingress.yaml
new file mode 100755
index 000000000..78411e15b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_ingress.yaml
@@ -0,0 +1,27 @@
+{{- define "common.ingress.tpl" -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+{{ template "common.metadata" . }}
+ {{- if .Values.ingress.annotations }}
+ annotations:
+ {{ include "common.annotate" .Values.ingress.annotations | indent 4 }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: /
+ backend:
+ serviceName: {{ template "common.fullname" $ }}
+ servicePort: 80
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+{{ toYaml .Values.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
+{{- define "common.ingress" -}}
+{{- template "common.util.merge" (append . "common.ingress.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata.yaml
new file mode 100755
index 000000000..f96ed09fe
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata.yaml
@@ -0,0 +1,10 @@
+{{- /*
+common.metadata creates a standard metadata header.
+It creates a 'metadata:' section with name and labels.
+*/ -}}
+{{ define "common.metadata" -}}
+metadata:
+ name: {{ template "common.fullname" . }}
+ labels:
+{{ include "common.labels.standard" . | indent 4 -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata_annotations.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata_annotations.tpl
new file mode 100755
index 000000000..dffe1eca9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata_annotations.tpl
@@ -0,0 +1,18 @@
+{{- /*
+common.hook defines a hook.
+
+This is to be used in a 'metadata.annotations' section.
+
+This should be called as 'template "common.metadata.hook" "post-install"'
+
+Any valid hook may be passed in. Separate multiple hooks with a ",".
+*/ -}}
+{{- define "common.hook" -}}
+"helm.sh/hook": {{printf "%s" . | quote}}
+{{- end -}}
+
+{{- define "common.annotate" -}}
+{{- range $k, $v := . }}
+{{ $k | quote }}: {{ $v | quote }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata_labels.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata_labels.tpl
new file mode 100755
index 000000000..15fe00c5f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_metadata_labels.tpl
@@ -0,0 +1,28 @@
+{{- /*
+common.labelize takes a dict or map and generates labels.
+
+Values will be quoted. Keys will not.
+
+Example output:
+
+ first: "Matt"
+ last: "Butcher"
+
+*/ -}}
+{{- define "common.labelize" -}}
+{{- range $k, $v := . }}
+{{ $k }}: {{ $v | quote }}
+{{- end -}}
+{{- end -}}
+
+{{- /*
+common.labels.standard prints the standard Helm labels.
+
+The standard labels are frequently used in metadata.
+*/ -}}
+{{- define "common.labels.standard" -}}
+app: {{ template "common.name" . }}
+chart: {{ template "common.chartref" . }}
+heritage: {{ .Release.Service | quote }}
+release: {{ .Release.Name | quote }}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_name.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_name.tpl
new file mode 100755
index 000000000..1d42fb068
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_name.tpl
@@ -0,0 +1,29 @@
+{{- /*
+name defines a template for the name of the chart. It should be used for the `app` label.
+This is common practice in many Kubernetes manifests, and is not Helm-specific.
+
+The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
+not exceed 63 characters.
+
+Parameters:
+
+- .Values.nameOverride: Replaces the computed name with this given name
+- .Values.namePrefix: Prefix
+- .Values.global.namePrefix: Global prefix
+- .Values.nameSuffix: Suffix
+- .Values.global.nameSuffix: Global suffix
+
+The applied order is: "global prefix + prefix + name + suffix + global suffix"
+
+Usage: 'name: "{{- template "common.name" . -}}"'
+*/ -}}
+{{- define "common.name"}}
+ {{- $global := default (dict) .Values.global -}}
+ {{- $base := default .Chart.Name .Values.nameOverride -}}
+ {{- $gpre := default "" $global.namePrefix -}}
+ {{- $pre := default "" .Values.namePrefix -}}
+ {{- $suf := default "" .Values.nameSuffix -}}
+ {{- $gsuf := default "" $global.nameSuffix -}}
+ {{- $name := print $gpre $pre $base $suf $gsuf -}}
+ {{- $name | lower | trunc 54 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_persistentvolumeclaim.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_persistentvolumeclaim.yaml
new file mode 100755
index 000000000..6c1578c7e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_persistentvolumeclaim.yaml
@@ -0,0 +1,24 @@
+{{- define "common.persistentvolumeclaim.tpl" -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+{{ template "common.metadata" . }}
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
+{{- define "common.persistentvolumeclaim" -}}
+{{- $top := first . -}}
+{{- if and $top.Values.persistence.enabled (not $top.Values.persistence.existingClaim) -}}
+{{- template "common.util.merge" (append . "common.persistentvolumeclaim.tpl") -}}
+{{- end -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_secret.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_secret.yaml
new file mode 100755
index 000000000..0615d35cb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_secret.yaml
@@ -0,0 +1,10 @@
+{{- define "common.secret.tpl" -}}
+apiVersion: v1
+kind: Secret
+{{ template "common.metadata" . }}
+type: Opaque
+data: {}
+{{- end -}}
+{{- define "common.secret" -}}
+{{- template "common.util.merge" (append . "common.secret.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_service.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_service.yaml
new file mode 100755
index 000000000..67379525f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_service.yaml
@@ -0,0 +1,17 @@
+{{- define "common.service.tpl" -}}
+apiVersion: v1
+kind: Service
+{{ template "common.metadata" . }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ selector:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name | quote }}
+{{- end -}}
+{{- define "common.service" -}}
+{{- template "common.util.merge" (append . "common.service.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_util.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_util.tpl
new file mode 100755
index 000000000..a7d4cc751
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_util.tpl
@@ -0,0 +1,15 @@
+{{- /*
+common.util.merge will merge two YAML templates and output the result.
+
+This takes an array of three values:
+- the top context
+- the template name of the overrides (destination)
+- the template name of the base (source)
+
+*/ -}}
+{{- define "common.util.merge" -}}
+{{- $top := first . -}}
+{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
+{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
+{{- toYaml (merge $overrides $tpl) -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_volume.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_volume.tpl
new file mode 100755
index 000000000..521a1f48b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/_volume.tpl
@@ -0,0 +1,22 @@
+{{- define "common.volume.configMap" -}}
+ {{- $name := index . 0 -}}
+ {{- $configMapName := index . 1 -}}
+
+ name: {{ $name }}
+ configMap:
+ name: {{ $configMapName }}
+{{- end -}}
+
+{{- define "common.volume.pvc" -}}
+ {{- $name := index . 0 -}}
+ {{- $claimName := index . 1 -}}
+ {{- $persistence := index . 2 -}}
+
+ name: {{ $name }}
+ {{- if $persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ $persistence.existingClaim | default $claimName }}
+ {{- else }}
+ emptyDir: {}
+ {{- end -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/configmap.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/configmap.yaml
new file mode 100644
index 000000000..b5bf1dfc3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/templates/configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: common-configmap
+data:
+ myvalue: "Hello World"
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/values.yaml
new file mode 100755
index 000000000..b7cf514d5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/charts/common/values.yaml
@@ -0,0 +1,4 @@
+# Default values for commons.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/NOTES.txt
new file mode 100644
index 000000000..8f6bb9b1d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/NOTES.txt
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chart-with-template-lib-dep.fullname" . }})
+ export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of by running 'kubectl get svc -w {{ template "chart-with-template-lib-dep.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc {{ template "chart-with-template-lib-dep.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods -l "app={{ template "chart-with-template-lib-dep.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:80
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/_helpers.tpl b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/_helpers.tpl
new file mode 100644
index 000000000..0ab79743d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "chart-with-template-lib-dep.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "chart-with-template-lib-dep.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "chart-with-template-lib-dep.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/deployment.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/deployment.yaml
new file mode 100644
index 000000000..6b950d139
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "chart-with-template-lib-dep.fullname" . }}
+ labels:
+ app: {{ template "chart-with-template-lib-dep.name" . }}
+ chart: {{ template "chart-with-template-lib-dep.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ template "chart-with-template-lib-dep.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "chart-with-template-lib-dep.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/ingress.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/ingress.yaml
new file mode 100644
index 000000000..a978df4e7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/ingress.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "chart-with-template-lib-dep.fullname" . -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app: {{ template "chart-with-template-lib-dep.name" . }}
+ chart: {{ template "chart-with-template-lib-dep.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.ingress.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: http
+ {{- end }}
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/service.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/service.yaml
new file mode 100644
index 000000000..d532bb3d8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/templates/service.yaml
@@ -0,0 +1,10 @@
+{{- template "common.service" (list . "chart-with-template-lib-dep.service") -}}
+{{- define "chart-with-template-lib-dep.service" -}}
+## Define overrides for your Service resource here, e.g.
+# metadata:
+# labels:
+# custom: label
+# spec:
+# ports:
+# - port: 8080
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/values.yaml
new file mode 100644
index 000000000..d49955c26
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-lib-dep/values.yaml
@@ -0,0 +1,48 @@
+# Default values for chart-with-template-lib-dep.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ path: /
+ hosts:
+ - chart-example.local
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/Chart.yaml
new file mode 100644
index 000000000..29b477b06
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Deploy a basic Alpine Linux pod
+home: https://helm.sh/helm
+name: chart-with-template-with-invalid-yaml
+sources:
+ - https://github.com/helm/helm
+version: 0.1.0
+type: application
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/README.md b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/README.md
new file mode 100644
index 000000000..fcf7ee017
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/README.md
@@ -0,0 +1,13 @@
+#Alpine: A simple Helm chart
+
+Run a single pod of Alpine Linux.
+
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.yaml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/templates/alpine-pod.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/templates/alpine-pod.yaml
new file mode 100644
index 000000000..697cb50fe
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/templates/alpine-pod.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{.Release.Name}}-{{.Values.Name}}"
+spec:
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
+invalid
diff --git a/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/values.yaml b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/values.yaml
new file mode 100644
index 000000000..807e12aea
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/chart-with-template-with-invalid-yaml/values.yaml
@@ -0,0 +1 @@
+Name: my-alpine
diff --git a/helm/pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tar.gz b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tar.gz
new file mode 100644
index 000000000..3c9c24d76
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tar.gz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz
new file mode 100644
index 000000000..3c9c24d76
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/compressedchart-0.2.0.tgz b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.2.0.tgz
new file mode 100644
index 000000000..16a644a79
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.2.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/compressedchart-0.3.0.tgz b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.3.0.tgz
new file mode 100644
index 000000000..051bd6fd9
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/compressedchart-0.3.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/compressedchart-with-hyphens-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/compressedchart-with-hyphens-0.1.0.tgz
new file mode 100644
index 000000000..379210a92
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/compressedchart-with-hyphens-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/deprecated/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/deprecated/Chart.yaml
new file mode 100644
index 000000000..10185beeb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/deprecated/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Deprecated testing chart
+home: https://helm.sh/helm
+name: deprecated
+sources:
+ - https://github.com/helm/helm
+version: 0.1.0
+deprecated: true
diff --git a/helm/pkg/cmd/testdata/testcharts/deprecated/README.md b/helm/pkg/cmd/testdata/testcharts/deprecated/README.md
new file mode 100644
index 000000000..0df9a8bbc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/deprecated/README.md
@@ -0,0 +1,3 @@
+#Deprecated
+
+This space intentionally left blank.
diff --git a/helm/pkg/cmd/testdata/testcharts/empty/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/empty/Chart.yaml
new file mode 100644
index 000000000..4f1dc0012
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/empty/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Empty testing chart
+home: https://helm.sh/helm
+name: empty
+sources:
+ - https://github.com/helm/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/empty/README.md b/helm/pkg/cmd/testdata/testcharts/empty/README.md
new file mode 100644
index 000000000..ed73c1797
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/empty/README.md
@@ -0,0 +1,3 @@
+#Empty
+
+This space intentionally left blank.
diff --git a/helm/pkg/cmd/testdata/testcharts/empty/templates/empty.yaml b/helm/pkg/cmd/testdata/testcharts/empty/templates/empty.yaml
new file mode 100644
index 000000000..c80812f6e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/empty/templates/empty.yaml
@@ -0,0 +1 @@
+# This file is intentionally blank
diff --git a/helm/pkg/cmd/testdata/testcharts/empty/values.yaml b/helm/pkg/cmd/testdata/testcharts/empty/values.yaml
new file mode 100644
index 000000000..1f0ff00e3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/empty/values.yaml
@@ -0,0 +1 @@
+Name: my-empty
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-7233/.helmignore b/helm/pkg/cmd/testdata/testcharts/issue-7233/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-7233/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-7233/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/issue-7233/Chart.yaml
new file mode 100644
index 000000000..b31997acb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-7233/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: issue-7233
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-7233/requirements.lock b/helm/pkg/cmd/testdata/testcharts/issue-7233/requirements.lock
new file mode 100644
index 000000000..62744125b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-7233/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: alpine
+ repository: file://../alpine
+ version: 0.1.0
+digest: sha256:7b380b1a826e7be1eecb089f66209d6d3df54be4bf879d4a8e6f8a9e871710e5
+generated: "2020-01-31T11:30:21.911547651Z"
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-7233/requirements.yaml b/helm/pkg/cmd/testdata/testcharts/issue-7233/requirements.yaml
new file mode 100644
index 000000000..f0195cb15
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-7233/requirements.yaml
@@ -0,0 +1,4 @@
+dependencies:
+- name: alpine
+ version: 0.1.0
+ repository: file://../alpine
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-7233/templates/configmap.yaml b/helm/pkg/cmd/testdata/testcharts/issue-7233/templates/configmap.yaml
new file mode 100644
index 000000000..53880b25d
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-7233/templates/configmap.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-configmap
+data:
+ myvalue: "Hello World"
+ drink: {{ .Values.favoriteDrink }}
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-7233/values.yaml b/helm/pkg/cmd/testdata/testcharts/issue-7233/values.yaml
new file mode 100644
index 000000000..b1aa168d7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-7233/values.yaml
@@ -0,0 +1 @@
+favoriteDrink: coffee
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-9027/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/issue-9027/Chart.yaml
new file mode 100644
index 000000000..ea6761a1c
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-9027/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: issue-9027
+version: 0.1.0
+dependencies:
+ - name: subchart
+ version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/Chart.yaml
new file mode 100644
index 000000000..0639b1806
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/Chart.yaml
@@ -0,0 +1,3 @@
+apiVersion: v2
+name: subchart
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/templates/values.yaml b/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/templates/values.yaml
new file mode 100644
index 000000000..fe0018e1a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/templates/values.yaml
@@ -0,0 +1 @@
+{{ .Values | toYaml }}
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/values.yaml
new file mode 100644
index 000000000..0da524211
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-9027/charts/subchart/values.yaml
@@ -0,0 +1,17 @@
+global:
+ hash:
+ key1: 1
+ key2: 2
+ key3: 3
+ key4: 4
+ key5: 5
+ key6: 6
+
+
+hash:
+ key1: 1
+ key2: 2
+ key3: 3
+ key4: 4
+ key5: 5
+ key6: 6
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-9027/templates/values.yaml b/helm/pkg/cmd/testdata/testcharts/issue-9027/templates/values.yaml
new file mode 100644
index 000000000..fe0018e1a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-9027/templates/values.yaml
@@ -0,0 +1 @@
+{{ .Values | toYaml }}
diff --git a/helm/pkg/cmd/testdata/testcharts/issue-9027/values.yaml b/helm/pkg/cmd/testdata/testcharts/issue-9027/values.yaml
new file mode 100644
index 000000000..22577e4f8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue-9027/values.yaml
@@ -0,0 +1,11 @@
+global:
+ hash:
+ key1: null
+ key2: null
+ key3: 13
+
+subchart:
+ hash:
+ key1: null
+ key2: null
+ key3: 13
diff --git a/helm/pkg/cmd/testdata/testcharts/issue1979/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/issue1979/Chart.yaml
new file mode 100644
index 000000000..5269b5cf6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue1979/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Deploy a basic Alpine Linux pod
+home: https://helm.sh/helm
+name: alpine
+sources:
+ - https://github.com/helm/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/issue1979/README.md b/helm/pkg/cmd/testdata/testcharts/issue1979/README.md
new file mode 100644
index 000000000..fcf7ee017
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue1979/README.md
@@ -0,0 +1,13 @@
+#Alpine: A simple Helm chart
+
+Run a single pod of Alpine Linux.
+
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.yaml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/cmd/testdata/testcharts/issue1979/extra_values.yaml b/helm/pkg/cmd/testdata/testcharts/issue1979/extra_values.yaml
new file mode 100644
index 000000000..468bbacbc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue1979/extra_values.yaml
@@ -0,0 +1,2 @@
+test:
+ Name: extra-values
diff --git a/helm/pkg/cmd/testdata/testcharts/issue1979/more_values.yaml b/helm/pkg/cmd/testdata/testcharts/issue1979/more_values.yaml
new file mode 100644
index 000000000..3d21e1fed
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue1979/more_values.yaml
@@ -0,0 +1,2 @@
+test:
+ Name: more-values
diff --git a/helm/pkg/cmd/testdata/testcharts/issue1979/templates/alpine-pod.yaml b/helm/pkg/cmd/testdata/testcharts/issue1979/templates/alpine-pod.yaml
new file mode 100644
index 000000000..6f025fecb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue1979/templates/alpine-pod.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{.Release.Name}}-{{.Values.Name}}"
+ labels:
+ # The "app.kubernetes.io/managed-by" label is used to track which tool
+ # deployed a given chart. It is useful for admins who want to see what
+ # releases a particular tool is responsible for.
+ app.kubernetes.io/managed-by: {{.Release.Service | quote }}
+ # The "app.kubernetes.io/instance" convention makes it easy to tie a release
+ # to all of the Kubernetes resources that were created as part of that
+ # release.
+ app.kubernetes.io/instance: {{.Release.Name | quote }}
+ # This makes it easy to audit chart usage.
+ helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}"
+ values: {{.Values.test.Name}}
+spec:
+ # This shows how to use a simple value. This will look for a passed-in value
+ # called restartPolicy. If it is not found, it will use the default value.
+ # {{default "Never" .restartPolicy}} is a slightly optimized version of the
+ # more conventional syntax: {{.restartPolicy | default "Never"}}
+ restartPolicy: {{default "Never" .Values.restartPolicy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.9"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/cmd/testdata/testcharts/issue1979/values.yaml b/helm/pkg/cmd/testdata/testcharts/issue1979/values.yaml
new file mode 100644
index 000000000..879d760f9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/issue1979/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+Name: my-alpine
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/.helmignore b/helm/pkg/cmd/testdata/testcharts/lib-chart/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/Chart.yaml
new file mode 100755
index 000000000..4dcddc85e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+description: Common chartbuilding components and helpers
+name: lib-chart
+version: 0.0.5
+appVersion: 0.0.5
+home: https://helm.sh
+maintainers:
+- name: technosophos
+ email: technosophos@gmail.com
+- name: prydonius
+ email: adnan@bitnami.com
+type: Library
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/README.md b/helm/pkg/cmd/testdata/testcharts/lib-chart/README.md
new file mode 100644
index 000000000..f69ff1c02
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/README.md
@@ -0,0 +1,831 @@
+# Common: The Helm Helper Chart
+
+This chart is designed to make it easier for you to build and maintain Helm
+charts.
+
+It provides utilities that reflect best practices of Kubernetes chart development,
+making it faster for you to write charts.
+
+## Tips
+
+A few tips for working with Common:
+
+- Be careful when using functions that generate random data (like `common.fullname.unique`).
+ They may trigger unwanted upgrades or have other side effects.
+
+In this document, we use `release-name` as the name of the release.
+
+## Resource Kinds
+
+Kubernetes defines a variety of resource kinds, from `Secret` to `StatefulSet`.
+We define some of the most common kinds in a way that lets you easily work with
+them.
+
+The resource kind templates are designed to make it much faster for you to
+define _basic_ versions of these resources. They allow you to extend and modify
+just what you need, without having to copy around lots of boilerplate.
+
+To make use of these templates you must define a template that will extend the
+base template (though it can be empty). The name of this template is then passed
+to the base template, for example:
+
+```yaml
+{{- template "common.service" (list . "mychart.service") -}}
+{{- define "mychart.service" -}}
+## Define overrides for your Service resource here, e.g.
+# metadata:
+# labels:
+# custom: label
+# spec:
+# ports:
+# - port: 8080
+{{- end -}}
+```
+
+Note that the `common.service` template defines two parameters:
+
+ - The root context (usually `.`)
+ - A template name containing the service definition overrides
+
+A limitation of the Go template library is that a template can only take a
+single argument. The `list` function is used to work around this by constructing
+a list or array of arguments that is passed to the template.
+
+The `common.service` template is responsible for rendering the templates with
+the root context and merging any overrides. As you can see, this makes it very
+easy to create a basic `Service` resource without having to copy around the
+standard metadata and labels.
+
+Each implemented base resource is described in greater detail below.
+
+### `common.service`
+
+The `common.service` template creates a basic `Service` resource with the
+following defaults:
+
+- Service type (ClusterIP, NodePort, LoadBalancer) made configurable by `.Values.service.type`
+- Named port `http` configured on port 80
+- Selector set to `app.kubernetes.io/name: {{ template "common.name" }}, app.kubernetes.io/instance: {{ .Release.Name | quote }}` to match the default used in the `Deployment` resource
+
+Example template:
+
+```yaml
+{{- template "common.service" (list . "mychart.mail.service") -}}
+{{- define "mychart.mail.service" -}}
+metadata:
+ name: {{ template "common.fullname" . }}-mail # overrides the default name to add a suffix
+ labels: # appended to the labels section
+ protocol: mail
+spec:
+ ports: # composes the `ports` section of the service definition.
+ - name: smtp
+ port: 25
+ targetPort: 25
+ - name: imaps
+ port: 993
+ targetPort: 993
+ selector: # this is appended to the default selector
+ protocol: mail
+{{- end -}}
+---
+{{ template "common.service" (list . "mychart.web.service") -}}
+{{- define "mychart.web.service" -}}
+metadata:
+ name: {{ template "common.fullname" . }}-www # overrides the default name to add a suffix
+ labels: # appended to the labels section
+ protocol: www
+spec:
+ ports: # composes the `ports` section of the service definition.
+ - name: www
+ port: 80
+ targetPort: 8080
+{{- end -}}
+```
+
+The above template defines _two_ services: a web service and a mail service.
+
+The most important part of a service definition is the `ports` object, which
+defines the ports that this service will listen on. Most of the time,
+`selector` is computed for you. But you can replace it or add to it.
+
+The output of the example above is:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: service
+ helm.sh/chart: service-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ protocol: mail
+ app.kubernetes.io/instance: release-name
+ name: release-name-service-mail
+spec:
+ ports:
+ - name: smtp
+ port: 25
+ targetPort: 25
+ - name: imaps
+ port: 993
+ targetPort: 993
+ selector:
+ app.kubernetes.io/name: service
+ app.kubernetes.io/instance: release-name
+ protocol: mail
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: service
+ helm.sh/chart: service-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ protocol: www
+ app.kubernetes.io/instance: release-name
+ name: release-name-service-www
+spec:
+ ports:
+ - name: www
+ port: 80
+ targetPort: 8080
+ type: ClusterIP
+```
+
+## `common.deployment`
+
+The `common.deployment` template defines a basic `Deployment`. Underneath the
+hood, it uses `common.container` (see next section).
+
+By default, the pod template within the deployment defines the labels `app: {{ template "common.name" . }}`
+and `release: {{ .Release.Name | quote }` as this is also used as the selector. The
+standard set of labels are not used as some of these can change during upgrades,
+which causes the replica sets and pods to not correctly match.
+
+Example use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+## Define overrides for your Deployment resource here, e.g.
+spec:
+ replicas: {{ .Values.replicaCount }}
+{{- end -}}
+```
+
+## `common.container`
+
+The `common.container` template creates a basic `Container` spec to be used
+within a `Deployment` or `ReplicaSet`. It holds the following defaults:
+
+- The name is set to the chart name
+- Uses `.Values.image` to describe the image to run, with the following spec:
+ ```yaml
+ image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+ ```
+- Exposes the named port `http` as port 80
+- Lays out the compute resources using `.Values.resources`
+
+Example use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+## Define overrides for your Deployment resource here, e.g.
+spec:
+ template:
+ spec:
+ containers:
+ - {{ template "common.container" (list . "mychart.deployment.container") }}
+{{- end -}}
+{{- define "mychart.deployment.container" -}}
+## Define overrides for your Container here, e.g.
+livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+{{- end -}}
+```
+
+The above example creates a `Deployment` resource which makes use of the
+`common.container` template to populate the PodSpec's container list. The usage
+of this template is similar to the other resources, you must define and
+reference a template that contains overrides for the container object.
+
+The most important part of a container definition is the image you want to run.
+As mentioned above, this is derived from `.Values.image` by default. It is a
+best practice to define the image, tag and pull policy in your charts' values as
+this makes it easy for an operator to change the image registry, or use a
+specific tag or version. Another example of configuration that should be exposed
+to chart operators is the container's required compute resources, as this is
+also very specific to an operators environment. An example `values.yaml` for
+your chart could look like:
+
+```yaml
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+```
+
+The output of running the above values through the earlier template is:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: deployment
+ helm.sh/chart: deployment-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: release-name
+ name: release-name-deployment
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: deployment
+ spec:
+ containers:
+ - image: nginx:stable
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ name: deployment
+ ports:
+ - containerPort: 80
+ name: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+```
+
+## `common.configmap`
+
+The `common.configmap` template creates an empty `ConfigMap` resource that you
+can override with your configuration.
+
+Example use:
+
+```yaml
+{{- template "common.configmap" (list . "mychart.configmap") -}}
+{{- define "mychart.configmap" -}}
+data:
+ zeus: cat
+ athena: cat
+ julius: cat
+ one: |-
+ {{ .Files.Get "file1.txt" }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: v1
+data:
+ athena: cat
+ julius: cat
+ one: This is a file.
+ zeus: cat
+kind: ConfigMap
+metadata:
+ labels:
+ app.kubernetes.io/name: configmap
+ helm.sh/chart: configmap-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: release-name
+ name: release-name-configmap
+```
+
+## `common.secret`
+
+The `common.secret` template creates an empty `Secret` resource that you
+can override with your secrets.
+
+Example use:
+
+```yaml
+{{- template "common.secret" (list . "mychart.secret") -}}
+{{- define "mychart.secret" -}}
+data:
+ zeus: {{ print "cat" | b64enc }}
+ athena: {{ print "cat" | b64enc }}
+ julius: {{ print "cat" | b64enc }}
+ one: |-
+ {{ .Files.Get "file1.txt" | b64enc }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: v1
+data:
+ athena: Y2F0
+ julius: Y2F0
+ one: VGhpcyBpcyBhIGZpbGUuCg==
+ zeus: Y2F0
+kind: Secret
+metadata:
+ labels:
+ app.kubernetes.io/name: secret
+ helm.sh/chart: secret-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: release-name
+ name: release-name-secret
+type: Opaque
+```
+
+## `common.ingress`
+
+The `common.ingress` template is designed to give you a well-defined `Ingress`
+resource, that can be configured using `.Values.ingress`. An example values file
+that can be used to configure the `Ingress` resource is:
+
+```yaml
+ingress:
+ hosts:
+ - chart-example.local
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ kubernetes.io/tls-acme: "true"
+ tls:
+ - secretName: chart-example-tls
+ hosts:
+ - chart-example.local
+```
+
+Example use:
+
+```yaml
+{{- template "common.ingress" (list . "mychart.ingress") -}}
+{{- define "mychart.ingress" -}}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ kubernetes.io/tls-acme: "true"
+ labels:
+ app.kubernetes.io/name: ingress
+ helm.sh/chart: ingress-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: release-name
+ name: release-name-ingress
+spec:
+ rules:
+ - host: chart-example.local
+ http:
+ paths:
+ - backend:
+ serviceName: release-name-ingress
+ servicePort: 80
+ path: /
+ tls:
+ - hosts:
+ - chart-example.local
+ secretName: chart-example-tls
+```
+
+## `common.persistentvolumeclaim`
+
+`common.persistentvolumeclaim` can be used to easily add a
+`PersistentVolumeClaim` resource to your chart that can be configured using
+`.Values.persistence`:
+
+| Value | Description |
+| ------------------------- | ------------------------------------------------------------------------------------------------------- |
+| persistence.enabled | Whether or not to claim a persistent volume. If false, `common.volume.pvc` will use an emptyDir instead |
+| persistence.storageClass | `StorageClass` name |
+| persistence.accessMode | Access mode for persistent volume |
+| persistence.size | Size of persistent volume |
+| persistence.existingClaim | If defined, `PersistentVolumeClaim` is not created and `common.volume.pvc` helper uses this claim |
+
+An example values file that can be used to configure the
+`PersistentVolumeClaim` resource is:
+
+```yaml
+persistence:
+ enabled: true
+ storageClass: fast
+ accessMode: ReadWriteOnce
+ size: 8Gi
+```
+
+Example use:
+
+```yaml
+{{- template "common.persistentvolumeclaim" (list . "mychart.persistentvolumeclaim") -}}
+{{- define "mychart.persistentvolumeclaim" -}}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ labels:
+ app.kubernetes.io/name: persistentvolumeclaim
+ helm.sh/chart: persistentvolumeclaim-0.1.0
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/instance: release-name
+ name: release-name-persistentvolumeclaim
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 8Gi
+ storageClassName: "fast"
+```
+
+## Partial API Objects
+
+When writing Kubernetes resources, you may find the following helpers useful to
+construct parts of the spec.
+
+### EnvVar
+
+Use the EnvVar helpers within a container spec to simplify specifying key-value
+environment variables or referencing secrets as values.
+
+Example Use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+spec:
+ template:
+ spec:
+ containers:
+ - {{ template "common.container" (list . "mychart.deployment.container") }}
+{{- end -}}
+{{- define "mychart.deployment.container" -}}
+{{- $fullname := include "common.fullname" . -}}
+env:
+- {{ template "common.envvar.value" (list "ZEUS" "cat") }}
+- {{ template "common.envvar.secret" (list "ATHENA" "secret-name" "athena") }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+...
+ spec:
+ containers:
+ - env:
+ - name: ZEUS
+ value: cat
+ - name: ATHENA
+ valueFrom:
+ secretKeyRef:
+ key: athena
+ name: secret-name
+...
+```
+
+### Volume
+
+Use the Volume helpers within a `Deployment` spec to help define ConfigMap and
+PersistentVolumeClaim volumes.
+
+Example Use:
+
+```yaml
+{{- template "common.deployment" (list . "mychart.deployment") -}}
+{{- define "mychart.deployment" -}}
+spec:
+ template:
+ spec:
+ volumes:
+ - {{ template "common.volume.configMap" (list "config" "configmap-name") }}
+ - {{ template "common.volume.pvc" (list "data" "pvc-name" .Values.persistence) }}
+{{- end -}}
+```
+
+Output:
+
+```yaml
+...
+ spec:
+ volumes:
+ - configMap:
+ name: configmap-name
+ name: config
+ - name: data
+ persistentVolumeClaim:
+ claimName: pvc-name
+...
+```
+
+The `common.volume.pvc` helper uses the following configuration from the `.Values.persistence` object:
+
+| Value | Description |
+| ------------------------- | ----------------------------------------------------- |
+| persistence.enabled | If false, creates an `emptyDir` instead |
+| persistence.existingClaim | If set, uses this instead of the passed in claim name |
+
+## Utilities
+
+### `common.fullname`
+
+The `common.fullname` template generates a name suitable for the `name:` field
+in Kubernetes metadata. It is used like this:
+
+```yaml
+name: {{ template "common.fullname" . }}
+```
+
+The following different values can influence it:
+
+```yaml
+# By default, fullname uses '{{ .Release.Name }}-{{ .Chart.Name }}'. This
+# overrides that and uses the given string instead.
+fullnameOverride: "some-name"
+
+# This adds a prefix
+fullnamePrefix: "pre-"
+# This appends a suffix
+fullnameSuffix: "-suf"
+
+# Global versions of the above
+global:
+ fullnamePrefix: "pp-"
+ fullnameSuffix: "-ps"
+```
+
+Example output:
+
+```yaml
+---
+# with the values above
+name: pp-pre-some-name-suf-ps
+
+---
+# the default, for release "happy-panda" and chart "wordpress"
+name: happy-panda-wordpress
+```
+
+Output of this function is truncated at 54 characters, which leaves 9 additional
+characters for customized overriding. Thus you can easily extend this name
+in your own charts:
+
+```yaml
+{{- define "my.fullname" -}}
+ {{ template "common.fullname" . }}-my-stuff
+{{- end -}}
+```
+
+### `common.fullname.unique`
+
+The `common.fullname.unique` variant of fullname appends a unique seven-character
+sequence to the end of the common name field.
+
+This takes all of the same parameters as `common.fullname`
+
+Example template:
+
+```yaml
+uniqueName: {{ template "common.fullname.unique" . }}
+```
+
+Example output:
+
+```yaml
+uniqueName: release-name-fullname-jl0dbwx
+```
+
+It is also impacted by the prefix and suffix definitions, as well as by
+`.Values.fullnameOverride`
+
+Note that the effective maximum length of this function is 63 characters, not 54.
+
+### `common.name`
+
+The `common.name` template generates a name suitable for the `app` label. It is used like this:
+
+```yaml
+app: {{ template "common.name" . }}
+```
+
+The following different values can influence it:
+
+```yaml
+# By default, name uses '{{ .Chart.Name }}'. This
+# overrides that and uses the given string instead.
+nameOverride: "some-name"
+
+# This adds a prefix
+namePrefix: "pre-"
+# This appends a suffix
+nameSuffix: "-suf"
+
+# Global versions of the above
+global:
+ namePrefix: "pp-"
+ nameSuffix: "-ps"
+```
+
+Example output:
+
+```yaml
+---
+# with the values above
+name: pp-pre-some-name-suf-ps
+
+---
+# the default, for chart "wordpress"
+name: wordpress
+```
+
+Output of this function is truncated at 54 characters, which leaves 9 additional
+characters for customized overriding. Thus you can easily extend this name
+in your own charts:
+
+```yaml
+{{- define "my.name" -}}
+ {{ template "common.name" . }}-my-stuff
+{{- end -}}
+```
+
+### `common.metadata`
+
+The `common.metadata` helper generates the `metadata:` section of a Kubernetes
+resource.
+
+This takes three objects:
+ - .top: top context
+ - .fullnameOverride: override the fullname with this name
+ - .metadata
+ - .labels: key/value list of labels
+ - .annotations: key/value list of annotations
+ - .hook: name(s) of hook(s)
+
+It generates standard labels, annotations, hooks, and a name field.
+
+Example template:
+
+```yaml
+{{ template "common.metadata" (dict "top" . "metadata" .Values.bio) }}
+---
+{{ template "common.metadata" (dict "top" . "metadata" .Values.pet "fullnameOverride" .Values.pet.fullnameOverride) }}
+```
+
+Example values:
+
+```yaml
+bio:
+ name: example
+ labels:
+ first: matt
+ last: butcher
+ nick: technosophos
+ annotations:
+ format: bio
+ destination: archive
+ hook: pre-install
+
+pet:
+ fullnameOverride: Zeus
+
+```
+
+Example output:
+
+```yaml
+metadata:
+ name: release-name-metadata
+ labels:
+ app.kubernetes.io/name: metadata
+ app.kubernetes.io/managed-by: "Helm"
+ app.kubernetes.io/instance: "release-name"
+ helm.sh/chart: metadata-0.1.0
+ first: "matt"
+ last: "butcher"
+ nick: "technosophos"
+ annotations:
+ "destination": "archive"
+ "format": "bio"
+ "helm.sh/hook": "pre-install"
+---
+metadata:
+ name: Zeus
+ labels:
+ app.kubernetes.io/name: metadata
+ app.kubernetes.io/managed-by: "Helm"
+ app.kubernetes.io/instance: "release-name"
+ helm.sh/chart: metadata-0.1.0
+ annotations:
+```
+
+Most of the common templates that define a resource type (e.g. `common.configmap`
+or `common.job`) use this to generate the metadata, which means they inherit
+the same `labels`, `annotations`, `nameOverride`, and `hook` fields.
+
+### `common.labelize`
+
+`common.labelize` turns a map into a set of labels.
+
+Example template:
+
+```yaml
+{{- $map := dict "first" "1" "second" "2" "third" "3" -}}
+{{- template "common.labelize" $map -}}
+```
+
+Example output:
+
+```yaml
+first: "1"
+second: "2"
+third: "3"
+```
+
+### `common.labels.standard`
+
+`common.labels.standard` prints the standard set of labels.
+
+Example usage:
+
+```
+{{ template "common.labels.standard" . }}
+```
+
+Example output:
+
+```yaml
+app.kubernetes.io/name: labelizer
+app.kubernetes.io/managed-by: "Tiller"
+app.kubernetes.io/instance: "release-name"
+helm.sh/chart: labelizer-0.1.0
+```
+
+### `common.hook`
+
+The `common.hook` template is a convenience for defining hooks.
+
+Example template:
+
+```yaml
+{{ template "common.hook" "pre-install,post-install" }}
+```
+
+Example output:
+
+```yaml
+"helm.sh/hook": "pre-install,post-install"
+```
+
+### `common.chartref`
+
+The `common.chartref` helper prints the chart name and version, escaped to be
+legal in a Kubernetes label field.
+
+Example template:
+
+```yaml
+chartref: {{ template "common.chartref" . }}
+```
+
+For the chart `foo` with version `1.2.3-beta.55+1234`, this will render:
+
+```yaml
+chartref: foo-1.2.3-beta.55_1234
+```
+
+(Note that `+` is an illegal character in label values)
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_chartref.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_chartref.tpl
new file mode 100644
index 000000000..e6c14866f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_chartref.tpl
@@ -0,0 +1,14 @@
+{{- /*
+common.chartref prints a chart name and version.
+
+It does minimal escaping for use in Kubernetes labels.
+
+Example output:
+
+ zookeeper-1.2.3
+ wordpress-3.2.1_20170219
+
+*/ -}}
+{{- define "common.chartref" -}}
+ {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_configmap.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_configmap.yaml
new file mode 100644
index 000000000..03dbbf858
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_configmap.yaml
@@ -0,0 +1,9 @@
+{{- define "common.configmap.tpl" -}}
+apiVersion: v1
+kind: ConfigMap
+{{ template "common.metadata" . }}
+data: {}
+{{- end -}}
+{{- define "common.configmap" -}}
+{{- template "common.util.merge" (append . "common.configmap.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_container.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_container.yaml
new file mode 100644
index 000000000..540eb0e6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_container.yaml
@@ -0,0 +1,15 @@
+{{- define "common.container.tpl" -}}
+name: {{ .Chart.Name }}
+image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+imagePullPolicy: {{ .Values.image.pullPolicy }}
+ports:
+- name: http
+ containerPort: 80
+resources:
+{{ toYaml .Values.resources | indent 2 }}
+{{- end -}}
+{{- define "common.container" -}}
+{{- /* clear new line so indentation works correctly */ -}}
+{{- println "" -}}
+{{- include "common.util.merge" (append . "common.container.tpl") | indent 8 -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_deployment.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_deployment.yaml
new file mode 100644
index 000000000..e99a8cd33
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_deployment.yaml
@@ -0,0 +1,18 @@
+{{- define "common.deployment.tpl" -}}
+apiVersion: extensions/v1beta1
+kind: Deployment
+{{ template "common.metadata" . }}
+spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ template "common.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+ spec:
+ containers:
+ -
+{{ include "common.container.tpl" . | indent 8 }}
+{{- end -}}
+{{- define "common.deployment" -}}
+{{- template "common.util.merge" (append . "common.deployment.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_envvar.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_envvar.tpl
new file mode 100644
index 000000000..709251f8f
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_envvar.tpl
@@ -0,0 +1,31 @@
+{{- define "common.envvar.value" -}}
+ {{- $name := index . 0 -}}
+ {{- $value := index . 1 -}}
+
+ name: {{ $name }}
+ value: {{ default "" $value | quote }}
+{{- end -}}
+
+{{- define "common.envvar.configmap" -}}
+ {{- $name := index . 0 -}}
+ {{- $configMapName := index . 1 -}}
+ {{- $configMapKey := index . 2 -}}
+
+ name: {{ $name }}
+ valueFrom:
+ configMapKeyRef:
+ name: {{ $configMapName }}
+ key: {{ $configMapKey }}
+{{- end -}}
+
+{{- define "common.envvar.secret" -}}
+ {{- $name := index . 0 -}}
+ {{- $secretName := index . 1 -}}
+ {{- $secretKey := index . 2 -}}
+
+ name: {{ $name }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ $secretName }}
+ key: {{ $secretKey }}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_fullname.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_fullname.tpl
new file mode 100644
index 000000000..2da6cdf18
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_fullname.tpl
@@ -0,0 +1,39 @@
+{{- /*
+fullname defines a suitably unique name for a resource by combining
+the release name and the chart name.
+
+The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
+not exceed 63 characters.
+
+Parameters:
+
+- .Values.fullnameOverride: Replaces the computed name with this given name
+- .Values.fullnamePrefix: Prefix
+- .Values.global.fullnamePrefix: Global prefix
+- .Values.fullnameSuffix: Suffix
+- .Values.global.fullnameSuffix: Global suffix
+
+The applied order is: "global prefix + prefix + name + suffix + global suffix"
+
+Usage: 'name: "{{- template "common.fullname" . -}}"'
+*/ -}}
+{{- define "common.fullname"}}
+ {{- $global := default (dict) .Values.global -}}
+ {{- $base := default (printf "%s-%s" .Release.Name .Chart.Name) .Values.fullnameOverride -}}
+ {{- $gpre := default "" $global.fullnamePrefix -}}
+ {{- $pre := default "" .Values.fullnamePrefix -}}
+ {{- $suf := default "" .Values.fullnameSuffix -}}
+ {{- $gsuf := default "" $global.fullnameSuffix -}}
+ {{- $name := print $gpre $pre $base $suf $gsuf -}}
+ {{- $name | lower | trunc 54 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- /*
+common.fullname.unique adds a random suffix to the unique name.
+
+This takes the same parameters as common.fullname
+
+*/ -}}
+{{- define "common.fullname.unique" -}}
+ {{ template "common.fullname" . }}-{{ randAlphaNum 7 | lower }}
+{{- end }}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_ingress.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_ingress.yaml
new file mode 100644
index 000000000..78411e15b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_ingress.yaml
@@ -0,0 +1,27 @@
+{{- define "common.ingress.tpl" -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+{{ template "common.metadata" . }}
+ {{- if .Values.ingress.annotations }}
+ annotations:
+ {{ include "common.annotate" .Values.ingress.annotations | indent 4 }}
+ {{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.ingress.hosts }}
+ - host: {{ $host }}
+ http:
+ paths:
+ - path: /
+ backend:
+ serviceName: {{ template "common.fullname" $ }}
+ servicePort: 80
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+{{ toYaml .Values.ingress.tls | indent 4 }}
+ {{- end -}}
+{{- end -}}
+{{- define "common.ingress" -}}
+{{- template "common.util.merge" (append . "common.ingress.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata.yaml
new file mode 100644
index 000000000..f96ed09fe
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata.yaml
@@ -0,0 +1,10 @@
+{{- /*
+common.metadata creates a standard metadata header.
+It creates a 'metadata:' section with name and labels.
+*/ -}}
+{{ define "common.metadata" -}}
+metadata:
+ name: {{ template "common.fullname" . }}
+ labels:
+{{ include "common.labels.standard" . | indent 4 -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata_annotations.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata_annotations.tpl
new file mode 100644
index 000000000..dffe1eca9
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata_annotations.tpl
@@ -0,0 +1,18 @@
+{{- /*
+common.hook defines a hook.
+
+This is to be used in a 'metadata.annotations' section.
+
+This should be called as 'template "common.metadata.hook" "post-install"'
+
+Any valid hook may be passed in. Separate multiple hooks with a ",".
+*/ -}}
+{{- define "common.hook" -}}
+"helm.sh/hook": {{printf "%s" . | quote}}
+{{- end -}}
+
+{{- define "common.annotate" -}}
+{{- range $k, $v := . }}
+{{ $k | quote }}: {{ $v | quote }}
+{{- end -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata_labels.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata_labels.tpl
new file mode 100644
index 000000000..bcb8cdaa8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_metadata_labels.tpl
@@ -0,0 +1,28 @@
+{{- /*
+common.labelize takes a dict or map and generates labels.
+
+Values will be quoted. Keys will not.
+
+Example output:
+
+ first: "Matt"
+ last: "Butcher"
+
+*/ -}}
+{{- define "common.labelize" -}}
+{{- range $k, $v := . }}
+{{ $k }}: {{ $v | quote }}
+{{- end -}}
+{{- end -}}
+
+{{- /*
+common.labels.standard prints the standard Helm labels.
+
+The standard labels are frequently used in metadata.
+*/ -}}
+{{- define "common.labels.standard" -}}
+app.kubernetes.io/name: {{ template "common.name" . }}
+helm.sh/chart: {{ template "common.chartref" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+app.kubernetes.io/instance: {{ .Release.Name | quote }}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_name.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_name.tpl
new file mode 100644
index 000000000..1d42fb068
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_name.tpl
@@ -0,0 +1,29 @@
+{{- /*
+name defines a template for the name of the chart. It should be used for the `app` label.
+This is common practice in many Kubernetes manifests, and is not Helm-specific.
+
+The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
+not exceed 63 characters.
+
+Parameters:
+
+- .Values.nameOverride: Replaces the computed name with this given name
+- .Values.namePrefix: Prefix
+- .Values.global.namePrefix: Global prefix
+- .Values.nameSuffix: Suffix
+- .Values.global.nameSuffix: Global suffix
+
+The applied order is: "global prefix + prefix + name + suffix + global suffix"
+
+Usage: 'name: "{{- template "common.name" . -}}"'
+*/ -}}
+{{- define "common.name"}}
+ {{- $global := default (dict) .Values.global -}}
+ {{- $base := default .Chart.Name .Values.nameOverride -}}
+ {{- $gpre := default "" $global.namePrefix -}}
+ {{- $pre := default "" .Values.namePrefix -}}
+ {{- $suf := default "" .Values.nameSuffix -}}
+ {{- $gsuf := default "" $global.nameSuffix -}}
+ {{- $name := print $gpre $pre $base $suf $gsuf -}}
+ {{- $name | lower | trunc 54 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_persistentvolumeclaim.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_persistentvolumeclaim.yaml
new file mode 100644
index 000000000..6c1578c7e
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_persistentvolumeclaim.yaml
@@ -0,0 +1,24 @@
+{{- define "common.persistentvolumeclaim.tpl" -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+{{ template "common.metadata" . }}
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
+{{- define "common.persistentvolumeclaim" -}}
+{{- $top := first . -}}
+{{- if and $top.Values.persistence.enabled (not $top.Values.persistence.existingClaim) -}}
+{{- template "common.util.merge" (append . "common.persistentvolumeclaim.tpl") -}}
+{{- end -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_secret.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_secret.yaml
new file mode 100644
index 000000000..0615d35cb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_secret.yaml
@@ -0,0 +1,10 @@
+{{- define "common.secret.tpl" -}}
+apiVersion: v1
+kind: Secret
+{{ template "common.metadata" . }}
+type: Opaque
+data: {}
+{{- end -}}
+{{- define "common.secret" -}}
+{{- template "common.util.merge" (append . "common.secret.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_service.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_service.yaml
new file mode 100644
index 000000000..b9dfc378a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_service.yaml
@@ -0,0 +1,17 @@
+{{- define "common.service.tpl" -}}
+apiVersion: v1
+kind: Service
+{{ template "common.metadata" . }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - name: http
+ port: 80
+ targetPort: http
+ selector:
+ app.kubernetes.io/name: {{ template "common.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name | quote }}
+{{- end -}}
+{{- define "common.service" -}}
+{{- template "common.util.merge" (append . "common.service.tpl") -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_util.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_util.tpl
new file mode 100644
index 000000000..a7d4cc751
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_util.tpl
@@ -0,0 +1,15 @@
+{{- /*
+common.util.merge will merge two YAML templates and output the result.
+
+This takes an array of three values:
+- the top context
+- the template name of the overrides (destination)
+- the template name of the base (source)
+
+*/ -}}
+{{- define "common.util.merge" -}}
+{{- $top := first . -}}
+{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
+{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
+{{- toYaml (merge $overrides $tpl) -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_volume.tpl b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_volume.tpl
new file mode 100644
index 000000000..521a1f48b
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/templates/_volume.tpl
@@ -0,0 +1,22 @@
+{{- define "common.volume.configMap" -}}
+ {{- $name := index . 0 -}}
+ {{- $configMapName := index . 1 -}}
+
+ name: {{ $name }}
+ configMap:
+ name: {{ $configMapName }}
+{{- end -}}
+
+{{- define "common.volume.pvc" -}}
+ {{- $name := index . 0 -}}
+ {{- $claimName := index . 1 -}}
+ {{- $persistence := index . 2 -}}
+
+ name: {{ $name }}
+ {{- if $persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ $persistence.existingClaim | default $claimName }}
+ {{- else }}
+ emptyDir: {}
+ {{- end -}}
+{{- end -}}
diff --git a/helm/pkg/cmd/testdata/testcharts/lib-chart/values.yaml b/helm/pkg/cmd/testdata/testcharts/lib-chart/values.yaml
new file mode 100644
index 000000000..b7cf514d5
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/lib-chart/values.yaml
@@ -0,0 +1,4 @@
+# Default values for commons.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/object-order/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/object-order/Chart.yaml
new file mode 100644
index 000000000..d2eb42fd7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/object-order/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: object-order
+description: Test ordering of manifests in output
+type: application
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/object-order/templates/01-a.yml b/helm/pkg/cmd/testdata/testcharts/object-order/templates/01-a.yml
new file mode 100644
index 000000000..32aa4a475
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/object-order/templates/01-a.yml
@@ -0,0 +1,57 @@
+# 1
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: first
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 2
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: second
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 3
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: third
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 4 (Deployment should come after all NetworkPolicy manifests, since 'helm template' outputs in install order)
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: fourth
+spec:
+ selector:
+ matchLabels:
+ pod: fourth
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ pod: fourth
+ spec:
+ containers:
+ - name: hello-world
+ image: gcr.io/google-samples/node-hello:1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/object-order/templates/02-b.yml b/helm/pkg/cmd/testdata/testcharts/object-order/templates/02-b.yml
new file mode 100644
index 000000000..895db8cf7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/object-order/templates/02-b.yml
@@ -0,0 +1,143 @@
+# 5
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fifth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 6 (implementation detail: currently, 'helm template' outputs hook manifests last; and yes, NetworkPolicy won't make a reasonable hook, this is just a dummy unit test manifest)
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ annotations:
+ "helm.sh/hook": pre-install
+ name: sixth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 7
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: seventh
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 8
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: eighth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 9
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: ninth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 10
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 11
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: eleventh
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 12
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: twelfth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 13
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: thirteenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 14
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fourteenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+
+---
+
+# 15 (11th object within 02-b.yml, in order to test `SplitManifests` which assigns `manifest-10`
+# to this object which should then come *after* `manifest-9`)
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fifteenth
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
diff --git a/helm/pkg/cmd/testdata/testcharts/object-order/values.yaml b/helm/pkg/cmd/testdata/testcharts/object-order/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/oci-dependent-chart-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/oci-dependent-chart-0.1.0.tgz
new file mode 100644
index 000000000..7b4cbeccc
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/oci-dependent-chart-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/pre-release-chart-0.1.0-alpha.tgz b/helm/pkg/cmd/testdata/testcharts/pre-release-chart-0.1.0-alpha.tgz
new file mode 100644
index 000000000..5d5770fed
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/pre-release-chart-0.1.0-alpha.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/reqtest-0.1.0.tgz
new file mode 100644
index 000000000..5d8e46a50
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/reqtest-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/.helmignore b/helm/pkg/cmd/testdata/testcharts/reqtest/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/Chart.lock b/helm/pkg/cmd/testdata/testcharts/reqtest/Chart.lock
new file mode 100755
index 000000000..ab1ae8cc0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/Chart.lock
@@ -0,0 +1,3 @@
+dependencies: []
+digest: Not implemented
+generated: 2016-09-13T17:25:17.593788787-06:00
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/reqtest/Chart.yaml
new file mode 100644
index 000000000..07b6e2c97
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: reqtest
+version: 0.1.0
+dependencies:
+ - name: reqsubchart
+ version: 0.1.0
+ repository: "https://example.com/charts"
+ - name: reqsubchart2
+ version: 0.2.0
+ repository: "https://example.com/charts"
+ - name: reqsubchart3
+ version: ">=0.1.0"
+ repository: "https://example.com/charts"
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/.helmignore b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/Chart.yaml
new file mode 100644
index 000000000..356135537
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: reqsubchart
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/values.yaml
new file mode 100644
index 000000000..0f0b63f2a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqsubchart.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/.helmignore b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/Chart.yaml
new file mode 100644
index 000000000..5b9277370
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: reqsubchart2
+version: 0.2.0
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/values.yaml b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/values.yaml
new file mode 100644
index 000000000..0f0b63f2a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart2/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqsubchart.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart3-0.2.0.tgz b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart3-0.2.0.tgz
new file mode 100644
index 000000000..37962b0ab
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/reqtest/charts/reqsubchart3-0.2.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/reqtest/values.yaml b/helm/pkg/cmd/testdata/testcharts/reqtest/values.yaml
new file mode 100644
index 000000000..d57f76b07
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/reqtest/values.yaml
@@ -0,0 +1,4 @@
+# Default values for reqtest.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz
new file mode 100644
index 000000000..c74e5b0ef
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz.prov b/helm/pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz.prov
new file mode 100644
index 000000000..d325bb266
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz.prov
@@ -0,0 +1,21 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: signtest
+version: 0.1.0
+
+...
+files:
+ signtest-0.1.0.tgz: sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55
+-----BEGIN PGP SIGNATURE-----
+
+wsBcBAEBCgAQBQJcoosfCRCEO7+YH8GHYgAA220IALAs8T8NPgkcLvHu+5109cAN
+BOCNPSZDNsqLZW/2Dc9cKoBG7Jen4Qad+i5l9351kqn3D9Gm6eRfAWcjfggRobV/
+9daZ19h0nl4O1muQNAkjvdgZt8MOP3+PB3I3/Tu2QCYjI579SLUmuXlcZR5BCFPR
+PJy+e3QpV2PcdeU2KZLG4tjtlrq+3QC9ZHHEJLs+BVN9d46Dwo6CxJdHJrrrAkTw
+M8MhA92vbiTTPRSCZI9x5qDAwJYhoq0oxLflpuL2tIlo3qVoCsaTSURwMESEHO32
+XwYG7BaVDMELWhAorBAGBGBwWFbJ1677qQ2gd9CN0COiVhekWlFRcnn60800r84=
+=k9Y9
+-----END PGP SIGNATURE-----
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/.helmignore b/helm/pkg/cmd/testdata/testcharts/signtest/.helmignore
new file mode 100644
index 000000000..435b756d8
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/.helmignore
@@ -0,0 +1,5 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+.git
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/signtest/Chart.yaml
new file mode 100644
index 000000000..f1f73723a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: signtest
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/alpine/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/Chart.yaml
new file mode 100644
index 000000000..eec261220
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Deploy a basic Alpine Linux pod
+home: https://helm.sh/helm
+name: alpine
+sources:
+- https://github.com/helm/helm
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/alpine/README.md b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/README.md
new file mode 100644
index 000000000..28bebae07
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.yaml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/alpine/templates/alpine-pod.yaml b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/alpine/values.yaml b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/values.yaml
new file mode 100644
index 000000000..bb6c06ae4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: my-alpine
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/templates/pod.yaml b/helm/pkg/cmd/testdata/testcharts/signtest/templates/pod.yaml
new file mode 100644
index 000000000..9b00ccaf7
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/signtest/templates/pod.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: signtest
+spec:
+ restartPolicy: Never
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/cmd/testdata/testcharts/signtest/values.yaml b/helm/pkg/cmd/testdata/testcharts/signtest/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/Chart.yaml
new file mode 100644
index 000000000..ae844c349
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/Chart.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchart
+version: 0.1.0
+dependencies:
+ - name: subcharta
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subcharta.enabled
+ tags:
+ - front-end
+ - subcharta
+ import-values:
+ - child: SCAdata
+ parent: imported-chartA
+ - child: SCAdata
+ parent: overridden-chartA
+ - child: SCAdata
+ parent: imported-chartA-B
+
+ - name: subchartb
+ repository: http://localhost:10191
+ version: 0.1.0
+ condition: subchartb.enabled
+ import-values:
+ - child: SCBdata
+ parent: imported-chartB
+ - child: SCBdata
+ parent: imported-chartA-B
+ - child: exports.SCBexported2
+ parent: exports.SCBexported2
+ # - child: exports.configmap
+ # parent: configmap
+ - configmap
+ - SCBexported1
+
+ tags:
+ - front-end
+ - subchartb
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/Chart.yaml
new file mode 100644
index 000000000..be3edcefb
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subcharta
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/templates/service.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/values.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/values.yaml
new file mode 100644
index 000000000..f0381ae6a
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartA/values.yaml
@@ -0,0 +1,17 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchartA
+service:
+ name: apache
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+SCAdata:
+ SCAbool: false
+ SCAfloat: 3.1
+ SCAint: 55
+ SCAstring: "jabba"
+ SCAnested1:
+ SCAnested2: true
+
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/Chart.yaml
new file mode 100644
index 000000000..c3c6bbaf0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: subchartb
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/templates/service.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/templates/service.yaml
new file mode 100644
index 000000000..27501e1e0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/values.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/values.yaml
new file mode 100644
index 000000000..0ada0aadc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/charts/subchartB/values.yaml
@@ -0,0 +1,39 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+SCBdata:
+ SCBbool: true
+ SCBfloat: 7.77
+ SCBint: 33
+ SCBstring: "boba"
+
+exports:
+ SCBexported1:
+ SCBexported1A:
+ SCBexported1B: 1965
+
+ SCBexported2:
+ SCBexported2A: "blaster"
+
+ configmap:
+ configmap:
+ value: "bar"
+
+global:
+ kolla:
+ nova:
+ api:
+ all:
+ port: 8774
+ metadata:
+ all:
+ port: 8775
+
+
+
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/crds/crdA.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/crds/crdA.yaml
new file mode 100644
index 000000000..ad770b632
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/crds/crdA.yaml
@@ -0,0 +1,14 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: testcrds.testcrdgroups.example.com
+spec:
+ group: testcrdgroups.example.com
+ version: v1alpha1
+ names:
+ kind: TestCRD
+ listKind: TestCRDList
+ plural: testcrds
+ shortNames:
+ - tc
+ singular: authconfig
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/extra_values.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/extra_values.yaml
new file mode 100644
index 000000000..5976bd178
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/extra_values.yaml
@@ -0,0 +1,5 @@
+# This file is used to test values passed by file at the command line
+
+configmap:
+ enabled: true
+ value: "qux"
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/NOTES.txt b/helm/pkg/cmd/testdata/testcharts/subchart/templates/NOTES.txt
new file mode 100644
index 000000000..4bdf443f6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/NOTES.txt
@@ -0,0 +1 @@
+Sample notes for {{ .Chart.Name }}
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml
new file mode 100644
index 000000000..19c931cc3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/service.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Chart.Name }}
+ labels:
+ helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app.kubernetes.io/instance: "{{ .Release.Name }}"
+ kube-version/major: "{{ .Capabilities.KubeVersion.Major }}"
+ kube-version/minor: "{{ .Capabilities.KubeVersion.Minor }}"
+ kube-version/version: "v{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}.0"
+{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test" }}
+ kube-api-version/test: v1
+{{- end }}
+{{- if .Capabilities.APIVersions.Has "helm.k8s.io/test2" }}
+ kube-api-version/test2: v2
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ protocol: TCP
+ name: {{ .Values.service.name }}
+ selector:
+ app.kubernetes.io/name: {{ .Chart.Name }}
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/configmap.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/configmap.yaml
new file mode 100644
index 000000000..e404a6cb2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/configmap.yaml
@@ -0,0 +1,8 @@
+{{ if .Values.configmap.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Chart.Name }}-cm
+data:
+ value: {{ .Values.configmap.value }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/role.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/role.yaml
new file mode 100644
index 000000000..31cff9200
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/role.yaml
@@ -0,0 +1,8 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ .Chart.Name }}-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","list","watch"]
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/rolebinding.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/rolebinding.yaml
new file mode 100644
index 000000000..5d193f1a6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/rolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ .Chart.Name }}-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ .Chart.Name }}-role
+subjects:
+- kind: ServiceAccount
+ name: {{ .Chart.Name }}-sa
+ namespace: default
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/serviceaccount.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/serviceaccount.yaml
new file mode 100644
index 000000000..7126c7d89
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/subdir/serviceaccount.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Chart.Name }}-sa
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/tests/test-config.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/tests/test-config.yaml
new file mode 100644
index 000000000..0aa3eea29
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/tests/test-config.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "{{ .Release.Name }}-testconfig"
+ annotations:
+ "helm.sh/hook": test
+data:
+ message: Hello World
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/templates/tests/test-nothing.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/templates/tests/test-nothing.yaml
new file mode 100644
index 000000000..0fe6dbbf3
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/templates/tests/test-nothing.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ .Release.Name }}-test"
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: test
+ image: "alpine:latest"
+ envFrom:
+ - configMapRef:
+ name: "{{ .Release.Name }}-testconfig"
+ command:
+ - echo
+ - "$message"
+ restartPolicy: Never
diff --git a/helm/pkg/cmd/testdata/testcharts/subchart/values.yaml b/helm/pkg/cmd/testdata/testcharts/subchart/values.yaml
new file mode 100644
index 000000000..bcbebb5c0
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/subchart/values.yaml
@@ -0,0 +1,59 @@
+# Default values for subchart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+# subchart
+service:
+ name: nginx
+ type: ClusterIP
+ externalPort: 80
+ internalPort: 80
+
+
+SC1data:
+ SC1bool: true
+ SC1float: 3.14
+ SC1int: 100
+ SC1string: "dollywood"
+ SC1extra1: 11
+
+imported-chartA:
+ SC1extra2: 1.337
+
+overridden-chartA:
+ SCAbool: true
+ SCAfloat: 3.14
+ SCAint: 100
+ SCAstring: "jabbathehut"
+ SC1extra3: true
+
+imported-chartA-B:
+ SC1extra5: "tiller"
+
+overridden-chartA-B:
+ SCAbool: true
+ SCAfloat: 3.33
+ SCAint: 555
+ SCAstring: "wormwood"
+ SCAextra1: 23
+
+ SCBbool: true
+ SCBfloat: 0.25
+ SCBint: 98
+ SCBstring: "murkwood"
+ SCBextra1: 13
+
+ SC1extra6: 77
+
+SCBexported1A:
+ SC1extra7: true
+
+exports:
+ SC1exported1:
+ global:
+ SC1exported2:
+ all:
+ SC1exported3: "SC1expstr"
+
+configmap:
+ enabled: false
+ value: "foo"
diff --git a/helm/pkg/cmd/testdata/testcharts/test-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/test-0.1.0.tgz
new file mode 100644
index 000000000..9ed772a7f
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/test-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/test/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/test/Chart.yaml
new file mode 100644
index 000000000..53e47c820
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/test/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: Test chart for untar conflict testing
+name: test
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/test/values.yaml b/helm/pkg/cmd/testdata/testcharts/test/values.yaml
new file mode 100644
index 000000000..2f01ba536
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/test/values.yaml
@@ -0,0 +1 @@
+# Default values for test
diff --git a/helm/pkg/cmd/testdata/testcharts/test1-0.1.0.tgz b/helm/pkg/cmd/testdata/testcharts/test1-0.1.0.tgz
new file mode 100644
index 000000000..60e00324c
Binary files /dev/null and b/helm/pkg/cmd/testdata/testcharts/test1-0.1.0.tgz differ
diff --git a/helm/pkg/cmd/testdata/testcharts/test1/Chart.yaml b/helm/pkg/cmd/testdata/testcharts/test1/Chart.yaml
new file mode 100644
index 000000000..3dc8fbbf2
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/test1/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: Test chart for untar conflict testing
+name: test1
+version: 0.1.0
diff --git a/helm/pkg/cmd/testdata/testcharts/test1/values.yaml b/helm/pkg/cmd/testdata/testcharts/test1/values.yaml
new file mode 100644
index 000000000..823016ffc
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/test1/values.yaml
@@ -0,0 +1,2 @@
+# Default values for test1# Default values for test1
+
diff --git a/helm/pkg/cmd/testdata/testcharts/upgradetest/templates/configmap.yaml b/helm/pkg/cmd/testdata/testcharts/upgradetest/templates/configmap.yaml
new file mode 100644
index 000000000..b6b90efba
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/upgradetest/templates/configmap.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "{{ .Release.Name }}-configmap"
+data:
+ myvalue: "Hello World"
+ drink: {{ .Values.favoriteDrink }}
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testcharts/upgradetest/values.yaml b/helm/pkg/cmd/testdata/testcharts/upgradetest/values.yaml
new file mode 100644
index 000000000..c429f41f4
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testcharts/upgradetest/values.yaml
@@ -0,0 +1 @@
+favoriteDrink: beer
\ No newline at end of file
diff --git a/helm/pkg/cmd/testdata/testplugin/plugin.yaml b/helm/pkg/cmd/testdata/testplugin/plugin.yaml
new file mode 100644
index 000000000..3ee5d04f6
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testplugin/plugin.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+name: testplugin
+type: cli/v1
+runtime: subprocess
+config:
+ shortHelp: "echo test"
+ longHelp: "This echos test"
+ ignoreFlags: false
+runtimeConfig:
+ platformCommand:
+ - command: "echo test"
diff --git a/helm/pkg/cmd/testdata/testserver/index.yaml b/helm/pkg/cmd/testdata/testserver/index.yaml
new file mode 100644
index 000000000..9cde8e8dd
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testserver/index.yaml
@@ -0,0 +1 @@
+apiVersion: v1
diff --git a/helm/pkg/cmd/testdata/testserver/repository/repositories.yaml b/helm/pkg/cmd/testdata/testserver/repository/repositories.yaml
new file mode 100644
index 000000000..271301c95
--- /dev/null
+++ b/helm/pkg/cmd/testdata/testserver/repository/repositories.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+generated: 2016-10-04T13:50:02.87649685-06:00
+repositories:
+- cache: ""
+ name: test
+ url: http://127.0.0.1:49216
diff --git a/helm/pkg/cmd/uninstall.go b/helm/pkg/cmd/uninstall.go
new file mode 100644
index 000000000..4cc14ae1e
--- /dev/null
+++ b/helm/pkg/cmd/uninstall.go
@@ -0,0 +1,92 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const uninstallDesc = `
+This command takes a release name and uninstalls the release.
+
+It removes all of the resources associated with the last release of the chart
+as well as the release history, freeing it up for future use.
+
+Use the '--dry-run' flag to see which releases will be uninstalled without actually
+uninstalling them.
+`
+
+func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewUninstall(cfg)
+
+ cmd := &cobra.Command{
+ Use: "uninstall RELEASE_NAME [...]",
+ Aliases: []string{"del", "delete", "un"},
+ SuggestFor: []string{"remove", "rm"},
+ Short: "uninstall a release",
+ Long: uninstallDesc,
+ Args: require.MinimumNArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return compListReleases(toComplete, args, cfg)
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ validationErr := validateCascadeFlag(client)
+ if validationErr != nil {
+ return validationErr
+ }
+ for i := range args {
+
+ res, err := client.Run(args[i])
+ if err != nil {
+ return err
+ }
+ if res != nil && res.Info != "" {
+ fmt.Fprintln(out, res.Info)
+ }
+
+ fmt.Fprintf(out, "release \"%s\" uninstalled\n", args[i])
+ }
+ return nil
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&client.DryRun, "dry-run", false, "simulate a uninstall")
+ f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation")
+ f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`)
+ f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history")
+ f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.")
+ f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
+ f.StringVar(&client.Description, "description", "", "add a custom description")
+ AddWaitFlag(cmd, &client.WaitStrategy)
+
+ return cmd
+}
+
+func validateCascadeFlag(client *action.Uninstall) error {
+ if client.DeletionPropagation != "background" && client.DeletionPropagation != "foreground" && client.DeletionPropagation != "orphan" {
+ return fmt.Errorf("invalid cascade value (%s). Must be \"background\", \"foreground\", or \"orphan\"", client.DeletionPropagation)
+ }
+ return nil
+}
diff --git a/helm/pkg/cmd/uninstall_test.go b/helm/pkg/cmd/uninstall_test.go
new file mode 100644
index 000000000..ce436e68c
--- /dev/null
+++ b/helm/pkg/cmd/uninstall_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestUninstall(t *testing.T) {
+ tests := []cmdTestCase{
+ {
+ name: "basic uninstall",
+ cmd: "uninstall aeneas",
+ golden: "output/uninstall.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})},
+ },
+ {
+ name: "multiple uninstall",
+ cmd: "uninstall aeneas aeneas2",
+ golden: "output/uninstall-multiple.txt",
+ rels: []*release.Release{
+ release.Mock(&release.MockReleaseOptions{Name: "aeneas"}),
+ release.Mock(&release.MockReleaseOptions{Name: "aeneas2"}),
+ },
+ },
+ {
+ name: "uninstall with timeout",
+ cmd: "uninstall aeneas --timeout 120s",
+ golden: "output/uninstall-timeout.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})},
+ },
+ {
+ name: "uninstall without hooks",
+ cmd: "uninstall aeneas --no-hooks",
+ golden: "output/uninstall-no-hooks.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})},
+ },
+ {
+ name: "keep history",
+ cmd: "uninstall aeneas --keep-history",
+ golden: "output/uninstall-keep-history.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})},
+ },
+ {
+ name: "keep history with earlier deployed release",
+ cmd: "uninstall aeneas --keep-history",
+ golden: "output/uninstall-keep-history-earlier-deployed.txt",
+ rels: []*release.Release{
+ release.Mock(&release.MockReleaseOptions{Name: "aeneas", Version: 1, Status: common.StatusDeployed}),
+ release.Mock(&release.MockReleaseOptions{Name: "aeneas", Version: 2, Status: common.StatusFailed}),
+ },
+ },
+ {
+ name: "wait",
+ cmd: "uninstall aeneas --wait",
+ golden: "output/uninstall-wait.txt",
+ rels: []*release.Release{release.Mock(&release.MockReleaseOptions{Name: "aeneas"})},
+ },
+ {
+ name: "uninstall without release",
+ cmd: "uninstall",
+ golden: "output/uninstall-no-args.txt",
+ wantError: true,
+ },
+ }
+ runTestCmd(t, tests)
+}
+
+func TestUninstallCompletion(t *testing.T) {
+ checkReleaseCompletion(t, "uninstall", true)
+}
+
+func TestUninstallFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "uninstall", false)
+ checkFileCompletion(t, "uninstall myrelease", false)
+}
diff --git a/helm/pkg/cmd/upgrade.go b/helm/pkg/cmd/upgrade.go
new file mode 100644
index 000000000..918d6f5b8
--- /dev/null
+++ b/helm/pkg/cmd/upgrade.go
@@ -0,0 +1,331 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "log/slog"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ ci "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/loader"
+ "helm.sh/helm/v4/pkg/cli/output"
+ "helm.sh/helm/v4/pkg/cli/values"
+ "helm.sh/helm/v4/pkg/cmd/require"
+ "helm.sh/helm/v4/pkg/downloader"
+ "helm.sh/helm/v4/pkg/getter"
+ ri "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+const upgradeDesc = `
+This command upgrades a release to a new version of a chart.
+
+The upgrade arguments must be a release and chart. The chart
+argument can be either: a chart reference('example/mariadb'), a path to a chart directory,
+a packaged chart, or a fully qualified URL. For chart references, the latest
+version will be specified unless the '--version' flag is set.
+
+To override values in a chart, use either the '--values' flag and pass in a file
+or use the '--set' flag and pass configuration from the command line, to force string
+values, use '--set-string'. You can use '--set-file' to set individual
+values from a file when the value itself is too long for the command line
+or is dynamically generated. You can also use '--set-json' to set json values
+(scalars/objects/arrays) from the command line. Additionally, you can use '--set-json' and passing json object as a string.
+
+You can specify the '--values'/'-f' flag multiple times. The priority will be given to the
+last (right-most) file specified. For example, if both myvalues.yaml and override.yaml
+contained a key called 'Test', the value set in override.yaml would take precedence:
+
+ $ helm upgrade -f myvalues.yaml -f override.yaml redis ./redis
+
+You can specify the '--set' flag multiple times. The priority will be given to the
+last (right-most) set specified. For example, if both 'bar' and 'newbar' values are
+set for a key called 'foo', the 'newbar' value would take precedence:
+
+ $ helm upgrade --set foo=bar --set foo=newbar redis ./redis
+
+You can update the values for an existing release with this command as well via the
+'--reuse-values' flag. The 'RELEASE' and 'CHART' arguments should be set to the original
+parameters, and existing values will be merged with any values set via '--values'/'-f'
+or '--set' flags. Priority is given to new values.
+
+ $ helm upgrade --reuse-values --set foo=bar --set foo=newbar redis ./redis
+
+The --dry-run flag will output all generated chart manifests, including Secrets
+which can contain sensitive values. To hide Kubernetes Secrets use the
+--hide-secret flag. Please carefully consider how and when these flags are used.
+`
+
+func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
+ client := action.NewUpgrade(cfg)
+ valueOpts := &values.Options{}
+ var outfmt output.Format
+ var createNamespace bool
+
+ cmd := &cobra.Command{
+ Use: "upgrade [RELEASE] [CHART]",
+ Short: "upgrade a release",
+ Long: upgradeDesc,
+ Args: require.ExactArgs(2),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ return compListReleases(toComplete, args, cfg)
+ }
+ if len(args) == 1 {
+ return compListCharts(toComplete, true)
+ }
+ return noMoreArgsComp()
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ client.Namespace = settings.Namespace()
+
+ registryClient, err := newRegistryClient(client.CertFile, client.KeyFile, client.CaFile,
+ client.InsecureSkipTLSVerify, client.PlainHTTP, client.Username, client.Password)
+ if err != nil {
+ return fmt.Errorf("missing registry client: %w", err)
+ }
+ client.SetRegistryClient(registryClient)
+
+ dryRunStrategy, err := cmdGetDryRunFlagStrategy(cmd, false)
+ if err != nil {
+ return err
+ }
+ client.DryRunStrategy = dryRunStrategy
+
+ // Fixes #7002 - Support reading values from STDIN for `upgrade` command
+ // Must load values AFTER determining if we have to call install so that values loaded from stdin are not read twice
+ if client.Install {
+ // If a release does not exist, install it.
+ histClient := action.NewHistory(cfg)
+ histClient.Max = 1
+ versions, err := histClient.Run(args[0])
+ if err == driver.ErrReleaseNotFound || isReleaseUninstalled(versions) {
+ // Only print this to stdout for table output
+ if outfmt == output.Table {
+ fmt.Fprintf(out, "Release %q does not exist. Installing it now.\n", args[0])
+ }
+ instClient := action.NewInstall(cfg)
+ instClient.CreateNamespace = createNamespace
+ instClient.ChartPathOptions = client.ChartPathOptions
+ instClient.ForceReplace = client.ForceReplace
+ instClient.DryRunStrategy = client.DryRunStrategy
+ instClient.DisableHooks = client.DisableHooks
+ instClient.SkipCRDs = client.SkipCRDs
+ instClient.Timeout = client.Timeout
+ instClient.WaitStrategy = client.WaitStrategy
+ instClient.WaitForJobs = client.WaitForJobs
+ instClient.Devel = client.Devel
+ instClient.Namespace = client.Namespace
+ instClient.RollbackOnFailure = client.RollbackOnFailure
+ instClient.PostRenderer = client.PostRenderer
+ instClient.DisableOpenAPIValidation = client.DisableOpenAPIValidation
+ instClient.SubNotes = client.SubNotes
+ instClient.HideNotes = client.HideNotes
+ instClient.SkipSchemaValidation = client.SkipSchemaValidation
+ instClient.Description = client.Description
+ instClient.DependencyUpdate = client.DependencyUpdate
+ instClient.Labels = client.Labels
+ instClient.EnableDNS = client.EnableDNS
+ instClient.HideSecret = client.HideSecret
+ instClient.TakeOwnership = client.TakeOwnership
+ instClient.ForceConflicts = client.ForceConflicts
+ instClient.ServerSideApply = client.ServerSideApply != "false"
+
+ if isReleaseUninstalled(versions) {
+ instClient.Replace = true
+ }
+
+ rel, err := runInstall(args, instClient, valueOpts, out)
+ if err != nil {
+ return err
+ }
+ return outfmt.Write(out, &statusPrinter{
+ release: rel,
+ debug: settings.Debug,
+ showMetadata: false,
+ hideNotes: instClient.HideNotes,
+ noColor: settings.ShouldDisableColor(),
+ })
+ } else if err != nil {
+ return err
+ }
+ }
+
+ if client.Version == "" && client.Devel {
+ slog.Debug("setting version to >0.0.0-0")
+ client.Version = ">0.0.0-0"
+ }
+
+ chartPath, err := client.LocateChart(args[1], settings)
+ if err != nil {
+ return err
+ }
+
+ p := getter.All(settings)
+ vals, err := valueOpts.MergeValues(p)
+ if err != nil {
+ return err
+ }
+
+ // Check chart dependencies to make sure all are present in /charts
+ ch, err := loader.Load(chartPath)
+ if err != nil {
+ return err
+ }
+
+ ac, err := ci.NewAccessor(ch)
+ if err != nil {
+ return err
+ }
+ if req := ac.MetaDependencies(); len(req) > 0 {
+ if err := action.CheckDependencies(ch, req); err != nil {
+ err = fmt.Errorf("an error occurred while checking for chart dependencies. You may need to run `helm dependency build` to fetch missing dependencies: %w", err)
+ if client.DependencyUpdate {
+ man := &downloader.Manager{
+ Out: out,
+ ChartPath: chartPath,
+ Keyring: client.Keyring,
+ SkipUpdate: false,
+ Getters: p,
+ RepositoryConfig: settings.RepositoryConfig,
+ RepositoryCache: settings.RepositoryCache,
+ ContentCache: settings.ContentCache,
+ Debug: settings.Debug,
+ }
+ if err := man.Update(); err != nil {
+ return err
+ }
+ // Reload the chart with the updated Chart.lock file.
+ if ch, err = loader.Load(chartPath); err != nil {
+ return fmt.Errorf("failed reloading chart after repo update: %w", err)
+ }
+ } else {
+ return err
+ }
+ }
+ }
+
+ if ac.Deprecated() {
+ slog.Warn("this chart is deprecated")
+ }
+
+ // Create context and prepare the handle of SIGTERM
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+
+ // Set up channel on which to send signal notifications.
+ // We must use a buffered channel or risk missing the signal
+ // if we're not ready to receive when the signal is sent.
+ cSignal := make(chan os.Signal, 2)
+ signal.Notify(cSignal, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ <-cSignal
+ fmt.Fprintf(out, "Release %s has been cancelled.\n", args[0])
+ cancel()
+ }()
+
+ rel, err := client.RunWithContext(ctx, args[0], ch, vals)
+ if err != nil {
+ return fmt.Errorf("UPGRADE FAILED: %w", err)
+ }
+
+ if outfmt == output.Table {
+ fmt.Fprintf(out, "Release %q has been upgraded. Happy Helming!\n", args[0])
+ }
+
+ return outfmt.Write(out, &statusPrinter{
+ release: rel,
+ debug: settings.Debug,
+ showMetadata: false,
+ hideNotes: client.HideNotes,
+ noColor: settings.ShouldDisableColor(),
+ })
+ },
+ }
+
+ f := cmd.Flags()
+ f.BoolVar(&createNamespace, "create-namespace", false, "if --install is set, create the release namespace if not present")
+ f.BoolVarP(&client.Install, "install", "i", false, "if a release by this name doesn't already exist, run an install")
+ f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
+ f.BoolVar(&client.HideSecret, "hide-secret", false, "hide Kubernetes Secrets when also using the --dry-run flag")
+ f.BoolVar(&client.ForceReplace, "force-replace", false, "force resource updates by replacement")
+ f.BoolVar(&client.ForceReplace, "force", false, "deprecated")
+ f.MarkDeprecated("force", "use --force-replace instead")
+ f.BoolVar(&client.ForceConflicts, "force-conflicts", false, "if set server-side apply will force changes against conflicts")
+ f.StringVar(&client.ServerSideApply, "server-side", "auto", "must be \"true\", \"false\" or \"auto\". Object updates run in the server instead of the client (\"auto\" defaults the value from the previous chart release's method)")
+ f.BoolVar(&client.DisableHooks, "no-hooks", false, "disable pre/post upgrade hooks")
+ f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the upgrade process will not validate rendered templates against the Kubernetes OpenAPI Schema")
+ f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed when an upgrade is performed with install flag enabled. By default, CRDs are installed if not already present, when an upgrade is performed with install flag enabled")
+ f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
+ f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
+ f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored")
+ f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored")
+ f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
+ f.BoolVar(&client.RollbackOnFailure, "rollback-on-failure", false, "if set, Helm will rollback the upgrade to previous success release upon failure. The --wait flag will be defaulted to \"watcher\" if --rollback-on-failure is set")
+ f.BoolVar(&client.RollbackOnFailure, "atomic", false, "deprecated")
+ f.MarkDeprecated("atomic", "use --rollback-on-failure instead")
+ f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
+ f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
+ f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
+ f.BoolVar(&client.HideNotes, "hide-notes", false, "if set, do not show notes in upgrade output. Does not affect presence in chart metadata")
+ f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation")
+ f.StringToStringVarP(&client.Labels, "labels", "l", nil, "Labels that would be added to release metadata. Should be separated by comma. Original release labels will be merged with upgrade labels. You can unset label using null.")
+ f.StringVar(&client.Description, "description", "", "add a custom description")
+ f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
+ f.BoolVar(&client.EnableDNS, "enable-dns", false, "enable DNS lookups when rendering templates")
+ f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, upgrade will ignore the check for helm annotations and take ownership of the existing resources")
+ addDryRunFlag(cmd)
+ addChartPathOptionsFlags(f, &client.ChartPathOptions)
+ addValueOptionsFlags(f, valueOpts)
+ bindOutputFlag(cmd, &outfmt)
+ bindPostRenderFlag(cmd, &client.PostRenderer, settings)
+ AddWaitFlag(cmd, &client.WaitStrategy)
+ cmd.MarkFlagsMutuallyExclusive("force-replace", "force-conflicts")
+ cmd.MarkFlagsMutuallyExclusive("force", "force-conflicts")
+
+ err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 2 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return compVersionFlag(args[1], toComplete)
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return cmd
+}
+
+func isReleaseUninstalled(versionsi []ri.Releaser) bool {
+ versions, err := releaseListToV1List(versionsi)
+ if err != nil {
+ slog.Error("cannot convert release list to v1 release list", "error", err)
+ return false
+ }
+ return len(versions) > 0 && versions[len(versions)-1].Info.Status == common.StatusUninstalled
+}
diff --git a/helm/pkg/cmd/upgrade_test.go b/helm/pkg/cmd/upgrade_test.go
new file mode 100644
index 000000000..0ae1e3561
--- /dev/null
+++ b/helm/pkg/cmd/upgrade_test.go
@@ -0,0 +1,662 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestUpgradeCmd(t *testing.T) {
+
+ tmpChart := t.TempDir()
+ cfile := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "testUpgradeChart",
+ Description: "A Helm chart for Kubernetes",
+ Version: "0.1.0",
+ },
+ }
+ chartPath := filepath.Join(tmpChart, cfile.Metadata.Name)
+ if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
+ t.Fatalf("Error creating chart for upgrade: %v", err)
+ }
+ ch, err := loader.Load(chartPath)
+ if err != nil {
+ t.Fatalf("Error loading chart: %v", err)
+ }
+ _ = release.Mock(&release.MockReleaseOptions{
+ Name: "funny-bunny",
+ Chart: ch,
+ })
+
+ // update chart version
+ cfile.Metadata.Version = "0.1.2"
+
+ if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
+ t.Fatalf("Error creating chart: %v", err)
+ }
+ ch, err = loader.Load(chartPath)
+ if err != nil {
+ t.Fatalf("Error loading updated chart: %v", err)
+ }
+
+ // update chart version again
+ cfile.Metadata.Version = "0.1.3"
+
+ if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
+ t.Fatalf("Error creating chart: %v", err)
+ }
+ var ch2 *chart.Chart
+ ch2, err = loader.Load(chartPath)
+ if err != nil {
+ t.Fatalf("Error loading updated chart: %v", err)
+ }
+
+ missingDepsPath := "testdata/testcharts/chart-missing-deps"
+ badDepsPath := "testdata/testcharts/chart-bad-requirements"
+ presentDepsPath := "testdata/testcharts/chart-with-subchart-update"
+
+ relWithStatusMock := func(n string, v int, ch *chart.Chart, status rcommon.Status) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{Name: n, Version: v, Chart: ch, Status: status})
+ }
+
+ relMock := func(n string, v int, ch *chart.Chart) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{Name: n, Version: v, Chart: ch})
+ }
+
+ tests := []cmdTestCase{
+ {
+ name: "upgrade a release",
+ cmd: fmt.Sprintf("upgrade funny-bunny '%s'", chartPath),
+ golden: "output/upgrade.txt",
+ rels: []*release.Release{relMock("funny-bunny", 2, ch)},
+ },
+ {
+ name: "upgrade a release with timeout",
+ cmd: fmt.Sprintf("upgrade funny-bunny --timeout 120s '%s'", chartPath),
+ golden: "output/upgrade-with-timeout.txt",
+ rels: []*release.Release{relMock("funny-bunny", 3, ch2)},
+ },
+ {
+ name: "upgrade a release with --reset-values",
+ cmd: fmt.Sprintf("upgrade funny-bunny --reset-values '%s'", chartPath),
+ golden: "output/upgrade-with-reset-values.txt",
+ rels: []*release.Release{relMock("funny-bunny", 4, ch2)},
+ },
+ {
+ name: "upgrade a release with --reuse-values",
+ cmd: fmt.Sprintf("upgrade funny-bunny --reuse-values '%s'", chartPath),
+ golden: "output/upgrade-with-reset-values2.txt",
+ rels: []*release.Release{relMock("funny-bunny", 5, ch2)},
+ },
+ {
+ name: "upgrade a release with --take-ownership",
+ cmd: fmt.Sprintf("upgrade funny-bunny '%s' --take-ownership", chartPath),
+ golden: "output/upgrade-and-take-ownership.txt",
+ rels: []*release.Release{relMock("funny-bunny", 2, ch)},
+ },
+ {
+ name: "install a release with 'upgrade --install'",
+ cmd: fmt.Sprintf("upgrade zany-bunny -i '%s'", chartPath),
+ golden: "output/upgrade-with-install.txt",
+ rels: []*release.Release{relMock("zany-bunny", 1, ch)},
+ },
+ {
+ name: "install a release with 'upgrade --install' and timeout",
+ cmd: fmt.Sprintf("upgrade crazy-bunny -i --timeout 120s '%s'", chartPath),
+ golden: "output/upgrade-with-install-timeout.txt",
+ rels: []*release.Release{relMock("crazy-bunny", 1, ch)},
+ },
+ {
+ name: "upgrade a release with wait",
+ cmd: fmt.Sprintf("upgrade crazy-bunny --wait '%s'", chartPath),
+ golden: "output/upgrade-with-wait.txt",
+ rels: []*release.Release{relMock("crazy-bunny", 2, ch2)},
+ },
+ {
+ name: "upgrade a release with wait-for-jobs",
+ cmd: fmt.Sprintf("upgrade crazy-bunny --wait --wait-for-jobs '%s'", chartPath),
+ golden: "output/upgrade-with-wait-for-jobs.txt",
+ rels: []*release.Release{relMock("crazy-bunny", 2, ch2)},
+ },
+ {
+ name: "upgrade a release with missing dependencies",
+ cmd: fmt.Sprintf("upgrade bonkers-bunny %s", missingDepsPath),
+ golden: "output/upgrade-with-missing-dependencies.txt",
+ wantError: true,
+ },
+ {
+ name: "upgrade a release with bad dependencies",
+ cmd: fmt.Sprintf("upgrade bonkers-bunny '%s'", badDepsPath),
+ golden: "output/upgrade-with-bad-dependencies.txt",
+ wantError: true,
+ },
+ {
+ name: "upgrade a release with resolving missing dependencies",
+ cmd: fmt.Sprintf("upgrade --dependency-update funny-bunny %s", presentDepsPath),
+ golden: "output/upgrade-with-dependency-update.txt",
+ rels: []*release.Release{relMock("funny-bunny", 2, ch2)},
+ },
+ {
+ name: "upgrade a non-existent release",
+ cmd: fmt.Sprintf("upgrade funny-bunny '%s'", chartPath),
+ golden: "output/upgrade-with-bad-or-missing-existing-release.txt",
+ wantError: true,
+ },
+ {
+ name: "upgrade a failed release",
+ cmd: fmt.Sprintf("upgrade funny-bunny '%s'", chartPath),
+ golden: "output/upgrade.txt",
+ rels: []*release.Release{relWithStatusMock("funny-bunny", 2, ch, rcommon.StatusFailed)},
+ },
+ {
+ name: "upgrade a pending install release",
+ cmd: fmt.Sprintf("upgrade funny-bunny '%s'", chartPath),
+ golden: "output/upgrade-with-pending-install.txt",
+ wantError: true,
+ rels: []*release.Release{relWithStatusMock("funny-bunny", 2, ch, rcommon.StatusPendingInstall)},
+ },
+ {
+ name: "install a previously uninstalled release with '--keep-history' using 'upgrade --install'",
+ cmd: fmt.Sprintf("upgrade funny-bunny -i '%s'", chartPath),
+ golden: "output/upgrade-uninstalled-with-keep-history.txt",
+ rels: []*release.Release{relWithStatusMock("funny-bunny", 2, ch, rcommon.StatusUninstalled)},
+ },
+ }
+ runTestCmd(t, tests)
+}
+
+func TestUpgradeWithValue(t *testing.T) {
+ releaseName := "funny-bunny-v2"
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ store.Create(relMock(releaseName, 3, ch))
+
+ cmd := fmt.Sprintf("upgrade %s --set favoriteDrink=tea '%s'", releaseName, chartPath)
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ updatedReli, err := store.Get(releaseName, 4)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(updatedRel.Manifest, "drink: tea") {
+ t.Errorf("The value is not set correctly. manifest: %s", updatedRel.Manifest)
+ }
+
+}
+
+func TestUpgradeWithStringValue(t *testing.T) {
+ releaseName := "funny-bunny-v3"
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ store.Create(relMock(releaseName, 3, ch))
+
+ cmd := fmt.Sprintf("upgrade %s --set-string favoriteDrink=coffee '%s'", releaseName, chartPath)
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ updatedReli, err := store.Get(releaseName, 4)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(updatedRel.Manifest, "drink: coffee") {
+ t.Errorf("The value is not set correctly. manifest: %s", updatedRel.Manifest)
+ }
+
+}
+
+func TestUpgradeInstallWithSubchartNotes(t *testing.T) {
+
+ releaseName := "wacky-bunny-v1"
+ relMock, ch, _ := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ store.Create(relMock(releaseName, 1, ch))
+
+ cmd := fmt.Sprintf("upgrade %s -i --render-subchart-notes '%s'", releaseName, "testdata/testcharts/chart-with-subchart-notes")
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ upgradedReli, err := store.Get(releaseName, 2)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ upgradedRel, err := releaserToV1Release(upgradedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(upgradedRel.Info.Notes, "PARENT NOTES") {
+ t.Errorf("The parent notes are not set correctly. NOTES: %s", upgradedRel.Info.Notes)
+ }
+
+ if !strings.Contains(upgradedRel.Info.Notes, "SUBCHART NOTES") {
+ t.Errorf("The subchart notes are not set correctly. NOTES: %s", upgradedRel.Info.Notes)
+ }
+
+}
+
+func TestUpgradeWithValuesFile(t *testing.T) {
+
+ releaseName := "funny-bunny-v4"
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ store.Create(relMock(releaseName, 3, ch))
+
+ cmd := fmt.Sprintf("upgrade %s --values testdata/testcharts/upgradetest/values.yaml '%s'", releaseName, chartPath)
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ updatedReli, err := store.Get(releaseName, 4)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(updatedRel.Manifest, "drink: beer") {
+ t.Errorf("The value is not set correctly. manifest: %s", updatedRel.Manifest)
+ }
+
+}
+
+func TestUpgradeWithValuesFromStdin(t *testing.T) {
+
+ releaseName := "funny-bunny-v5"
+ relMock, ch, chartPath := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ store.Create(relMock(releaseName, 3, ch))
+
+ in, err := os.Open("testdata/testcharts/upgradetest/values.yaml")
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ cmd := fmt.Sprintf("upgrade %s --values - '%s'", releaseName, chartPath)
+ _, _, err = executeActionCommandStdinC(store, in, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ updatedReli, err := store.Get(releaseName, 4)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(updatedRel.Manifest, "drink: beer") {
+ t.Errorf("The value is not set correctly. manifest: %s", updatedRel.Manifest)
+ }
+}
+
+func TestUpgradeInstallWithValuesFromStdin(t *testing.T) {
+
+ releaseName := "funny-bunny-v6"
+ _, _, chartPath := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ in, err := os.Open("testdata/testcharts/upgradetest/values.yaml")
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ cmd := fmt.Sprintf("upgrade %s -f - --install '%s'", releaseName, chartPath)
+ _, _, err = executeActionCommandStdinC(store, in, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ updatedReli, err := store.Get(releaseName, 1)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !strings.Contains(updatedRel.Manifest, "drink: beer") {
+ t.Errorf("The value is not set correctly. manifest: %s", updatedRel.Manifest)
+ }
+
+}
+
+func prepareMockRelease(t *testing.T, releaseName string) (func(n string, v int, ch *chart.Chart) *release.Release, *chart.Chart, string) {
+ t.Helper()
+ tmpChart := t.TempDir()
+ configmapData, err := os.ReadFile("testdata/testcharts/upgradetest/templates/configmap.yaml")
+ if err != nil {
+ t.Fatalf("Error loading template yaml %v", err)
+ }
+ cfile := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "testUpgradeChart",
+ Description: "A Helm chart for Kubernetes",
+ Version: "0.1.0",
+ },
+ Templates: []*common.File{{Name: "templates/configmap.yaml", ModTime: time.Now(), Data: configmapData}},
+ }
+ chartPath := filepath.Join(tmpChart, cfile.Metadata.Name)
+ if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
+ t.Fatalf("Error creating chart for upgrade: %v", err)
+ }
+ ch, err := loader.Load(chartPath)
+ if err != nil {
+ t.Fatalf("Error loading chart: %v", err)
+ }
+ _ = release.Mock(&release.MockReleaseOptions{
+ Name: releaseName,
+ Chart: ch,
+ })
+
+ relMock := func(n string, v int, ch *chart.Chart) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{Name: n, Version: v, Chart: ch})
+ }
+
+ return relMock, ch, chartPath
+}
+
+func TestUpgradeOutputCompletion(t *testing.T) {
+ outputFlagCompletionTest(t, "upgrade")
+}
+
+func TestUpgradeVersionCompletion(t *testing.T) {
+ repoFile := "testdata/helmhome/helm/repositories.yaml"
+ repoCache := "testdata/helmhome/helm/repository"
+
+ repoSetup := fmt.Sprintf("--repository-config %s --repository-cache %s", repoFile, repoCache)
+
+ tests := []cmdTestCase{{
+ name: "completion for upgrade version flag",
+ cmd: fmt.Sprintf("%s __complete upgrade releasename testing/alpine --version ''", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for upgrade version flag, no filter",
+ cmd: fmt.Sprintf("%s __complete upgrade releasename testing/alpine --version 0.3", repoSetup),
+ golden: "output/version-comp.txt",
+ }, {
+ name: "completion for upgrade version flag too few args",
+ cmd: fmt.Sprintf("%s __complete upgrade releasename --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for upgrade version flag too many args",
+ cmd: fmt.Sprintf("%s __complete upgrade releasename testing/alpine badarg --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }, {
+ name: "completion for upgrade version flag invalid chart",
+ cmd: fmt.Sprintf("%s __complete upgrade releasename invalid/invalid --version ''", repoSetup),
+ golden: "output/version-invalid-comp.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestUpgradeFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "upgrade", false)
+ checkFileCompletion(t, "upgrade myrelease", true)
+ checkFileCompletion(t, "upgrade myrelease repo/chart", false)
+}
+
+func TestUpgradeInstallWithLabels(t *testing.T) {
+ releaseName := "funny-bunny-labels"
+ _, _, chartPath := prepareMockRelease(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ expectedLabels := map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ }
+ cmd := fmt.Sprintf("upgrade %s --install --labels key1=val1,key2=val2 '%s'", releaseName, chartPath)
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ updatedReli, err := store.Get(releaseName, 1)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+ updatedRel, err := releaserToV1Release(updatedReli)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ if !reflect.DeepEqual(updatedRel.Labels, expectedLabels) {
+ t.Errorf("Expected {%v}, got {%v}", expectedLabels, updatedRel.Labels)
+ }
+}
+
+func prepareMockReleaseWithSecret(t *testing.T, releaseName string) (func(n string, v int, ch *chart.Chart) *release.Release, *chart.Chart, string) {
+ t.Helper()
+ tmpChart := t.TempDir()
+ configmapData, err := os.ReadFile("testdata/testcharts/chart-with-secret/templates/configmap.yaml")
+ if err != nil {
+ t.Fatalf("Error loading template yaml %v", err)
+ }
+ secretData, err := os.ReadFile("testdata/testcharts/chart-with-secret/templates/secret.yaml")
+ if err != nil {
+ t.Fatalf("Error loading template yaml %v", err)
+ }
+ modTime := time.Now()
+ cfile := &chart.Chart{
+ Metadata: &chart.Metadata{
+ APIVersion: chart.APIVersionV1,
+ Name: "testUpgradeChart",
+ Description: "A Helm chart for Kubernetes",
+ Version: "0.1.0",
+ },
+ Templates: []*common.File{{Name: "templates/configmap.yaml", ModTime: modTime, Data: configmapData}, {Name: "templates/secret.yaml", ModTime: modTime, Data: secretData}},
+ }
+ chartPath := filepath.Join(tmpChart, cfile.Metadata.Name)
+ if err := chartutil.SaveDir(cfile, tmpChart); err != nil {
+ t.Fatalf("Error creating chart for upgrade: %v", err)
+ }
+ ch, err := loader.Load(chartPath)
+ if err != nil {
+ t.Fatalf("Error loading chart: %v", err)
+ }
+ _ = release.Mock(&release.MockReleaseOptions{
+ Name: releaseName,
+ Chart: ch,
+ })
+
+ relMock := func(n string, v int, ch *chart.Chart) *release.Release {
+ return release.Mock(&release.MockReleaseOptions{Name: n, Version: v, Chart: ch})
+ }
+
+ return relMock, ch, chartPath
+}
+
+func TestUpgradeWithDryRun(t *testing.T) {
+ releaseName := "funny-bunny-labels"
+ _, _, chartPath := prepareMockReleaseWithSecret(t, releaseName)
+
+ defer resetEnv()()
+
+ store := storageFixture()
+
+ // First install a release into the store so that future --dry-run attempts
+ // have it available.
+ cmd := fmt.Sprintf("upgrade %s --install '%s'", releaseName, chartPath)
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ _, err = store.Get(releaseName, 1)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ cmd = fmt.Sprintf("upgrade %s --dry-run '%s'", releaseName, chartPath)
+ _, out, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ // No second release should be stored because this is a dry run.
+ _, err = store.Get(releaseName, 2)
+ if err == nil {
+ t.Error("expected error as there should be no new release but got none")
+ }
+
+ if !strings.Contains(out, "kind: Secret") {
+ t.Error("expected secret in output from --dry-run but found none")
+ }
+
+ // Ensure the secret is not in the output
+ cmd = fmt.Sprintf("upgrade %s --dry-run --hide-secret '%s'", releaseName, chartPath)
+ _, out, err = executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Errorf("unexpected error, got '%v'", err)
+ }
+
+ // No second release should be stored because this is a dry run.
+ _, err = store.Get(releaseName, 2)
+ if err == nil {
+ t.Error("expected error as there should be no new release but got none")
+ }
+
+ if strings.Contains(out, "kind: Secret") {
+ t.Error("expected no secret in output from --dry-run --hide-secret but found one")
+ }
+
+ // Ensure there is an error when --hide-secret used without dry-run
+ cmd = fmt.Sprintf("upgrade %s --hide-secret '%s'", releaseName, chartPath)
+ _, _, err = executeActionCommandC(store, cmd)
+ if err == nil {
+ t.Error("expected error when --hide-secret used without --dry-run")
+ }
+}
+
+func TestUpgradeInstallServerSideApply(t *testing.T) {
+ _, _, chartPath := prepareMockRelease(t, "ssa-test")
+
+ defer resetEnv()()
+
+ tests := []struct {
+ name string
+ serverSideFlag string
+ expectedApplyMethod string
+ }{
+ {
+ name: "upgrade --install with --server-side=false uses client-side apply",
+ serverSideFlag: "--server-side=false",
+ expectedApplyMethod: "csa",
+ },
+ {
+ name: "upgrade --install with --server-side=true uses server-side apply",
+ serverSideFlag: "--server-side=true",
+ expectedApplyMethod: "ssa",
+ },
+ {
+ name: "upgrade --install with --server-side=auto uses server-side apply (default for new install)",
+ serverSideFlag: "--server-side=auto",
+ expectedApplyMethod: "ssa",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ store := storageFixture()
+ releaseName := fmt.Sprintf("ssa-test-%s", tt.expectedApplyMethod)
+
+ cmd := fmt.Sprintf("upgrade %s --install %s '%s'", releaseName, tt.serverSideFlag, chartPath)
+ _, _, err := executeActionCommandC(store, cmd)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ rel, err := store.Get(releaseName, 1)
+ if err != nil {
+ t.Fatalf("unexpected error getting release: %v", err)
+ }
+
+ relV1, err := releaserToV1Release(rel)
+ if err != nil {
+ t.Fatalf("unexpected error converting release: %v", err)
+ }
+
+ if relV1.ApplyMethod != tt.expectedApplyMethod {
+ t.Errorf("expected ApplyMethod %q, got %q", tt.expectedApplyMethod, relV1.ApplyMethod)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/cmd/verify.go b/helm/pkg/cmd/verify.go
new file mode 100644
index 000000000..3b7574386
--- /dev/null
+++ b/helm/pkg/cmd/verify.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const verifyDesc = `
+Verify that the given chart has a valid provenance file.
+
+Provenance files provide cryptographic verification that a chart has not been
+tampered with, and was packaged by a trusted provider.
+
+This command can be used to verify a local chart. Several other commands provide
+'--verify' flags that run the same validation. To generate a signed package, use
+the 'helm package --sign' command.
+`
+
+func newVerifyCmd(out io.Writer) *cobra.Command {
+ client := action.NewVerify()
+
+ cmd := &cobra.Command{
+ Use: "verify PATH",
+ Short: "verify that a chart at the given path has been signed and is valid",
+ Long: verifyDesc,
+ Args: require.ExactArgs(1),
+ ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) {
+ if len(args) == 0 {
+ // Allow file completion when completing the argument for the path
+ return nil, cobra.ShellCompDirectiveDefault
+ }
+ // No more completions, so disable file completion
+ return noMoreArgsComp()
+ },
+ RunE: func(_ *cobra.Command, args []string) error {
+ result, err := client.Run(args[0])
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprint(out, result)
+
+ return nil
+ },
+ }
+
+ cmd.Flags().StringVar(&client.Keyring, "keyring", defaultKeyring(), "keyring containing public keys")
+
+ return cmd
+}
diff --git a/helm/pkg/cmd/verify_test.go b/helm/pkg/cmd/verify_test.go
new file mode 100644
index 000000000..ae373afd2
--- /dev/null
+++ b/helm/pkg/cmd/verify_test.go
@@ -0,0 +1,97 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+)
+
+func TestVerifyCmd(t *testing.T) {
+
+ statExe := "stat"
+ statPathMsg := "no such file or directory"
+ statFileMsg := statPathMsg
+ if runtime.GOOS == "windows" {
+ statExe = "FindFirstFile"
+ statPathMsg = "The system cannot find the path specified."
+ statFileMsg = "The system cannot find the file specified."
+ }
+
+ tests := []struct {
+ name string
+ cmd string
+ expect string
+ wantError bool
+ }{
+ {
+ name: "verify requires a chart",
+ cmd: "verify",
+ expect: "\"helm verify\" requires 1 argument\n\nUsage: helm verify PATH [flags]",
+ wantError: true,
+ },
+ {
+ name: "verify requires that chart exists",
+ cmd: "verify no/such/file",
+ expect: fmt.Sprintf("%s no/such/file: %s", statExe, statPathMsg),
+ wantError: true,
+ },
+ {
+ name: "verify requires that chart is not a directory",
+ cmd: "verify testdata/testcharts/signtest",
+ expect: "unpacked charts cannot be verified",
+ wantError: true,
+ },
+ {
+ name: "verify requires that chart has prov file",
+ cmd: "verify testdata/testcharts/compressedchart-0.1.0.tgz",
+ expect: fmt.Sprintf("could not load provenance file testdata/testcharts/compressedchart-0.1.0.tgz.prov: %s testdata/testcharts/compressedchart-0.1.0.tgz.prov: %s", statExe, statFileMsg),
+ wantError: true,
+ },
+ {
+ name: "verify validates a properly signed chart",
+ cmd: "verify testdata/testcharts/signtest-0.1.0.tgz --keyring testdata/helm-test-key.pub",
+ expect: "Signed by: Helm Testing (This key should only be used for testing. DO NOT TRUST.) \nUsing Key With Fingerprint: 5E615389B53CA37F0EE60BD3843BBF981FC18762\nChart Hash Verified: sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55\n",
+ wantError: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, out, err := executeActionCommand(tt.cmd)
+ if tt.wantError {
+ if err == nil {
+ t.Errorf("Expected error, but got none: %q", out)
+ }
+ if err.Error() != tt.expect {
+ t.Errorf("Expected error %q, got %q", tt.expect, err)
+ }
+ return
+ } else if err != nil {
+ t.Errorf("Unexpected error: %s", err)
+ }
+ if out != tt.expect {
+ t.Errorf("Expected %q, got %q", tt.expect, out)
+ }
+ })
+ }
+}
+
+func TestVerifyFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "verify", true)
+ checkFileCompletion(t, "verify mypath", false)
+}
diff --git a/helm/pkg/cmd/version.go b/helm/pkg/cmd/version.go
new file mode 100644
index 000000000..80fb0d712
--- /dev/null
+++ b/helm/pkg/cmd/version.go
@@ -0,0 +1,101 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+ "text/template"
+
+ "github.com/spf13/cobra"
+
+ "helm.sh/helm/v4/internal/version"
+ "helm.sh/helm/v4/pkg/cmd/require"
+)
+
+const versionDesc = `
+Show the version for Helm.
+
+This will print a representation the version of Helm.
+The output will look something like this:
+
+version.BuildInfo{Version:"v3.2.1", GitCommit:"fe51cd1e31e6a202cba7dead9552a6d418ded79a", GitTreeState:"clean", GoVersion:"go1.13.10"}
+
+- Version is the semantic version of the release.
+- GitCommit is the SHA for the commit that this version was built from.
+- GitTreeState is "clean" if there are no local code changes when this binary was
+ built, and "dirty" if the binary was built from locally modified code.
+- GoVersion is the version of Go that was used to compile Helm.
+
+When using the --template flag the following properties are available to use in
+the template:
+
+- .Version contains the semantic version of Helm
+- .GitCommit is the git commit
+- .GitTreeState is the state of the git tree when Helm was built
+- .GoVersion contains the version of Go that Helm was compiled with
+
+For example, --template='Version: {{.Version}}' outputs 'Version: v3.2.1'.
+`
+
+type versionOptions struct {
+ short bool
+ template string
+}
+
+func newVersionCmd(out io.Writer) *cobra.Command {
+ o := &versionOptions{}
+
+ cmd := &cobra.Command{
+ Use: "version",
+ Short: "print the helm version information",
+ Long: versionDesc,
+ Args: require.NoArgs,
+ ValidArgsFunction: noMoreArgsCompFunc,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ return o.run(out)
+ },
+ }
+ f := cmd.Flags()
+ f.BoolVar(&o.short, "short", false, "print the version number")
+ f.StringVar(&o.template, "template", "", "template for version string format")
+
+ return cmd
+}
+
+func (o *versionOptions) run(out io.Writer) error {
+ if o.template != "" {
+ tt, err := template.New("_").Parse(o.template)
+ if err != nil {
+ return err
+ }
+ return tt.Execute(out, version.Get())
+ }
+ fmt.Fprintln(out, formatVersion(o.short))
+ return nil
+}
+
+func formatVersion(short bool) string {
+ v := version.Get()
+ if short {
+ if len(v.GitCommit) >= 7 {
+ return fmt.Sprintf("%s+g%s", v.Version, v.GitCommit[:7])
+ }
+ return version.GetVersion()
+ }
+ return fmt.Sprintf("%#v", v)
+}
diff --git a/helm/pkg/cmd/version_test.go b/helm/pkg/cmd/version_test.go
new file mode 100644
index 000000000..9551de767
--- /dev/null
+++ b/helm/pkg/cmd/version_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+)
+
+func TestVersion(t *testing.T) {
+ tests := []cmdTestCase{{
+ name: "default",
+ cmd: "version",
+ golden: "output/version.txt",
+ }, {
+ name: "short",
+ cmd: "version --short",
+ golden: "output/version-short.txt",
+ }, {
+ name: "template",
+ cmd: "version --template='Version: {{.Version}}'",
+ golden: "output/version-template.txt",
+ }}
+ runTestCmd(t, tests)
+}
+
+func TestVersionFileCompletion(t *testing.T) {
+ checkFileCompletion(t, "version", false)
+}
diff --git a/helm/pkg/downloader/cache.go b/helm/pkg/downloader/cache.go
new file mode 100644
index 000000000..cecfc8bd7
--- /dev/null
+++ b/helm/pkg/downloader/cache.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/fileutil"
+)
+
+// Cache describes a cache that can get and put chart data.
+// The cache key is the sha256 has of the content. sha256 is used in Helm for
+// digests in index files providing a common key for checking content.
+type Cache interface {
+ // Get returns a reader for the given key.
+ Get(key [sha256.Size]byte, cacheType string) (string, error)
+ // Put stores the given reader for the given key.
+ Put(key [sha256.Size]byte, data io.Reader, cacheType string) (string, error)
+}
+
+// CacheChart specifies the content is a chart
+var CacheChart = ".chart"
+
+// CacheProv specifies the content is a provenance file
+var CacheProv = ".prov"
+
+// TODO: The cache assumes files because much of Helm assumes files. Convert
+// Helm to pass content around instead of file locations.
+
+// DiskCache is a cache that stores data on disk.
+type DiskCache struct {
+ Root string
+}
+
+// Get returns a reader for the given key.
+func (c *DiskCache) Get(key [sha256.Size]byte, cacheType string) (string, error) {
+ p := c.fileName(key, cacheType)
+ fi, err := os.Stat(p)
+ if err != nil {
+ return "", err
+ }
+ // Empty files treated as not exist because there is no content.
+ if fi.Size() == 0 {
+ return p, os.ErrNotExist
+ }
+ // directories should never happen unless something outside helm is operating
+ // on this content.
+ if fi.IsDir() {
+ return p, errors.New("is a directory")
+ }
+ return p, nil
+}
+
+// Put stores the given reader for the given key.
+// It returns the path to the stored file.
+func (c *DiskCache) Put(key [sha256.Size]byte, data io.Reader, cacheType string) (string, error) {
+ // TODO: verify the key and digest of the key are the same.
+ p := c.fileName(key, cacheType)
+ if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil {
+ slog.Error("failed to create cache directory")
+ return p, err
+ }
+ return p, fileutil.AtomicWriteFile(p, data, 0644)
+}
+
+// fileName generates the filename in a structured manner where the first part is the
+// directory and the full hash is the filename.
+func (c *DiskCache) fileName(id [sha256.Size]byte, cacheType string) string {
+ return filepath.Join(c.Root, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+cacheType)
+}
diff --git a/helm/pkg/downloader/cache_test.go b/helm/pkg/downloader/cache_test.go
new file mode 100644
index 000000000..340c77aba
--- /dev/null
+++ b/helm/pkg/downloader/cache_test.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// compiler check to ensure DiskCache implements the Cache interface.
+var _ Cache = (*DiskCache)(nil)
+
+func TestDiskCache_PutAndGet(t *testing.T) {
+ // Setup a temporary directory for the cache
+ tmpDir := t.TempDir()
+ cache := &DiskCache{Root: tmpDir}
+
+ // Test data
+ content := []byte("hello world")
+ key := sha256.Sum256(content)
+
+ // --- Test case 1: Put and Get a regular file (prov=false) ---
+ t.Run("PutAndGetTgz", func(t *testing.T) {
+ // Put the data into the cache
+ path, err := cache.Put(key, bytes.NewReader(content), CacheChart)
+ require.NoError(t, err, "Put should not return an error")
+
+ // Verify the file exists at the returned path
+ _, err = os.Stat(path)
+ require.NoError(t, err, "File should exist after Put")
+
+ // Get the file from the cache
+ retrievedPath, err := cache.Get(key, CacheChart)
+ require.NoError(t, err, "Get should not return an error for existing file")
+ assert.Equal(t, path, retrievedPath, "Get should return the same path as Put")
+
+ // Verify content
+ data, err := os.ReadFile(retrievedPath)
+ require.NoError(t, err)
+ assert.Equal(t, content, data, "Content of retrieved file should match original content")
+ })
+
+ // --- Test case 2: Put and Get a provenance file (prov=true) ---
+ t.Run("PutAndGetProv", func(t *testing.T) {
+ provContent := []byte("provenance data")
+ provKey := sha256.Sum256(provContent)
+
+ path, err := cache.Put(provKey, bytes.NewReader(provContent), CacheProv)
+ require.NoError(t, err)
+
+ retrievedPath, err := cache.Get(provKey, CacheProv)
+ require.NoError(t, err)
+ assert.Equal(t, path, retrievedPath)
+
+ data, err := os.ReadFile(retrievedPath)
+ require.NoError(t, err)
+ assert.Equal(t, provContent, data)
+ })
+
+ // --- Test case 3: Get a non-existent file ---
+ t.Run("GetNonExistent", func(t *testing.T) {
+ nonExistentKey := sha256.Sum256([]byte("does not exist"))
+ _, err := cache.Get(nonExistentKey, CacheChart)
+ assert.ErrorIs(t, err, os.ErrNotExist, "Get for a non-existent key should return os.ErrNotExist")
+ })
+
+ // --- Test case 4: Put an empty file ---
+ t.Run("PutEmptyFile", func(t *testing.T) {
+ emptyContent := []byte{}
+ emptyKey := sha256.Sum256(emptyContent)
+
+ path, err := cache.Put(emptyKey, bytes.NewReader(emptyContent), CacheChart)
+ require.NoError(t, err)
+
+ // Get should return ErrNotExist for empty files
+ _, err = cache.Get(emptyKey, CacheChart)
+ assert.ErrorIs(t, err, os.ErrNotExist, "Get for an empty file should return os.ErrNotExist")
+
+ // But the file should exist
+ _, err = os.Stat(path)
+ require.NoError(t, err, "Empty file should still exist on disk")
+ })
+
+ // --- Test case 5: Get a directory ---
+ t.Run("GetDirectory", func(t *testing.T) {
+ dirKey := sha256.Sum256([]byte("i am a directory"))
+ dirPath := cache.fileName(dirKey, CacheChart)
+ err := os.MkdirAll(dirPath, 0755)
+ require.NoError(t, err)
+
+ _, err = cache.Get(dirKey, CacheChart)
+ assert.EqualError(t, err, "is a directory")
+ })
+}
+
+func TestDiskCache_fileName(t *testing.T) {
+ cache := &DiskCache{Root: "/tmp/cache"}
+ key := sha256.Sum256([]byte("some data"))
+
+ assert.Equal(t, filepath.Join("/tmp/cache", "13", "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee.chart"), cache.fileName(key, CacheChart))
+ assert.Equal(t, filepath.Join("/tmp/cache", "13", "1307990e6ba5ca145eb35e99182a9bec46531bc54ddf656a602c780fa0240dee.prov"), cache.fileName(key, CacheProv))
+}
diff --git a/helm/pkg/downloader/chart_downloader.go b/helm/pkg/downloader/chart_downloader.go
new file mode 100644
index 000000000..ee4f8abe3
--- /dev/null
+++ b/helm/pkg/downloader/chart_downloader.go
@@ -0,0 +1,580 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "log/slog"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/fileutil"
+ ifs "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/internal/urlutil"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/provenance"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// VerificationStrategy describes a strategy for determining whether to verify a chart.
+type VerificationStrategy int
+
+const (
+ // VerifyNever will skip all verification of a chart.
+ VerifyNever VerificationStrategy = iota
+ // VerifyIfPossible will attempt a verification, it will not error if verification
+ // data is missing. But it will not stop processing if verification fails.
+ VerifyIfPossible
+ // VerifyAlways will always attempt a verification, and will fail if the
+ // verification fails.
+ VerifyAlways
+ // VerifyLater will fetch verification data, but not do any verification.
+ // This is to accommodate the case where another step of the process will
+ // perform verification.
+ VerifyLater
+)
+
+// ErrNoOwnerRepo indicates that a given chart URL can't be found in any repos.
+var ErrNoOwnerRepo = errors.New("could not find a repo containing the given URL")
+
+// ChartDownloader handles downloading a chart.
+//
+// It is capable of performing verifications on charts as well.
+type ChartDownloader struct {
+ // Out is the location to write warning and info messages.
+ Out io.Writer
+ // Verify indicates what verification strategy to use.
+ Verify VerificationStrategy
+ // Keyring is the keyring file used for verification.
+ Keyring string
+ // Getter collection for the operation
+ Getters getter.Providers
+ // Options provide parameters to be passed along to the Getter being initialized.
+ Options []getter.Option
+ RegistryClient *registry.Client
+ RepositoryConfig string
+ RepositoryCache string
+
+ // ContentCache is the location where Cache stores its files by default
+ // In previous versions of Helm the charts were put in the RepositoryCache. The
+ // repositories and charts are stored in 2 different caches.
+ ContentCache string
+
+ // Cache specifies the cache implementation to use.
+ Cache Cache
+}
+
+// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file.
+//
+// If Verify is set to VerifyNever, the verification will be nil.
+// If Verify is set to VerifyIfPossible, this will return a verification (or nil on failure), and print a warning on failure.
+// If Verify is set to VerifyAlways, this will return a verification or an error if the verification fails.
+// If Verify is set to VerifyLater, this will download the prov file (if it exists), but not verify it.
+//
+// For VerifyNever and VerifyIfPossible, the Verification may be empty.
+//
+// Returns a string path to the location where the file was downloaded and a verification
+// (if provenance was verified), or an error if something bad happened.
+func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) {
+ if c.Cache == nil {
+ if c.ContentCache == "" {
+ return "", nil, errors.New("content cache must be set")
+ }
+ c.Cache = &DiskCache{Root: c.ContentCache}
+ slog.Debug("set up default downloader cache")
+ }
+ hash, u, err := c.ResolveChartVersion(ref, version)
+ if err != nil {
+ return "", nil, err
+ }
+
+ g, err := c.Getters.ByScheme(u.Scheme)
+ if err != nil {
+ return "", nil, err
+ }
+
+ // Check the cache for the content. Otherwise download it.
+ // Note, this process will pull from the cache but does not automatically populate
+ // the cache with the file it downloads.
+ var data *bytes.Buffer
+ var found bool
+ var digest []byte
+ var digest32 [32]byte
+ if hash != "" {
+ // if there is a hash, populate the other formats
+ digest, err = hex.DecodeString(hash)
+ if err != nil {
+ return "", nil, err
+ }
+ copy(digest32[:], digest)
+ if pth, err := c.Cache.Get(digest32, CacheChart); err == nil {
+ fdata, err := os.ReadFile(pth)
+ if err == nil {
+ found = true
+ data = bytes.NewBuffer(fdata)
+ slog.Debug("found chart in cache", "id", hash)
+ }
+ }
+ }
+
+ if !found {
+ c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream"))
+
+ data, err = g.Get(u.String(), c.Options...)
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ name := filepath.Base(u.Path)
+ if u.Scheme == registry.OCIScheme {
+ idx := strings.LastIndexByte(name, ':')
+ name = fmt.Sprintf("%s-%s.tgz", name[:idx], name[idx+1:])
+ }
+
+ destfile := filepath.Join(dest, name)
+ if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil {
+ return destfile, nil, err
+ }
+
+ // If provenance is requested, verify it.
+ ver := &provenance.Verification{}
+ if c.Verify > VerifyNever {
+ found = false
+ var body *bytes.Buffer
+ if hash != "" {
+ if pth, err := c.Cache.Get(digest32, CacheProv); err == nil {
+ fdata, err := os.ReadFile(pth)
+ if err == nil {
+ found = true
+ body = bytes.NewBuffer(fdata)
+ slog.Debug("found provenance in cache", "id", hash)
+ }
+ }
+ }
+ if !found {
+ body, err = g.Get(u.String() + ".prov")
+ if err != nil {
+ if c.Verify == VerifyAlways {
+ return destfile, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov")
+ }
+ fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err)
+ return destfile, ver, nil
+ }
+ }
+ provfile := destfile + ".prov"
+ if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil {
+ return destfile, nil, err
+ }
+
+ if c.Verify != VerifyLater {
+ ver, err = VerifyChart(destfile, destfile+".prov", c.Keyring)
+ if err != nil {
+ // Fail always in this case, since it means the verification step
+ // failed.
+ return destfile, ver, err
+ }
+ }
+ }
+ return destfile, ver, nil
+}
+
+// DownloadToCache retrieves resources while using a content based cache.
+func (c *ChartDownloader) DownloadToCache(ref, version string) (string, *provenance.Verification, error) {
+ if c.Cache == nil {
+ if c.ContentCache == "" {
+ return "", nil, errors.New("content cache must be set")
+ }
+ c.Cache = &DiskCache{Root: c.ContentCache}
+ slog.Debug("set up default downloader cache")
+ }
+
+ digestString, u, err := c.ResolveChartVersion(ref, version)
+ if err != nil {
+ return "", nil, err
+ }
+
+ g, err := c.Getters.ByScheme(u.Scheme)
+ if err != nil {
+ return "", nil, err
+ }
+
+ c.Options = append(c.Options, getter.WithAcceptHeader("application/gzip,application/octet-stream"))
+
+ // Check the cache for the file
+ digest, err := hex.DecodeString(digestString)
+ if err != nil {
+ return "", nil, fmt.Errorf("unable to decode digest: %w", err)
+ }
+ var digest32 [32]byte
+ copy(digest32[:], digest)
+
+ var pth string
+ // only fetch from the cache if we have a digest
+ if len(digest) > 0 {
+ pth, err = c.Cache.Get(digest32, CacheChart)
+ if err == nil {
+ slog.Debug("found chart in cache", "id", digestString)
+ }
+ }
+ if len(digest) == 0 || err != nil {
+ slog.Debug("attempting to download chart", "ref", ref, "version", version)
+ if err != nil && !os.IsNotExist(err) {
+ return "", nil, err
+ }
+
+ // Get file not in the cache
+ data, gerr := g.Get(u.String(), c.Options...)
+ if gerr != nil {
+ return "", nil, gerr
+ }
+
+ // Generate the digest
+ if len(digest) == 0 {
+ digest32 = sha256.Sum256(data.Bytes())
+ }
+
+ pth, err = c.Cache.Put(digest32, data, CacheChart)
+ if err != nil {
+ return "", nil, err
+ }
+ slog.Debug("put downloaded chart in cache", "id", hex.EncodeToString(digest32[:]))
+ }
+
+ // If provenance is requested, verify it.
+ ver := &provenance.Verification{}
+ if c.Verify > VerifyNever {
+
+ ppth, err := c.Cache.Get(digest32, CacheProv)
+ if err == nil {
+ slog.Debug("found provenance in cache", "id", digestString)
+ } else {
+ if !os.IsNotExist(err) {
+ return pth, ver, err
+ }
+
+ body, err := g.Get(u.String() + ".prov")
+ if err != nil {
+ if c.Verify == VerifyAlways {
+ return pth, ver, fmt.Errorf("failed to fetch provenance %q", u.String()+".prov")
+ }
+ fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err)
+ return pth, ver, nil
+ }
+
+ ppth, err = c.Cache.Put(digest32, body, CacheProv)
+ if err != nil {
+ return "", nil, err
+ }
+ slog.Debug("put downloaded provenance file in cache", "id", hex.EncodeToString(digest32[:]))
+ }
+
+ if c.Verify != VerifyLater {
+
+ // provenance files pin to a specific name so this needs to be accounted for
+ // when verifying.
+ // Note, this does make an assumption that the name/version is unique to a
+ // hash when a provenance file is used. If this isn't true, this section of code
+ // will need to be reworked.
+ name := filepath.Base(u.Path)
+ if u.Scheme == registry.OCIScheme {
+ idx := strings.LastIndexByte(name, ':')
+ name = fmt.Sprintf("%s-%s.tgz", name[:idx], name[idx+1:])
+ }
+
+ // Copy chart to a known location with the right name for verification and then
+ // clean it up.
+ tmpdir := filepath.Dir(filepath.Join(c.ContentCache, "tmp"))
+ if err := os.MkdirAll(tmpdir, 0755); err != nil {
+ return pth, ver, err
+ }
+ tmpfile := filepath.Join(tmpdir, name)
+ err = ifs.CopyFile(pth, tmpfile)
+ if err != nil {
+ return pth, ver, err
+ }
+ // Not removing the tmp dir itself because a concurrent process may be using it
+ defer os.RemoveAll(tmpfile)
+
+ ver, err = VerifyChart(tmpfile, ppth, c.Keyring)
+ if err != nil {
+ // Fail always in this case, since it means the verification step
+ // failed.
+ return pth, ver, err
+ }
+ }
+ }
+ return pth, ver, nil
+}
+
+// ResolveChartVersion resolves a chart reference to a URL.
+//
+// It returns:
+// - A hash of the content if available
+// - The URL and sets the ChartDownloader's Options that can fetch the URL using the appropriate Getter.
+// - An error if there is one
+//
+// A reference may be an HTTP URL, an oci reference URL, a 'reponame/chartname'
+// reference, or a local path.
+//
+// A version is a SemVer string (1.2.3-beta.1+f334a6789).
+//
+// - For fully qualified URLs, the version will be ignored (since URLs aren't versioned)
+// - For a chart reference
+// - If version is non-empty, this will return the URL for that version
+// - If version is empty, this will return the URL for the latest version
+// - If no version can be found, an error is returned
+//
+// TODO: support OCI hash
+func (c *ChartDownloader) ResolveChartVersion(ref, version string) (string, *url.URL, error) {
+ u, err := url.Parse(ref)
+ if err != nil {
+ return "", nil, fmt.Errorf("invalid chart URL format: %s", ref)
+ }
+
+ if registry.IsOCI(u.String()) {
+ if c.RegistryClient == nil {
+ return "", nil, fmt.Errorf("unable to lookup ref %s at version '%s', missing registry client", ref, version)
+ }
+
+ digest, OCIref, err := c.RegistryClient.ValidateReference(ref, version, u)
+ return digest, OCIref, err
+ }
+
+ rf, err := loadRepoConfig(c.RepositoryConfig)
+ if err != nil {
+ return "", u, err
+ }
+
+ if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 {
+ // In this case, we have to find the parent repo that contains this chart
+ // URL. And this is an unfortunate problem, as it requires actually going
+ // through each repo cache file and finding a matching URL. But basically
+ // we want to find the repo in case we have special SSL cert config
+ // for that repo.
+
+ rc, err := c.scanReposForURL(ref, rf)
+ if err != nil {
+ // If there is no special config, return the default HTTP client and
+ // swallow the error.
+ if err == ErrNoOwnerRepo {
+ // Make sure to add the ref URL as the URL for the getter
+ c.Options = append(c.Options, getter.WithURL(ref))
+ return "", u, nil
+ }
+ return "", u, err
+ }
+
+ // If we get here, we don't need to go through the next phase of looking
+ // up the URL. We have it already. So we just set the parameters and return.
+ c.Options = append(
+ c.Options,
+ getter.WithURL(rc.URL),
+ )
+ if rc.CertFile != "" || rc.KeyFile != "" || rc.CAFile != "" {
+ c.Options = append(c.Options, getter.WithTLSClientConfig(rc.CertFile, rc.KeyFile, rc.CAFile))
+ }
+ if rc.Username != "" && rc.Password != "" {
+ c.Options = append(
+ c.Options,
+ getter.WithBasicAuth(rc.Username, rc.Password),
+ getter.WithPassCredentialsAll(rc.PassCredentialsAll),
+ )
+ }
+ return "", u, nil
+ }
+
+ // See if it's of the form: repo/path_to_chart
+ p := strings.SplitN(u.Path, "/", 2)
+ if len(p) < 2 {
+ return "", u, fmt.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u)
+ }
+
+ repoName := p[0]
+ chartName := p[1]
+ rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories)
+ if err != nil {
+ return "", u, err
+ }
+
+ // Now that we have the chart repository information we can use that URL
+ // to set the URL for the getter.
+ c.Options = append(c.Options, getter.WithURL(rc.URL))
+
+ r, err := repo.NewChartRepository(rc, c.Getters)
+ if err != nil {
+ return "", u, err
+ }
+
+ if r != nil && r.Config != nil {
+ if r.Config.CertFile != "" || r.Config.KeyFile != "" || r.Config.CAFile != "" {
+ c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile))
+ }
+ if r.Config.Username != "" && r.Config.Password != "" {
+ c.Options = append(c.Options,
+ getter.WithBasicAuth(r.Config.Username, r.Config.Password),
+ getter.WithPassCredentialsAll(r.Config.PassCredentialsAll),
+ )
+ }
+ }
+
+ // Next, we need to load the index, and actually look up the chart.
+ idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name))
+ i, err := repo.LoadIndexFile(idxFile)
+ if err != nil {
+ return "", u, fmt.Errorf("no cached repo found. (try 'helm repo update'): %w", err)
+ }
+
+ cv, err := i.Get(chartName, version)
+ if err != nil {
+ return "", u, fmt.Errorf("chart %q matching %s not found in %s index. (try 'helm repo update'): %w", chartName, version, r.Config.Name, err)
+ }
+
+ if len(cv.URLs) == 0 {
+ return "", u, fmt.Errorf("chart %q has no downloadable URLs", ref)
+ }
+
+ // TODO: Seems that picking first URL is not fully correct
+ resolvedURL, err := repo.ResolveReferenceURL(rc.URL, cv.URLs[0])
+ if err != nil {
+ return cv.Digest, u, fmt.Errorf("invalid chart URL format: %s", ref)
+ }
+
+ loc, err := url.Parse(resolvedURL)
+ return cv.Digest, loc, err
+}
+
+// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart.
+//
+// It assumes that a chart archive file is accompanied by a provenance file whose
+// name is the archive file name plus the ".prov" extension.
+func VerifyChart(path, provfile, keyring string) (*provenance.Verification, error) {
+ // For now, error out if it's not a tar file.
+ switch fi, err := os.Stat(path); {
+ case err != nil:
+ return nil, err
+ case fi.IsDir():
+ return nil, errors.New("unpacked charts cannot be verified")
+ case !isTar(path):
+ return nil, errors.New("chart must be a tgz file")
+ }
+
+ if _, err := os.Stat(provfile); err != nil {
+ return nil, fmt.Errorf("could not load provenance file %s: %w", provfile, err)
+ }
+
+ sig, err := provenance.NewFromKeyring(keyring, "")
+ if err != nil {
+ return nil, fmt.Errorf("failed to load keyring: %w", err)
+ }
+
+ // Read archive and provenance files
+ archiveData, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read chart archive: %w", err)
+ }
+ provData, err := os.ReadFile(provfile)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read provenance file: %w", err)
+ }
+
+ return sig.Verify(archiveData, provData, filepath.Base(path))
+}
+
+// isTar tests whether the given file is a tar file.
+//
+// Currently, this simply checks extension, since a subsequent function will
+// untar the file and validate its binary format.
+func isTar(filename string) bool {
+ return strings.EqualFold(filepath.Ext(filename), ".tgz")
+}
+
+func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Entry, error) {
+ for _, rc := range cfgs {
+ if rc.Name == name {
+ if rc.URL == "" {
+ return nil, fmt.Errorf("no URL found for repository %s", name)
+ }
+ return rc, nil
+ }
+ }
+ return nil, fmt.Errorf("repo %s not found", name)
+}
+
+// scanReposForURL scans all repos to find which repo contains the given URL.
+//
+// This will attempt to find the given URL in all of the known repositories files.
+//
+// If the URL is found, this will return the repo entry that contained that URL.
+//
+// If all of the repos are checked, but the URL is not found, an ErrNoOwnerRepo
+// error is returned.
+//
+// Other errors may be returned when repositories cannot be loaded or searched.
+//
+// Technically, the fact that a URL is not found in a repo is not a failure indication.
+// Charts are not required to be included in an index before they are valid. So
+// be mindful of this case.
+//
+// The same URL can technically exist in two or more repositories. This algorithm
+// will return the first one it finds. Order is determined by the order of repositories
+// in the repositories.yaml file.
+func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry, error) {
+ // FIXME: This is far from optimal. Larger installations and index files will
+ // incur a performance hit for this type of scanning.
+ for _, rc := range rf.Repositories {
+ r, err := repo.NewChartRepository(rc, c.Getters)
+ if err != nil {
+ return nil, err
+ }
+
+ idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name))
+ i, err := repo.LoadIndexFile(idxFile)
+ if err != nil {
+ return nil, fmt.Errorf("no cached repo found. (try 'helm repo update'): %w", err)
+ }
+
+ for _, entry := range i.Entries {
+ for _, ver := range entry {
+ for _, dl := range ver.URLs {
+ if urlutil.Equal(u, dl) {
+ return rc, nil
+ }
+ }
+ }
+ }
+ }
+ // This means that there is no repo file for the given URL.
+ return nil, ErrNoOwnerRepo
+}
+
+func loadRepoConfig(file string) (*repo.File, error) {
+ r, err := repo.LoadFile(file)
+ if err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return nil, err
+ }
+ return r, nil
+}
diff --git a/helm/pkg/downloader/chart_downloader_test.go b/helm/pkg/downloader/chart_downloader_test.go
new file mode 100644
index 000000000..4349ecef9
--- /dev/null
+++ b/helm/pkg/downloader/chart_downloader_test.go
@@ -0,0 +1,487 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+const (
+ repoConfig = "testdata/repositories.yaml"
+ repoCache = "testdata/repository"
+)
+
+func TestResolveChartRef(t *testing.T) {
+ tests := []struct {
+ name, ref, expect, version string
+ fail bool
+ }{
+ {name: "full URL", ref: "http://example.com/foo-1.2.3.tgz", expect: "http://example.com/foo-1.2.3.tgz"},
+ {name: "full URL, HTTPS", ref: "https://example.com/foo-1.2.3.tgz", expect: "https://example.com/foo-1.2.3.tgz"},
+ {name: "full URL, with authentication", ref: "http://username:password@example.com/foo-1.2.3.tgz", expect: "http://username:password@example.com/foo-1.2.3.tgz"},
+ {name: "reference, testing repo", ref: "testing/alpine", expect: "http://example.com/alpine-1.2.3.tgz"},
+ {name: "reference, version, testing repo", ref: "testing/alpine", version: "0.2.0", expect: "http://example.com/alpine-0.2.0.tgz"},
+ {name: "reference, version, malformed repo", ref: "malformed/alpine", version: "1.2.3", expect: "http://dl.example.com/alpine-1.2.3.tgz"},
+ {name: "reference, querystring repo", ref: "testing-querystring/alpine", expect: "http://example.com/alpine-1.2.3.tgz?key=value"},
+ {name: "reference, testing-relative repo", ref: "testing-relative/foo", expect: "http://example.com/helm/charts/foo-1.2.3.tgz"},
+ {name: "reference, testing-relative repo", ref: "testing-relative/bar", expect: "http://example.com/helm/bar-1.2.3.tgz"},
+ {name: "reference, testing-relative repo", ref: "testing-relative/baz", expect: "http://example.com/path/to/baz-1.2.3.tgz"},
+ {name: "reference, testing-relative-trailing-slash repo", ref: "testing-relative-trailing-slash/foo", expect: "http://example.com/helm/charts/foo-1.2.3.tgz"},
+ {name: "reference, testing-relative-trailing-slash repo", ref: "testing-relative-trailing-slash/bar", expect: "http://example.com/helm/bar-1.2.3.tgz"},
+ {name: "encoded URL", ref: "encoded-url/foobar", expect: "http://example.com/with%2Fslash/charts/foobar-4.2.1.tgz"},
+ {name: "full URL, HTTPS, irrelevant version", ref: "https://example.com/foo-1.2.3.tgz", version: "0.1.0", expect: "https://example.com/foo-1.2.3.tgz", fail: true},
+ {name: "full URL, file", ref: "file:///foo-1.2.3.tgz", fail: true},
+ {name: "invalid", ref: "invalid-1.2.3", fail: true},
+ {name: "not found", ref: "nosuchthing/invalid-1.2.3", fail: true},
+ {name: "ref with tag", ref: "oci://example.com/helm-charts/nginx:15.4.2", expect: "oci://example.com/helm-charts/nginx:15.4.2"},
+ {name: "no repository", ref: "oci://", fail: true},
+ {name: "oci ref", ref: "oci://example.com/helm-charts/nginx", version: "15.4.2", expect: "oci://example.com/helm-charts/nginx:15.4.2"},
+ {name: "oci ref with sha256 and version mismatch", ref: "oci://example.com/install/by/sha:0.1.1@sha256:d234555386402a5867ef0169fefe5486858b6d8d209eaf32fd26d29b16807fd6", version: "0.1.2", fail: true},
+ }
+
+ // Create a mock registry client for OCI references
+ registryClient, err := registry.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ RegistryClient: registryClient,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ }),
+ }
+
+ for _, tt := range tests {
+ _, u, err := c.ResolveChartVersion(tt.ref, tt.version)
+ if err != nil {
+ if tt.fail {
+ continue
+ }
+ t.Errorf("%s: failed with error %q", tt.name, err)
+ continue
+ }
+ if got := u.String(); got != tt.expect {
+ t.Errorf("%s: expected %s, got %s", tt.name, tt.expect, got)
+ }
+ }
+}
+
+func TestResolveChartOpts(t *testing.T) {
+ tests := []struct {
+ name, ref, version string
+ expect []getter.Option
+ }{
+ {
+ name: "repo with CA-file",
+ ref: "testing-ca-file/foo",
+ expect: []getter.Option{
+ getter.WithURL("https://example.com/foo-1.2.3.tgz"),
+ getter.WithTLSClientConfig("cert", "key", "ca"),
+ },
+ },
+ }
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ }),
+ }
+
+ // snapshot options
+ snapshotOpts := c.Options
+
+ for _, tt := range tests {
+ // reset chart downloader options for each test case
+ c.Options = snapshotOpts
+
+ expect, err := getter.NewHTTPGetter(tt.expect...)
+ if err != nil {
+ t.Errorf("%s: failed to setup http client: %s", tt.name, err)
+ continue
+ }
+
+ _, u, err := c.ResolveChartVersion(tt.ref, tt.version)
+ if err != nil {
+ t.Errorf("%s: failed with error %s", tt.name, err)
+ continue
+ }
+
+ got, err := getter.NewHTTPGetter(
+ append(
+ c.Options,
+ getter.WithURL(u.String()),
+ )...,
+ )
+ if err != nil {
+ t.Errorf("%s: failed to create http client: %s", tt.name, err)
+ continue
+ }
+
+ if *(got.(*getter.HTTPGetter)) != *(expect.(*getter.HTTPGetter)) {
+ t.Errorf("%s: expected %s, got %s", tt.name, expect, got)
+ }
+ }
+}
+
+func TestVerifyChart(t *testing.T) {
+ v, err := VerifyChart("testdata/signtest-0.1.0.tgz", "testdata/signtest-0.1.0.tgz.prov", "testdata/helm-test-key.pub")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The verification is tested at length in the provenance package. Here,
+ // we just want a quick sanity check that the v is not empty.
+ if len(v.FileHash) == 0 {
+ t.Error("Digest missing")
+ }
+}
+
+func TestIsTar(t *testing.T) {
+ tests := map[string]bool{
+ "foo.tgz": true,
+ "foo/bar/baz.tgz": true,
+ "foo-1.2.3.4.5.tgz": true,
+ "foo.tar.gz": false, // for our purposes
+ "foo.tgz.1": false,
+ "footgz": false,
+ }
+
+ for src, expect := range tests {
+ if isTar(src) != expect {
+ t.Errorf("%q should be %t", src, expect)
+ }
+ }
+}
+
+func TestDownloadTo(t *testing.T) {
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ repotest.WithMiddleware(repotest.BasicAuthMiddleware(t)),
+ )
+ defer srv.Stop()
+ if err := srv.CreateIndex(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ contentCache := t.TempDir()
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ Verify: VerifyAlways,
+ Keyring: "testdata/helm-test-key.pub",
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ }),
+ Options: []getter.Option{
+ getter.WithBasicAuth("username", "password"),
+ getter.WithPassCredentialsAll(false),
+ },
+ }
+ cname := "/signtest-0.1.0.tgz"
+ dest := srv.Root()
+ where, v, err := c.DownloadTo(srv.URL()+cname, "", dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if expect := filepath.Join(dest, cname); where != expect {
+ t.Errorf("Expected download to %s, got %s", expect, where)
+ }
+
+ if v.FileHash == "" {
+ t.Error("File hash was empty, but verification is required.")
+ }
+
+ if _, err := os.Stat(filepath.Join(dest, cname)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDownloadTo_TLS(t *testing.T) {
+ // Set up mock server w/ tls enabled
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ repotest.WithTLSConfig(repotest.MakeTestTLSConfig(t, "../../testdata")),
+ )
+ defer srv.Stop()
+ if err := srv.CreateIndex(); err != nil {
+ t.Fatal(err)
+ }
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ repoConfig := filepath.Join(srv.Root(), "repositories.yaml")
+ repoCache := srv.Root()
+ contentCache := t.TempDir()
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ Verify: VerifyAlways,
+ Keyring: "testdata/helm-test-key.pub",
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ }),
+ Options: []getter.Option{
+ getter.WithTLSClientConfig(
+ "",
+ "",
+ filepath.Join("../../testdata/rootca.crt"),
+ ),
+ },
+ }
+ cname := "test/signtest"
+ dest := srv.Root()
+ where, v, err := c.DownloadTo(cname, "", dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ target := filepath.Join(dest, "signtest-0.1.0.tgz")
+ if expect := target; where != expect {
+ t.Errorf("Expected download to %s, got %s", expect, where)
+ }
+
+ if v.FileHash == "" {
+ t.Error("File hash was empty, but verification is required.")
+ }
+
+ if _, err := os.Stat(target); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDownloadTo_VerifyLater(t *testing.T) {
+ ensure.HelmHome(t)
+
+ dest := t.TempDir()
+
+ // Set up a fake repo
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ )
+ defer srv.Stop()
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+ contentCache := t.TempDir()
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ Verify: VerifyLater,
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ }),
+ }
+ cname := "/signtest-0.1.0.tgz"
+ where, _, err := c.DownloadTo(srv.URL()+cname, "", dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if expect := filepath.Join(dest, cname); where != expect {
+ t.Errorf("Expected download to %s, got %s", expect, where)
+ }
+
+ if _, err := os.Stat(filepath.Join(dest, cname)); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := os.Stat(filepath.Join(dest, cname+".prov")); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestScanReposForURL(t *testing.T) {
+ c := ChartDownloader{
+ Out: os.Stderr,
+ Verify: VerifyLater,
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ }),
+ }
+
+ u := "http://example.com/alpine-0.2.0.tgz"
+ rf, err := repo.LoadFile(repoConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ entry, err := c.scanReposForURL(u, rf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if entry.Name != "testing" {
+ t.Errorf("Unexpected repo %q for URL %q", entry.Name, u)
+ }
+
+ // A lookup failure should produce an ErrNoOwnerRepo
+ u = "https://no.such.repo/foo/bar-1.23.4.tgz"
+ if _, err = c.scanReposForURL(u, rf); err != ErrNoOwnerRepo {
+ t.Fatalf("expected ErrNoOwnerRepo, got %v", err)
+ }
+}
+
+func TestDownloadToCache(t *testing.T) {
+ srv := repotest.NewTempServer(t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ )
+ defer srv.Stop()
+ if err := srv.CreateIndex(); err != nil {
+ t.Fatal(err)
+ }
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+
+ // The repo file needs to point to our server.
+ repoFile := filepath.Join(srv.Root(), "repositories.yaml")
+ repoCache := srv.Root()
+ contentCache := t.TempDir()
+
+ c := ChartDownloader{
+ Out: os.Stderr,
+ Verify: VerifyNever,
+ RepositoryConfig: repoFile,
+ RepositoryCache: repoCache,
+ Getters: getter.All(&cli.EnvSettings{
+ RepositoryConfig: repoFile,
+ RepositoryCache: repoCache,
+ ContentCache: contentCache,
+ }),
+ Cache: &DiskCache{Root: contentCache},
+ }
+
+ // Case 1: Chart not in cache, download it.
+ t.Run("download and cache chart", func(t *testing.T) {
+ // Clear cache for this test
+ os.RemoveAll(contentCache)
+ os.MkdirAll(contentCache, 0755)
+ c.Cache = &DiskCache{Root: contentCache}
+
+ pth, v, err := c.DownloadToCache("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ require.NotNil(t, v)
+
+ // Check that the file exists at the returned path
+ _, err = os.Stat(pth)
+ require.NoError(t, err, "chart should exist at returned path")
+
+ // Check that it's in the cache
+ digest, _, err := c.ResolveChartVersion("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ digestBytes, err := hex.DecodeString(digest)
+ require.NoError(t, err)
+ var digestArray [sha256.Size]byte
+ copy(digestArray[:], digestBytes)
+
+ cachePath, err := c.Cache.Get(digestArray, CacheChart)
+ require.NoError(t, err, "chart should now be in cache")
+ require.Equal(t, pth, cachePath)
+ })
+
+ // Case 2: Chart is in cache, get from cache.
+ t.Run("get chart from cache", func(t *testing.T) {
+ // The cache should be populated from the previous test.
+ // To prove it's coming from cache, we can stop the server.
+ // But repotest doesn't support restarting.
+ // Let's just call it again and assume it works if it's fast and doesn't error.
+ pth, v, err := c.DownloadToCache("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ require.NotNil(t, v)
+
+ _, err = os.Stat(pth)
+ require.NoError(t, err, "chart should exist at returned path")
+ })
+
+ // Case 3: Download with verification
+ t.Run("download and verify", func(t *testing.T) {
+ // Clear cache
+ os.RemoveAll(contentCache)
+ os.MkdirAll(contentCache, 0755)
+ c.Cache = &DiskCache{Root: contentCache}
+ c.Verify = VerifyAlways
+ c.Keyring = "testdata/helm-test-key.pub"
+
+ _, v, err := c.DownloadToCache("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ require.NotNil(t, v)
+ require.NotEmpty(t, v.FileHash, "verification should have a file hash")
+
+ // Check that both chart and prov are in cache
+ digest, _, err := c.ResolveChartVersion("test/signtest", "0.1.0")
+ require.NoError(t, err)
+ digestBytes, err := hex.DecodeString(digest)
+ require.NoError(t, err)
+ var digestArray [sha256.Size]byte
+ copy(digestArray[:], digestBytes)
+
+ _, err = c.Cache.Get(digestArray, CacheChart)
+ require.NoError(t, err, "chart should be in cache")
+ _, err = c.Cache.Get(digestArray, CacheProv)
+ require.NoError(t, err, "provenance file should be in cache")
+
+ // Reset for other tests
+ c.Verify = VerifyNever
+ c.Keyring = ""
+ })
+}
diff --git a/helm/pkg/downloader/doc.go b/helm/pkg/downloader/doc.go
new file mode 100644
index 000000000..848468090
--- /dev/null
+++ b/helm/pkg/downloader/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package downloader provides a library for downloading charts.
+
+This package contains various tools for downloading charts from repository
+servers, and then storing them in Helm-specific directory structures. This
+library contains many functions that depend on a specific
+filesystem layout.
+*/
+package downloader
diff --git a/helm/pkg/downloader/manager.go b/helm/pkg/downloader/manager.go
new file mode 100644
index 000000000..6043fbaaa
--- /dev/null
+++ b/helm/pkg/downloader/manager.go
@@ -0,0 +1,922 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "crypto"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ stdfs "io/fs"
+ "log"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+
+ "github.com/Masterminds/semver/v3"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/internal/resolver"
+ "helm.sh/helm/v4/internal/third_party/dep/fs"
+ "helm.sh/helm/v4/internal/urlutil"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+ "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// ErrRepoNotFound indicates that chart repositories can't be found in local repo cache.
+// The value of Repos is missing repos.
+type ErrRepoNotFound struct {
+ Repos []string
+}
+
+// Error implements the error interface.
+func (e ErrRepoNotFound) Error() string {
+ return fmt.Sprintf("no repository definition for %s", strings.Join(e.Repos, ", "))
+}
+
+// Manager handles the lifecycle of fetching, resolving, and storing dependencies.
+type Manager struct {
+ // Out is used to print warnings and notifications.
+ Out io.Writer
+ // ChartPath is the path to the unpacked base chart upon which this operates.
+ ChartPath string
+ // Verification indicates whether the chart should be verified.
+ Verify VerificationStrategy
+ // Debug is the global "--debug" flag
+ Debug bool
+ // Keyring is the key ring file.
+ Keyring string
+ // SkipUpdate indicates that the repository should not be updated first.
+ SkipUpdate bool
+ // Getter collection for the operation
+ Getters []getter.Provider
+ RegistryClient *registry.Client
+ RepositoryConfig string
+ RepositoryCache string
+
+ // ContentCache is a location where a cache of charts can be stored
+ ContentCache string
+}
+
+// Build rebuilds a local charts directory from a lockfile.
+//
+// If the lockfile is not present, this will run a Manager.Update()
+//
+// If SkipUpdate is set, this will not update the repository.
+func (m *Manager) Build() error {
+ c, err := m.loadChartDir()
+ if err != nil {
+ return err
+ }
+
+ // If a lock file is found, run a build from that. Otherwise, just do
+ // an update.
+ lock := c.Lock
+ if lock == nil {
+ return m.Update()
+ }
+
+ // Check that all of the repos we're dependent on actually exist.
+ req := c.Metadata.Dependencies
+
+ // If using apiVersion v1, calculate the hash before resolve repo names
+ // because resolveRepoNames will change req if req uses repo alias
+ // and Helm 2 calculate the digest from the original req
+ // Fix for: https://github.com/helm/helm/issues/7619
+ var v2Sum string
+ if c.Metadata.APIVersion == chart.APIVersionV1 {
+ v2Sum, err = resolver.HashV2Req(req)
+ if err != nil {
+ return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies")
+ }
+ }
+
+ if _, err := m.resolveRepoNames(req); err != nil {
+ return err
+ }
+
+ if sum, err := resolver.HashReq(req, lock.Dependencies); err != nil || sum != lock.Digest {
+ // If lock digest differs and chart is apiVersion v1, it maybe because the lock was built
+ // with Helm 2 and therefore should be checked with Helm v2 hash
+ // Fix for: https://github.com/helm/helm/issues/7233
+ if c.Metadata.APIVersion == chart.APIVersionV1 {
+ log.Println("warning: a valid Helm v3 hash was not found. Checking against Helm v2 hash...")
+ if v2Sum != lock.Digest {
+ return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies")
+ }
+ } else {
+ return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). Please update the dependencies")
+ }
+ }
+
+ // Check that all of the repos we're dependent on actually exist.
+ if err := m.hasAllRepos(lock.Dependencies); err != nil {
+ return err
+ }
+
+ if !m.SkipUpdate {
+ // For each repo in the file, update the cached copy of that repo
+ if err := m.UpdateRepositories(); err != nil {
+ return err
+ }
+ }
+
+ // Now we need to fetch every package here into charts/
+ return m.downloadAll(lock.Dependencies)
+}
+
+// Update updates a local charts directory.
+//
+// It first reads the Chart.yaml file, and then attempts to
+// negotiate versions based on that. It will download the versions
+// from remote chart repositories unless SkipUpdate is true.
+func (m *Manager) Update() error {
+ c, err := m.loadChartDir()
+ if err != nil {
+ return err
+ }
+
+ // If no dependencies are found, we consider this a successful
+ // completion.
+ req := c.Metadata.Dependencies
+ if req == nil {
+ return nil
+ }
+
+ // Get the names of the repositories the dependencies need that Helm is
+ // configured to know about.
+ repoNames, err := m.resolveRepoNames(req)
+ if err != nil {
+ return err
+ }
+
+ // For the repositories Helm is not configured to know about, ensure Helm
+ // has some information about them and, when possible, the index files
+ // locally.
+ // TODO(mattfarina): Repositories should be explicitly added by end users
+ // rather than automatic. In Helm v4 require users to add repositories. They
+ // should have to add them in order to make sure they are aware of the
+ // repositories and opt-in to any locations, for security.
+ repoNames, err = m.ensureMissingRepos(repoNames, req)
+ if err != nil {
+ return err
+ }
+
+ // For each of the repositories Helm is configured to know about, update
+ // the index information locally.
+ if !m.SkipUpdate {
+ if err := m.UpdateRepositories(); err != nil {
+ return err
+ }
+ }
+
+ // Now we need to find out which version of a chart best satisfies the
+ // dependencies in the Chart.yaml
+ lock, err := m.resolve(req, repoNames)
+ if err != nil {
+ return err
+ }
+
+ // Now we need to fetch every package here into charts/
+ if err := m.downloadAll(lock.Dependencies); err != nil {
+ return err
+ }
+
+ // downloadAll might overwrite dependency version, recalculate lock digest
+ newDigest, err := resolver.HashReq(req, lock.Dependencies)
+ if err != nil {
+ return err
+ }
+ lock.Digest = newDigest
+
+ // If the lock file hasn't changed, don't write a new one.
+ oldLock := c.Lock
+ if oldLock != nil && oldLock.Digest == lock.Digest {
+ return nil
+ }
+
+ // Finally, we need to write the lockfile.
+ return writeLock(m.ChartPath, lock, c.Metadata.APIVersion == chart.APIVersionV1)
+}
+
+func (m *Manager) loadChartDir() (*chart.Chart, error) {
+ if fi, err := os.Stat(m.ChartPath); err != nil {
+ return nil, fmt.Errorf("could not find %s: %w", m.ChartPath, err)
+ } else if !fi.IsDir() {
+ return nil, errors.New("only unpacked charts can be updated")
+ }
+ return loader.LoadDir(m.ChartPath)
+}
+
+// resolve takes a list of dependencies and translates them into an exact version to download.
+//
+// This returns a lock file, which has all of the dependencies normalized to a specific version.
+func (m *Manager) resolve(req []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) {
+ res := resolver.New(m.ChartPath, m.RepositoryCache, m.RegistryClient)
+ return res.Resolve(req, repoNames)
+}
+
+// downloadAll takes a list of dependencies and downloads them into charts/
+//
+// It will delete versions of the chart that exist on disk and might cause
+// a conflict.
+func (m *Manager) downloadAll(deps []*chart.Dependency) error {
+ repos, err := m.loadChartRepositories()
+ if err != nil {
+ return err
+ }
+
+ destPath := filepath.Join(m.ChartPath, "charts")
+ tmpPath := filepath.Join(m.ChartPath, fmt.Sprintf("tmpcharts-%d", os.Getpid()))
+
+ // Check if 'charts' directory is not actually a directory. If it does not exist, create it.
+ if fi, err := os.Stat(destPath); err == nil {
+ if !fi.IsDir() {
+ return fmt.Errorf("%q is not a directory", destPath)
+ }
+ } else if errors.Is(err, stdfs.ErrNotExist) {
+ if err := os.MkdirAll(destPath, 0755); err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("unable to retrieve file info for '%s': %v", destPath, err)
+ }
+
+ // Prepare tmpPath
+ if err := os.MkdirAll(tmpPath, 0755); err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpPath)
+
+ fmt.Fprintf(m.Out, "Saving %d charts\n", len(deps))
+ var saveError error
+ churls := make(map[string]struct{})
+ for _, dep := range deps {
+ // No repository means the chart is in charts directory
+ if dep.Repository == "" {
+ fmt.Fprintf(m.Out, "Dependency %s did not declare a repository. Assuming it exists in the charts directory\n", dep.Name)
+ // NOTE: we are only validating the local dependency conforms to the constraints. No copying to tmpPath is necessary.
+ chartPath := filepath.Join(destPath, dep.Name)
+ ch, err := loader.LoadDir(chartPath)
+ if err != nil {
+ return fmt.Errorf("unable to load chart '%s': %v", chartPath, err)
+ }
+
+ constraint, err := semver.NewConstraint(dep.Version)
+ if err != nil {
+ return fmt.Errorf("dependency %s has an invalid version/constraint format: %s", dep.Name, err)
+ }
+
+ v, err := semver.NewVersion(ch.Metadata.Version)
+ if err != nil {
+ return fmt.Errorf("invalid version %s for dependency %s: %s", dep.Version, dep.Name, err)
+ }
+
+ if !constraint.Check(v) {
+ saveError = fmt.Errorf("dependency %s at version %s does not satisfy the constraint %s", dep.Name, ch.Metadata.Version, dep.Version)
+ break
+ }
+ continue
+ }
+ if strings.HasPrefix(dep.Repository, "file://") {
+ if m.Debug {
+ fmt.Fprintf(m.Out, "Archiving %s from repo %s\n", dep.Name, dep.Repository)
+ }
+ ver, err := tarFromLocalDir(m.ChartPath, dep.Name, dep.Repository, dep.Version, tmpPath)
+ if err != nil {
+ saveError = err
+ break
+ }
+ dep.Version = ver
+ continue
+ }
+
+ // Any failure to resolve/download a chart should fail:
+ // https://github.com/helm/helm/issues/1439
+ churl, username, password, insecureSkipTLSVerify, passCredentialsAll, caFile, certFile, keyFile, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos)
+ if err != nil {
+ saveError = fmt.Errorf("could not find %s: %w", churl, err)
+ break
+ }
+
+ if _, ok := churls[churl]; ok {
+ fmt.Fprintf(m.Out, "Already downloaded %s from repo %s\n", dep.Name, dep.Repository)
+ continue
+ }
+
+ fmt.Fprintf(m.Out, "Downloading %s from repo %s\n", dep.Name, dep.Repository)
+
+ dl := ChartDownloader{
+ Out: m.Out,
+ Verify: m.Verify,
+ Keyring: m.Keyring,
+ RepositoryConfig: m.RepositoryConfig,
+ RepositoryCache: m.RepositoryCache,
+ ContentCache: m.ContentCache,
+ RegistryClient: m.RegistryClient,
+ Getters: m.Getters,
+ Options: []getter.Option{
+ getter.WithBasicAuth(username, password),
+ getter.WithPassCredentialsAll(passCredentialsAll),
+ getter.WithInsecureSkipVerifyTLS(insecureSkipTLSVerify),
+ getter.WithTLSClientConfig(certFile, keyFile, caFile),
+ },
+ }
+
+ version := ""
+ if registry.IsOCI(churl) {
+ churl, version, err = parseOCIRef(churl)
+ if err != nil {
+ return fmt.Errorf("could not parse OCI reference: %w", err)
+ }
+ dl.Options = append(dl.Options,
+ getter.WithRegistryClient(m.RegistryClient),
+ getter.WithTagName(version))
+ }
+
+ if _, _, err = dl.DownloadTo(churl, version, tmpPath); err != nil {
+ saveError = fmt.Errorf("could not download %s: %w", churl, err)
+ break
+ }
+
+ churls[churl] = struct{}{}
+ }
+
+ // TODO: this should probably be refactored to be a []error, so we can capture and provide more information rather than "last error wins".
+ if saveError == nil {
+ // now we can move all downloaded charts to destPath and delete outdated dependencies
+ if err := m.safeMoveDeps(deps, tmpPath, destPath); err != nil {
+ return err
+ }
+ } else {
+ fmt.Fprintln(m.Out, "Save error occurred: ", saveError)
+ return saveError
+ }
+ return nil
+}
+
+func parseOCIRef(chartRef string) (string, string, error) {
+ refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
+ caps := refTagRegexp.FindStringSubmatch(chartRef)
+ if len(caps) != 4 {
+ return "", "", fmt.Errorf("improperly formatted oci chart reference: %s", chartRef)
+ }
+ chartRef = caps[1]
+ tag := caps[3]
+
+ return chartRef, tag, nil
+}
+
+// safeMoveDeps moves all dependencies in the source and moves them into dest.
+//
+// It does this by first matching the file name to an expected pattern, then loading
+// the file to verify that it is a chart.
+//
+// Any charts in dest that do not exist in source are removed (barring local dependencies)
+//
+// Because it requires tar file introspection, it is more intensive than a basic move.
+//
+// This will only return errors that should stop processing entirely. Other errors
+// will emit log messages or be ignored.
+func (m *Manager) safeMoveDeps(deps []*chart.Dependency, source, dest string) error {
+ existsInSourceDirectory := map[string]bool{}
+ isLocalDependency := map[string]bool{}
+ sourceFiles, err := os.ReadDir(source)
+ if err != nil {
+ return err
+ }
+ // attempt to read destFiles; fail fast if we can't
+ destFiles, err := os.ReadDir(dest)
+ if err != nil {
+ return err
+ }
+
+ for _, dep := range deps {
+ if dep.Repository == "" {
+ isLocalDependency[dep.Name] = true
+ }
+ }
+
+ for _, file := range sourceFiles {
+ if file.IsDir() {
+ continue
+ }
+ filename := file.Name()
+ sourcefile := filepath.Join(source, filename)
+ destfile := filepath.Join(dest, filename)
+ existsInSourceDirectory[filename] = true
+ if _, err := loader.LoadFile(sourcefile); err != nil {
+ fmt.Fprintf(m.Out, "Could not verify %s for moving: %s (Skipping)", sourcefile, err)
+ continue
+ }
+ // NOTE: no need to delete the dest; os.Rename replaces it.
+ if err := fs.RenameWithFallback(sourcefile, destfile); err != nil {
+ fmt.Fprintf(m.Out, "Unable to move %s to charts dir %s (Skipping)", sourcefile, err)
+ continue
+ }
+ }
+
+ fmt.Fprintln(m.Out, "Deleting outdated charts")
+ // find all files that exist in dest that do not exist in source; delete them (outdated dependencies)
+ for _, file := range destFiles {
+ if !file.IsDir() && !existsInSourceDirectory[file.Name()] {
+ fname := filepath.Join(dest, file.Name())
+ ch, err := loader.LoadFile(fname)
+ if err != nil {
+ fmt.Fprintf(m.Out, "Could not verify %s for deletion: %s (Skipping)\n", fname, err)
+ continue
+ }
+ // local dependency - skip
+ if isLocalDependency[ch.Name()] {
+ continue
+ }
+ if err := os.Remove(fname); err != nil {
+ fmt.Fprintf(m.Out, "Could not delete %s: %s (Skipping)", fname, err)
+ continue
+ }
+ }
+ }
+
+ return nil
+}
+
+// hasAllRepos ensures that all of the referenced deps are in the local repo cache.
+func (m *Manager) hasAllRepos(deps []*chart.Dependency) error {
+ rf, err := loadRepoConfig(m.RepositoryConfig)
+ if err != nil {
+ return err
+ }
+ repos := rf.Repositories
+
+ // Verify that all repositories referenced in the deps are actually known
+ // by Helm.
+ missing := []string{}
+Loop:
+ for _, dd := range deps {
+ // If repo is from local path or OCI, continue
+ if strings.HasPrefix(dd.Repository, "file://") || registry.IsOCI(dd.Repository) {
+ continue
+ }
+
+ if dd.Repository == "" {
+ continue
+ }
+ for _, repo := range repos {
+ if urlutil.Equal(repo.URL, strings.TrimSuffix(dd.Repository, "/")) {
+ continue Loop
+ }
+ }
+ missing = append(missing, dd.Repository)
+ }
+ if len(missing) > 0 {
+ return ErrRepoNotFound{missing}
+ }
+ return nil
+}
+
+// ensureMissingRepos attempts to ensure the repository information for repos
+// not managed by Helm is present. This takes in the repoNames Helm is configured
+// to work with along with the chart dependencies. It will find the deps not
+// in a known repo and attempt to ensure the data is present for steps like
+// version resolution.
+func (m *Manager) ensureMissingRepos(repoNames map[string]string, deps []*chart.Dependency) (map[string]string, error) {
+
+ var ru []*repo.Entry
+
+ for _, dd := range deps {
+
+ // If the chart is in the local charts directory no repository needs
+ // to be specified.
+ if dd.Repository == "" {
+ continue
+ }
+
+ // When the repoName for a dependency is known we can skip ensuring
+ if _, ok := repoNames[dd.Name]; ok {
+ continue
+ }
+
+ // The generated repository name, which will result in an index being
+ // locally cached, has a name pattern of "helm-manager-" followed by a
+ // sha256 of the repo name. This assumes end users will never create
+ // repositories with these names pointing to other repositories. Using
+ // this method of naming allows the existing repository pulling and
+ // resolution code to do most of the work.
+ rn, err := key(dd.Repository)
+ if err != nil {
+ return repoNames, err
+ }
+ rn = managerKeyPrefix + rn
+
+ repoNames[dd.Name] = rn
+
+ // Assuming the repository is generally available. For Helm managed
+ // access controls the repository needs to be added through the user
+ // managed system. This path will work for public charts, like those
+ // supplied by Bitnami, but not for protected charts, like corp ones
+ // behind a username and pass.
+ ri := &repo.Entry{
+ Name: rn,
+ URL: dd.Repository,
+ }
+ ru = append(ru, ri)
+ }
+
+ // Calls to UpdateRepositories (a public function) will only update
+ // repositories configured by the user. Here we update repos found in
+ // the dependencies that are not known to the user if update skipping
+ // is not configured.
+ if !m.SkipUpdate && len(ru) > 0 {
+ fmt.Fprintln(m.Out, "Getting updates for unmanaged Helm repositories...")
+ if err := m.parallelRepoUpdate(ru); err != nil {
+ return repoNames, err
+ }
+ }
+
+ return repoNames, nil
+}
+
+// resolveRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file
+// and replaces aliased repository URLs into resolved URLs in dependencies.
+func (m *Manager) resolveRepoNames(deps []*chart.Dependency) (map[string]string, error) {
+ rf, err := loadRepoConfig(m.RepositoryConfig)
+ if err != nil {
+ if errors.Is(err, stdfs.ErrNotExist) {
+ return make(map[string]string), nil
+ }
+ return nil, err
+ }
+ repos := rf.Repositories
+
+ reposMap := make(map[string]string)
+
+ // Verify that all repositories referenced in the deps are actually known
+ // by Helm.
+ missing := []string{}
+ for _, dd := range deps {
+ // Don't map the repository, we don't need to download chart from charts directory
+ if dd.Repository == "" {
+ continue
+ }
+ // if dep chart is from local path, verify the path is valid
+ if strings.HasPrefix(dd.Repository, "file://") {
+ if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil {
+ return nil, err
+ }
+
+ if m.Debug {
+ fmt.Fprintf(m.Out, "Repository from local path: %s\n", dd.Repository)
+ }
+ reposMap[dd.Name] = dd.Repository
+ continue
+ }
+
+ if registry.IsOCI(dd.Repository) {
+ reposMap[dd.Name] = dd.Repository
+ continue
+ }
+
+ found := false
+
+ for _, repo := range repos {
+ if (strings.HasPrefix(dd.Repository, "@") && strings.TrimPrefix(dd.Repository, "@") == repo.Name) ||
+ (strings.HasPrefix(dd.Repository, "alias:") && strings.TrimPrefix(dd.Repository, "alias:") == repo.Name) {
+ found = true
+ dd.Repository = repo.URL
+ reposMap[dd.Name] = repo.Name
+ break
+ } else if urlutil.Equal(repo.URL, dd.Repository) {
+ found = true
+ reposMap[dd.Name] = repo.Name
+ break
+ }
+ }
+ if !found {
+ repository := dd.Repository
+ // Add if URL
+ _, err := url.ParseRequestURI(repository)
+ if err == nil {
+ reposMap[repository] = repository
+ continue
+ }
+ missing = append(missing, repository)
+ }
+ }
+ if len(missing) > 0 {
+ errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", "))
+ // It is common for people to try to enter "stable" as a repository instead of the actual URL.
+ // For this case, let's give them a suggestion.
+ containsNonURL := false
+ for _, repo := range missing {
+ if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") {
+ containsNonURL = true
+ }
+ }
+ if containsNonURL {
+ errorMessage += `
+Note that repositories must be URLs or aliases. For example, to refer to the "example"
+repository, use "https://charts.example.com/" or "@example" instead of
+"example". Don't forget to add the repo, too ('helm repo add').`
+ }
+ return nil, errors.New(errorMessage)
+ }
+ return reposMap, nil
+}
+
+// UpdateRepositories updates all of the local repos to the latest.
+func (m *Manager) UpdateRepositories() error {
+ rf, err := loadRepoConfig(m.RepositoryConfig)
+ if err != nil {
+ return err
+ }
+ repos := rf.Repositories
+ if len(repos) > 0 {
+ fmt.Fprintln(m.Out, "Hang tight while we grab the latest from your chart repositories...")
+ // This prints warnings straight to out.
+ if err := m.parallelRepoUpdate(repos); err != nil {
+ return err
+ }
+ fmt.Fprintln(m.Out, "Update Complete. ⎈Happy Helming!⎈")
+ }
+ return nil
+}
+
+// Filter out duplicate repos by URL, including those with trailing slashes.
+func dedupeRepos(repos []*repo.Entry) []*repo.Entry {
+ seen := make(map[string]*repo.Entry)
+ for _, r := range repos {
+ // Normalize URL by removing trailing slashes.
+ seenURL := strings.TrimSuffix(r.URL, "/")
+ seen[seenURL] = r
+ }
+ var unique []*repo.Entry
+ for _, r := range seen {
+ unique = append(unique, r)
+ }
+ return unique
+}
+
+func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error {
+
+ var wg sync.WaitGroup
+
+ localRepos := dedupeRepos(repos)
+
+ for _, c := range localRepos {
+ r, err := repo.NewChartRepository(c, m.Getters)
+ if err != nil {
+ return err
+ }
+ r.CachePath = m.RepositoryCache
+ wg.Add(1)
+ go func(r *repo.ChartRepository) {
+ if _, err := r.DownloadIndexFile(); err != nil {
+ // For those dependencies that are not known to helm and using a
+ // generated key name we display the repo url.
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) {
+ fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository:\n\t%s\n", r.Config.URL, err)
+ } else {
+ fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", r.Config.Name, r.Config.URL, err)
+ }
+ } else {
+ // For those dependencies that are not known to helm and using a
+ // generated key name we display the repo url.
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) {
+ fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.URL)
+ } else {
+ fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.Name)
+ }
+ }
+ wg.Done()
+ }(r)
+ }
+ wg.Wait()
+
+ return nil
+}
+
+// findChartURL searches the cache of repo data for a chart that has the name and the repoURL specified.
+//
+// 'name' is the name of the chart. Version is an exact semver, or an empty string. If empty, the
+// newest version will be returned.
+//
+// repoURL is the repository to search
+//
+// If it finds a URL that is "relative", it will prepend the repoURL.
+func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, insecureSkipTLSVerify, passCredentialsAll bool, caFile, certFile, keyFile string, err error) {
+ if registry.IsOCI(repoURL) {
+ return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, false, "", "", "", nil
+ }
+
+ for _, cr := range repos {
+ if urlutil.Equal(repoURL, cr.Config.URL) {
+ var entry repo.ChartVersions
+ entry, err = findEntryByName(name, cr)
+ if err != nil {
+ // TODO: Where linting is skipped in this function we should
+ // refactor to remove naked returns while ensuring the same
+ // behavior
+ //nolint:nakedret
+ return
+ }
+ var ve *repo.ChartVersion
+ ve, err = findVersionedEntry(version, entry)
+ if err != nil {
+ //nolint:nakedret
+ return
+ }
+ url, err = repo.ResolveReferenceURL(repoURL, ve.URLs[0])
+ if err != nil {
+ //nolint:nakedret
+ return
+ }
+ username = cr.Config.Username
+ password = cr.Config.Password
+ passCredentialsAll = cr.Config.PassCredentialsAll
+ insecureSkipTLSVerify = cr.Config.InsecureSkipTLSVerify
+ caFile = cr.Config.CAFile
+ certFile = cr.Config.CertFile
+ keyFile = cr.Config.KeyFile
+ //nolint:nakedret
+ return
+ }
+ }
+ url, err = repo.FindChartInRepoURL(repoURL, name, m.Getters, repo.WithChartVersion(version), repo.WithClientTLS(certFile, keyFile, caFile))
+ if err == nil {
+ return url, username, password, false, false, "", "", "", err
+ }
+ err = fmt.Errorf("chart %s not found in %s: %w", name, repoURL, err)
+ return url, username, password, false, false, "", "", "", err
+}
+
+// findEntryByName finds an entry in the chart repository whose name matches the given name.
+//
+// It returns the ChartVersions for that entry.
+func findEntryByName(name string, cr *repo.ChartRepository) (repo.ChartVersions, error) {
+ for ename, entry := range cr.IndexFile.Entries {
+ if ename == name {
+ return entry, nil
+ }
+ }
+ return nil, errors.New("entry not found")
+}
+
+// findVersionedEntry takes a ChartVersions list and returns a single chart version that satisfies the version constraints.
+//
+// If version is empty, the first chart found is returned.
+func findVersionedEntry(version string, vers repo.ChartVersions) (*repo.ChartVersion, error) {
+ for _, verEntry := range vers {
+ if len(verEntry.URLs) == 0 {
+ // Not a legit entry.
+ continue
+ }
+
+ if version == "" || versionEquals(version, verEntry.Version) {
+ return verEntry, nil
+ }
+ }
+ return nil, errors.New("no matching version")
+}
+
+func versionEquals(v1, v2 string) bool {
+ sv1, err := semver.NewVersion(v1)
+ if err != nil {
+ // Fallback to string comparison.
+ return v1 == v2
+ }
+ sv2, err := semver.NewVersion(v2)
+ if err != nil {
+ return false
+ }
+ return sv1.Equal(sv2)
+}
+
+// loadChartRepositories reads the repositories.yaml, and then builds a map of
+// ChartRepositories.
+//
+// The key is the local name (which is only present in the repositories.yaml).
+func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, error) {
+ indices := map[string]*repo.ChartRepository{}
+
+ // Load repositories.yaml file
+ rf, err := loadRepoConfig(m.RepositoryConfig)
+ if err != nil {
+ return indices, fmt.Errorf("failed to load %s: %w", m.RepositoryConfig, err)
+ }
+
+ for _, re := range rf.Repositories {
+ lname := re.Name
+ idxFile := filepath.Join(m.RepositoryCache, helmpath.CacheIndexFile(lname))
+ index, err := repo.LoadIndexFile(idxFile)
+ if err != nil {
+ return indices, err
+ }
+
+ // TODO: use constructor
+ cr := &repo.ChartRepository{
+ Config: re,
+ IndexFile: index,
+ }
+ indices[lname] = cr
+ }
+ return indices, nil
+}
+
+// writeLock writes a lockfile to disk
+func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error {
+ data, err := yaml.Marshal(lock)
+ if err != nil {
+ return err
+ }
+ lockfileName := "Chart.lock"
+ if legacyLockfile {
+ lockfileName = "requirements.lock"
+ }
+ dest := filepath.Join(chartpath, lockfileName)
+
+ info, err := os.Lstat(dest)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("error getting info for %q: %w", dest, err)
+ } else if err == nil {
+ if info.Mode()&os.ModeSymlink != 0 {
+ link, err := os.Readlink(dest)
+ if err != nil {
+ return fmt.Errorf("error reading symlink for %q: %w", dest, err)
+ }
+ return fmt.Errorf("the %s file is a symlink to %q", lockfileName, link)
+ }
+ }
+
+ return os.WriteFile(dest, data, 0644)
+}
+
+// archive a dep chart from local directory and save it into destPath
+func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, error) {
+ if !strings.HasPrefix(repo, "file://") {
+ return "", fmt.Errorf("wrong format: chart %s repository %s", name, repo)
+ }
+
+ origPath, err := resolver.GetLocalPath(repo, chartpath)
+ if err != nil {
+ return "", err
+ }
+
+ ch, err := loader.LoadDir(origPath)
+ if err != nil {
+ return "", err
+ }
+
+ constraint, err := semver.NewConstraint(version)
+ if err != nil {
+ return "", fmt.Errorf("dependency %s has an invalid version/constraint format: %w", name, err)
+ }
+
+ v, err := semver.NewVersion(ch.Metadata.Version)
+ if err != nil {
+ return "", err
+ }
+
+ if constraint.Check(v) {
+ _, err = chartutil.Save(ch, destPath)
+ return ch.Metadata.Version, err
+ }
+
+ return "", fmt.Errorf("can't get a valid version for dependency %s", name)
+}
+
+// The prefix to use for cache keys created by the manager for repo names
+const managerKeyPrefix = "helm-manager-"
+
+// key is used to turn a name, such as a repository url, into a filesystem
+// safe name that is unique for querying. To accomplish this a unique hash of
+// the string is used.
+func key(name string) (string, error) {
+ in := strings.NewReader(name)
+ hash := crypto.SHA256.New()
+ if _, err := io.Copy(hash, in); err != nil {
+ return "", nil
+ }
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
diff --git a/helm/pkg/downloader/manager_test.go b/helm/pkg/downloader/manager_test.go
new file mode 100644
index 000000000..9e27f183f
--- /dev/null
+++ b/helm/pkg/downloader/manager_test.go
@@ -0,0 +1,769 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package downloader
+
+import (
+ "bytes"
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/repo/v1"
+ "helm.sh/helm/v4/pkg/repo/v1/repotest"
+)
+
+func TestVersionEquals(t *testing.T) {
+ tests := []struct {
+ name, v1, v2 string
+ expect bool
+ }{
+ {name: "semver match", v1: "1.2.3-beta.11", v2: "1.2.3-beta.11", expect: true},
+ {name: "semver match, build info", v1: "1.2.3-beta.11+a", v2: "1.2.3-beta.11+b", expect: true},
+ {name: "string match", v1: "abcdef123", v2: "abcdef123", expect: true},
+ {name: "semver mismatch", v1: "1.2.3-beta.11", v2: "1.2.3-beta.22", expect: false},
+ {name: "semver mismatch, invalid semver", v1: "1.2.3-beta.11", v2: "stinkycheese", expect: false},
+ }
+
+ for _, tt := range tests {
+ if versionEquals(tt.v1, tt.v2) != tt.expect {
+ t.Errorf("%s: failed comparison of %q and %q (expect equal: %t)", tt.name, tt.v1, tt.v2, tt.expect)
+ }
+ }
+}
+
+func TestFindChartURL(t *testing.T) {
+ var b bytes.Buffer
+ m := &Manager{
+ Out: &b,
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ }
+ repos, err := m.loadChartRepositories()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ name := "alpine"
+ version := "0.1.0"
+ repoURL := "http://example.com/charts"
+
+ churl, username, password, insecureSkipTLSVerify, passcredentialsall, _, _, _, err := m.findChartURL(name, version, repoURL, repos)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if churl != "https://charts.helm.sh/stable/alpine-0.1.0.tgz" {
+ t.Errorf("Unexpected URL %q", churl)
+ }
+ if username != "" {
+ t.Errorf("Unexpected username %q", username)
+ }
+ if password != "" {
+ t.Errorf("Unexpected password %q", password)
+ }
+ if passcredentialsall != false {
+ t.Errorf("Unexpected passcredentialsall %t", passcredentialsall)
+ }
+ if insecureSkipTLSVerify {
+ t.Errorf("Unexpected insecureSkipTLSVerify %t", insecureSkipTLSVerify)
+ }
+
+ name = "tlsfoo"
+ version = "1.2.3"
+ repoURL = "https://example-https-insecureskiptlsverify.com"
+
+ churl, username, password, insecureSkipTLSVerify, passcredentialsall, _, _, _, err = m.findChartURL(name, version, repoURL, repos)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !insecureSkipTLSVerify {
+ t.Errorf("Unexpected insecureSkipTLSVerify %t", insecureSkipTLSVerify)
+ }
+ if churl != "https://example.com/tlsfoo-1.2.3.tgz" {
+ t.Errorf("Unexpected URL %q", churl)
+ }
+ if username != "" {
+ t.Errorf("Unexpected username %q", username)
+ }
+ if password != "" {
+ t.Errorf("Unexpected password %q", password)
+ }
+ if passcredentialsall != false {
+ t.Errorf("Unexpected passcredentialsall %t", passcredentialsall)
+ }
+
+ name = "foo"
+ version = "1.2.3"
+ repoURL = "http://example.com/helm"
+
+ churl, username, password, insecureSkipTLSVerify, passcredentialsall, _, _, _, err = m.findChartURL(name, version, repoURL, repos)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if churl != "http://example.com/helm/charts/foo-1.2.3.tgz" {
+ t.Errorf("Unexpected URL %q", churl)
+ }
+ if username != "" {
+ t.Errorf("Unexpected username %q", username)
+ }
+ if password != "" {
+ t.Errorf("Unexpected password %q", password)
+ }
+ if passcredentialsall != false {
+ t.Errorf("Unexpected passcredentialsall %t", passcredentialsall)
+ }
+ if insecureSkipTLSVerify {
+ t.Errorf("Unexpected insecureSkipTLSVerify %t", insecureSkipTLSVerify)
+ }
+}
+
+func TestGetRepoNames(t *testing.T) {
+ b := bytes.NewBuffer(nil)
+ m := &Manager{
+ Out: b,
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ }
+ tests := []struct {
+ name string
+ req []*chart.Dependency
+ expect map[string]string
+ err bool
+ }{
+ {
+ name: "no repo definition, but references a url",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "http://example.com/test"},
+ },
+ expect: map[string]string{"http://example.com/test": "http://example.com/test"},
+ },
+ {
+ name: "no repo definition failure -- stable repo",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "stable"},
+ },
+ err: true,
+ },
+ {
+ name: "no repo definition failure",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "http://example.com"},
+ },
+ expect: map[string]string{"oedipus-rex": "testing"},
+ },
+ {
+ name: "repo from local path",
+ req: []*chart.Dependency{
+ {Name: "local-dep", Repository: "file://./testdata/signtest"},
+ },
+ expect: map[string]string{"local-dep": "file://./testdata/signtest"},
+ },
+ {
+ name: "repo alias (alias:)",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "alias:testing"},
+ },
+ expect: map[string]string{"oedipus-rex": "testing"},
+ },
+ {
+ name: "repo alias (@)",
+ req: []*chart.Dependency{
+ {Name: "oedipus-rex", Repository: "@testing"},
+ },
+ expect: map[string]string{"oedipus-rex": "testing"},
+ },
+ {
+ name: "repo from local chart under charts path",
+ req: []*chart.Dependency{
+ {Name: "local-subchart", Repository: ""},
+ },
+ expect: map[string]string{},
+ },
+ }
+
+ for _, tt := range tests {
+ l, err := m.resolveRepoNames(tt.req)
+ if err != nil {
+ if tt.err {
+ continue
+ }
+ t.Fatal(err)
+ }
+
+ if tt.err {
+ t.Fatalf("Expected error in test %q", tt.name)
+ }
+
+ // m1 and m2 are the maps we want to compare
+ eq := reflect.DeepEqual(l, tt.expect)
+ if !eq {
+ t.Errorf("%s: expected map %v, got %v", tt.name, l, tt.name)
+ }
+ }
+}
+
+func TestDownloadAll(t *testing.T) {
+ chartPath := t.TempDir()
+ m := &Manager{
+ Out: new(bytes.Buffer),
+ RepositoryConfig: repoConfig,
+ RepositoryCache: repoCache,
+ ChartPath: chartPath,
+ }
+ signtest, err := loader.LoadDir(filepath.Join("testdata", "signtest"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := chartutil.SaveDir(signtest, filepath.Join(chartPath, "testdata")); err != nil {
+ t.Fatal(err)
+ }
+
+ local, err := loader.LoadDir(filepath.Join("testdata", "local-subchart"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := chartutil.SaveDir(local, filepath.Join(chartPath, "charts")); err != nil {
+ t.Fatal(err)
+ }
+
+ signDep := &chart.Dependency{
+ Name: signtest.Name(),
+ Repository: "file://./testdata/signtest",
+ Version: signtest.Metadata.Version,
+ }
+ localDep := &chart.Dependency{
+ Name: local.Name(),
+ Repository: "",
+ Version: local.Metadata.Version,
+ }
+
+ // create a 'tmpcharts' directory to test #5567
+ if err := os.MkdirAll(filepath.Join(chartPath, "tmpcharts"), 0755); err != nil {
+ t.Fatal(err)
+ }
+ if err := m.downloadAll([]*chart.Dependency{signDep, localDep}); err != nil {
+ t.Error(err)
+ }
+
+ if _, err := os.Stat(filepath.Join(chartPath, "charts", "signtest-0.1.0.tgz")); errors.Is(err, fs.ErrNotExist) {
+ t.Error(err)
+ }
+
+ // A chart with a bad name like this cannot be loaded and saved. Handling in
+ // the loading and saving will return an error about the invalid name. In
+ // this case, the chart needs to be created directly.
+ badchartyaml := `apiVersion: v2
+description: A Helm chart for Kubernetes
+name: ../bad-local-subchart
+version: 0.1.0`
+ if err := os.MkdirAll(filepath.Join(chartPath, "testdata", "bad-local-subchart"), 0755); err != nil {
+ t.Fatal(err)
+ }
+ err = os.WriteFile(filepath.Join(chartPath, "testdata", "bad-local-subchart", "Chart.yaml"), []byte(badchartyaml), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ badLocalDep := &chart.Dependency{
+ Name: "../bad-local-subchart",
+ Repository: "file://./testdata/bad-local-subchart",
+ Version: "0.1.0",
+ }
+
+ err = m.downloadAll([]*chart.Dependency{badLocalDep})
+ if err == nil {
+ t.Fatal("Expected error for bad dependency name")
+ }
+}
+
+func TestUpdateBeforeBuild(t *testing.T) {
+ // Set up a fake repo
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ )
+ defer srv.Stop()
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+
+ // Save dep
+ d := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "dep-chart",
+ Version: "0.1.0",
+ APIVersion: "v1",
+ },
+ }
+ if err := chartutil.SaveDir(d, dir()); err != nil {
+ t.Fatal(err)
+ }
+ // Save a chart
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "with-dependency",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{{
+ Name: d.Metadata.Name,
+ Version: ">=0.1.0",
+ Repository: "file://../dep-chart",
+ }},
+ },
+ }
+ if err := chartutil.SaveDir(c, dir()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set-up a manager
+ b := bytes.NewBuffer(nil)
+ g := getter.Providers{getter.Provider{
+ Schemes: []string{"http", "https"},
+ New: getter.NewHTTPGetter,
+ }}
+ m := &Manager{
+ ChartPath: dir(c.Metadata.Name),
+ Out: b,
+ Getters: g,
+ RepositoryConfig: dir("repositories.yaml"),
+ RepositoryCache: dir(),
+ }
+
+ // Update before Build. see issue: https://github.com/helm/helm/issues/7101
+ if err := m.Update(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := m.Build(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestUpdateWithNoRepo is for the case of a dependency that has no repo listed.
+// This happens when the dependency is in the charts directory and does not need
+// to be fetched.
+func TestUpdateWithNoRepo(t *testing.T) {
+ // Set up a fake repo
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ )
+ defer srv.Stop()
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+
+ // Setup the dependent chart
+ d := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "dep-chart",
+ Version: "0.1.0",
+ APIVersion: "v1",
+ },
+ }
+
+ // Save a chart with the dependency
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "with-dependency",
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{{
+ Name: d.Metadata.Name,
+ Version: "0.1.0",
+ }},
+ },
+ }
+ if err := chartutil.SaveDir(c, dir()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Save dependent chart into the parents charts directory. If the chart is
+ // not in the charts directory Helm will return an error that it is not
+ // found.
+ if err := chartutil.SaveDir(d, dir(c.Metadata.Name, "charts")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set-up a manager
+ b := bytes.NewBuffer(nil)
+ g := getter.Providers{getter.Provider{
+ Schemes: []string{"http", "https"},
+ New: getter.NewHTTPGetter,
+ }}
+ m := &Manager{
+ ChartPath: dir(c.Metadata.Name),
+ Out: b,
+ Getters: g,
+ RepositoryConfig: dir("repositories.yaml"),
+ RepositoryCache: dir(),
+ }
+
+ // Test the update
+ if err := m.Update(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// This function is the skeleton test code of failing tests for #6416 and #6871 and bugs due to #5874.
+//
+// This function is used by below tests that ensures success of build operation
+// with optional fields, alias, condition, tags, and even with ranged version.
+// Parent chart includes local-subchart 0.1.0 subchart from a fake repository, by default.
+// If each of these main fields (name, version, repository) is not supplied by dep param, default value will be used.
+func checkBuildWithOptionalFields(t *testing.T, chartName string, dep chart.Dependency) {
+ t.Helper()
+ // Set up a fake repo
+ srv := repotest.NewTempServer(
+ t,
+ repotest.WithChartSourceGlob("testdata/*.tgz*"),
+ )
+ defer srv.Stop()
+ if err := srv.LinkIndices(); err != nil {
+ t.Fatal(err)
+ }
+ dir := func(p ...string) string {
+ return filepath.Join(append([]string{srv.Root()}, p...)...)
+ }
+
+ // Set main fields if not exist
+ if dep.Name == "" {
+ dep.Name = "local-subchart"
+ }
+ if dep.Version == "" {
+ dep.Version = "0.1.0"
+ }
+ if dep.Repository == "" {
+ dep.Repository = srv.URL()
+ }
+
+ // Save a chart
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: chartName,
+ Version: "0.1.0",
+ APIVersion: "v2",
+ Dependencies: []*chart.Dependency{&dep},
+ },
+ }
+ if err := chartutil.SaveDir(c, dir()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set-up a manager
+ b := bytes.NewBuffer(nil)
+ g := getter.Providers{getter.Provider{
+ Schemes: []string{"http", "https"},
+ New: getter.NewHTTPGetter,
+ }}
+ contentCache := t.TempDir()
+ m := &Manager{
+ ChartPath: dir(chartName),
+ Out: b,
+ Getters: g,
+ RepositoryConfig: dir("repositories.yaml"),
+ RepositoryCache: dir(),
+ ContentCache: contentCache,
+ }
+
+ // First build will update dependencies and create Chart.lock file.
+ if err := m.Build(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Second build should be passed. See PR #6655.
+ if err := m.Build(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBuild_WithoutOptionalFields(t *testing.T) {
+ // Dependency has main fields only (name/version/repository)
+ checkBuildWithOptionalFields(t, "without-optional-fields", chart.Dependency{})
+}
+
+func TestBuild_WithSemVerRange(t *testing.T) {
+ // Dependency version is the form of SemVer range
+ checkBuildWithOptionalFields(t, "with-semver-range", chart.Dependency{
+ Version: ">=0.1.0",
+ })
+}
+
+func TestBuild_WithAlias(t *testing.T) {
+ // Dependency has an alias
+ checkBuildWithOptionalFields(t, "with-alias", chart.Dependency{
+ Alias: "local-subchart-alias",
+ })
+}
+
+func TestBuild_WithCondition(t *testing.T) {
+ // Dependency has a condition
+ checkBuildWithOptionalFields(t, "with-condition", chart.Dependency{
+ Condition: "some.condition",
+ })
+}
+
+func TestBuild_WithTags(t *testing.T) {
+ // Dependency has several tags
+ checkBuildWithOptionalFields(t, "with-tags", chart.Dependency{
+ Tags: []string{"tag1", "tag2"},
+ })
+}
+
+// Failing test for #6871
+func TestBuild_WithRepositoryAlias(t *testing.T) {
+ // Dependency repository is aliased in Chart.yaml
+ checkBuildWithOptionalFields(t, "with-repository-alias", chart.Dependency{
+ Repository: "@test",
+ })
+}
+
+func TestErrRepoNotFound_Error(t *testing.T) {
+ type fields struct {
+ Repos []string
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want string
+ }{
+ {
+ name: "OK",
+ fields: fields{
+ Repos: []string{"https://charts1.example.com", "https://charts2.example.com"},
+ },
+ want: "no repository definition for https://charts1.example.com, https://charts2.example.com",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ e := ErrRepoNotFound{
+ Repos: tt.fields.Repos,
+ }
+ if got := e.Error(); got != tt.want {
+ t.Errorf("Error() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestKey(t *testing.T) {
+ tests := []struct {
+ name string
+ expect string
+ }{
+ {
+ name: "file:////tmp",
+ expect: "afeed3459e92a874f6373aca264ce1459bfa91f9c1d6612f10ae3dc2ee955df3",
+ },
+ {
+ name: "https://example.com/charts",
+ expect: "7065c57c94b2411ad774638d76823c7ccb56415441f5ab2f5ece2f3845728e5d",
+ },
+ {
+ name: "foo/bar/baz",
+ expect: "15c46a4f8a189ae22f36f201048881d6c090c93583bedcf71f5443fdef224c82",
+ },
+ }
+
+ for _, tt := range tests {
+ o, err := key(tt.name)
+ if err != nil {
+ t.Fatalf("unable to generate key for %q with error: %s", tt.name, err)
+ }
+ if o != tt.expect {
+ t.Errorf("wrong key name generated for %q, expected %q but got %q", tt.name, tt.expect, o)
+ }
+ }
+}
+
+// Test dedupeRepos tests that the dedupeRepos function correctly deduplicates
+func TestDedupeRepos(t *testing.T) {
+ tests := []struct {
+ name string
+ repos []*repo.Entry
+ want []*repo.Entry
+ }{
+ {
+ name: "no duplicates",
+ repos: []*repo.Entry{
+ {
+ URL: "https://example.com/charts",
+ },
+ {
+ URL: "https://example.com/charts2",
+ },
+ },
+ want: []*repo.Entry{
+ {
+ URL: "https://example.com/charts",
+ },
+ {
+ URL: "https://example.com/charts2",
+ },
+ },
+ },
+ {
+ name: "duplicates",
+ repos: []*repo.Entry{
+ {
+ URL: "https://example.com/charts",
+ },
+ {
+ URL: "https://example.com/charts",
+ },
+ },
+ want: []*repo.Entry{
+ {
+ URL: "https://example.com/charts",
+ },
+ },
+ },
+ {
+ name: "duplicates with trailing slash",
+ repos: []*repo.Entry{
+ {
+ URL: "https://example.com/charts",
+ },
+ {
+ URL: "https://example.com/charts/",
+ },
+ },
+ want: []*repo.Entry{
+ {
+ // the last one wins
+ URL: "https://example.com/charts/",
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := dedupeRepos(tt.repos)
+ assert.ElementsMatch(t, tt.want, got)
+ })
+ }
+}
+
+func TestWriteLock(t *testing.T) {
+ fixedTime, err := time.Parse(time.RFC3339, "2025-07-04T00:00:00Z")
+ assert.NoError(t, err)
+ lock := &chart.Lock{
+ Generated: fixedTime,
+ Digest: "sha256:12345",
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "fantastic-chart",
+ Version: "1.2.3",
+ Repository: "https://example.com/charts",
+ },
+ },
+ }
+ expectedContent, err := yaml.Marshal(lock)
+ assert.NoError(t, err)
+
+ t.Run("v2 lock file", func(t *testing.T) {
+ dir := t.TempDir()
+ err := writeLock(dir, lock, false)
+ assert.NoError(t, err)
+
+ lockfilePath := filepath.Join(dir, "Chart.lock")
+ _, err = os.Stat(lockfilePath)
+ assert.NoError(t, err, "Chart.lock should exist")
+
+ content, err := os.ReadFile(lockfilePath)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+
+ // Check that requirements.lock does not exist
+ _, err = os.Stat(filepath.Join(dir, "requirements.lock"))
+ assert.Error(t, err)
+ assert.True(t, os.IsNotExist(err))
+ })
+
+ t.Run("v1 lock file", func(t *testing.T) {
+ dir := t.TempDir()
+ err := writeLock(dir, lock, true)
+ assert.NoError(t, err)
+
+ lockfilePath := filepath.Join(dir, "requirements.lock")
+ _, err = os.Stat(lockfilePath)
+ assert.NoError(t, err, "requirements.lock should exist")
+
+ content, err := os.ReadFile(lockfilePath)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+
+ // Check that Chart.lock does not exist
+ _, err = os.Stat(filepath.Join(dir, "Chart.lock"))
+ assert.Error(t, err)
+ assert.True(t, os.IsNotExist(err))
+ })
+
+ t.Run("overwrite existing lock file", func(t *testing.T) {
+ dir := t.TempDir()
+ lockfilePath := filepath.Join(dir, "Chart.lock")
+ assert.NoError(t, os.WriteFile(lockfilePath, []byte("old content"), 0644))
+
+ err = writeLock(dir, lock, false)
+ assert.NoError(t, err)
+
+ content, err := os.ReadFile(lockfilePath)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedContent, content)
+ })
+
+ t.Run("lock file is a symlink", func(t *testing.T) {
+ dir := t.TempDir()
+ dummyFile := filepath.Join(dir, "dummy.txt")
+ assert.NoError(t, os.WriteFile(dummyFile, []byte("dummy"), 0644))
+
+ lockfilePath := filepath.Join(dir, "Chart.lock")
+ assert.NoError(t, os.Symlink(dummyFile, lockfilePath))
+
+ err = writeLock(dir, lock, false)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "the Chart.lock file is a symlink to")
+ })
+
+ t.Run("chart path is not a directory", func(t *testing.T) {
+ dir := t.TempDir()
+ filePath := filepath.Join(dir, "not-a-dir")
+ assert.NoError(t, os.WriteFile(filePath, []byte("file"), 0644))
+
+ err = writeLock(filePath, lock, false)
+ assert.Error(t, err)
+ })
+}
diff --git a/helm/pkg/downloader/testdata/helm-test-key.pub b/helm/pkg/downloader/testdata/helm-test-key.pub
new file mode 100644
index 000000000..38714f25a
Binary files /dev/null and b/helm/pkg/downloader/testdata/helm-test-key.pub differ
diff --git a/helm/pkg/downloader/testdata/helm-test-key.secret b/helm/pkg/downloader/testdata/helm-test-key.secret
new file mode 100644
index 000000000..a966aef93
Binary files /dev/null and b/helm/pkg/downloader/testdata/helm-test-key.secret differ
diff --git a/helm/pkg/downloader/testdata/local-subchart-0.1.0.tgz b/helm/pkg/downloader/testdata/local-subchart-0.1.0.tgz
new file mode 100644
index 000000000..485312105
Binary files /dev/null and b/helm/pkg/downloader/testdata/local-subchart-0.1.0.tgz differ
diff --git a/helm/pkg/downloader/testdata/local-subchart/Chart.yaml b/helm/pkg/downloader/testdata/local-subchart/Chart.yaml
new file mode 100644
index 000000000..1e17203e5
--- /dev/null
+++ b/helm/pkg/downloader/testdata/local-subchart/Chart.yaml
@@ -0,0 +1,3 @@
+description: A Helm chart for Kubernetes
+name: local-subchart
+version: 0.1.0
diff --git a/helm/pkg/downloader/testdata/repositories.yaml b/helm/pkg/downloader/testdata/repositories.yaml
new file mode 100644
index 000000000..db7a57687
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repositories.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+repositories:
+ - name: testing
+ url: "http://example.com"
+ - name: testing-https
+ url: "https://example.com"
+ - name: testing-basicauth
+ url: "http://username:password@example.com"
+ - name: kubernetes-charts
+ url: "http://example.com/charts"
+ - name: malformed
+ url: "http://dl.example.com"
+ - name: testing-querystring
+ url: "http://example.com?key=value"
+ - name: testing-relative
+ url: "http://example.com/helm"
+ - name: testing-relative-trailing-slash
+ url: "http://example.com/helm/"
+ - name: testing-ca-file
+ url: "https://example.com"
+ certFile: "cert"
+ keyFile: "key"
+ caFile: "ca"
+ - name: testing-https-insecureskip-tls-verify
+ url: "https://example-https-insecureskiptlsverify.com"
+ insecure_skip_tls_verify: true
+ - name: encoded-url
+ url: "http://example.com/with%2Fslash"
diff --git a/helm/pkg/downloader/testdata/repository/encoded-url-index.yaml b/helm/pkg/downloader/testdata/repository/encoded-url-index.yaml
new file mode 100644
index 000000000..f9ec867a5
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/encoded-url-index.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+entries:
+ foobar:
+ - name: foobar
+ description: Foo Chart With Encoded URL
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - charts/foobar-4.2.1.tgz
+ version: 4.2.1
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/kubernetes-charts-index.yaml b/helm/pkg/downloader/testdata/repository/kubernetes-charts-index.yaml
new file mode 100644
index 000000000..52dcf930b
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/kubernetes-charts-index.yaml
@@ -0,0 +1,49 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ urls:
+ - https://charts.helm.sh/stable/alpine-0.1.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.1.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ urls:
+ - https://charts.helm.sh/stable/alpine-0.2.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.2.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ mariadb:
+ - name: mariadb
+ urls:
+ - https://charts.helm.sh/stable/mariadb-0.3.0.tgz
+ checksum: 65229f6de44a2be9f215d11dbff311673fc8ba56
+ home: https://mariadb.org
+ sources:
+ - https://github.com/bitnami/bitnami-docker-mariadb
+ version: 0.3.0
+ description: Chart for MariaDB
+ keywords:
+ - mariadb
+ - mysql
+ - database
+ - sql
+ maintainers:
+ - name: Bitnami
+ email: containers@bitnami.com
+ icon: ""
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/malformed-index.yaml b/helm/pkg/downloader/testdata/repository/malformed-index.yaml
new file mode 100644
index 000000000..fa319abdd
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/malformed-index.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ urls:
+ - alpine-1.2.3.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 1.2.3
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-basicauth-index.yaml b/helm/pkg/downloader/testdata/repository/testing-basicauth-index.yaml
new file mode 100644
index 000000000..ed092ef41
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-basicauth-index.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+entries:
+ foo:
+ - name: foo
+ description: Foo Chart
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - http://username:password@example.com/foo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-ca-file-index.yaml b/helm/pkg/downloader/testdata/repository/testing-ca-file-index.yaml
new file mode 100644
index 000000000..81901efc7
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-ca-file-index.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+entries:
+ foo:
+ - name: foo
+ description: Foo Chart
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - https://example.com/foo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-https-index.yaml b/helm/pkg/downloader/testdata/repository/testing-https-index.yaml
new file mode 100644
index 000000000..81901efc7
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-https-index.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+entries:
+ foo:
+ - name: foo
+ description: Foo Chart
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - https://example.com/foo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-https-insecureskip-tls-verify-index.yaml b/helm/pkg/downloader/testdata/repository/testing-https-insecureskip-tls-verify-index.yaml
new file mode 100644
index 000000000..58f928ff4
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-https-insecureskip-tls-verify-index.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+entries:
+ tlsfoo:
+ - name: tlsfoo
+ description: TLS FOO Chart
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - https://example.com/tlsfoo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b7373
diff --git a/helm/pkg/downloader/testdata/repository/testing-index.yaml b/helm/pkg/downloader/testdata/repository/testing-index.yaml
new file mode 100644
index 000000000..f588bf1fb
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-index.yaml
@@ -0,0 +1,43 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ urls:
+ - http://example.com/alpine-1.2.3.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 1.2.3
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ - name: alpine
+ urls:
+ - http://example.com/alpine-0.2.0.tgz
+ - https://charts.helm.sh/stable/alpine-0.2.0.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 0.2.0
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
+ foo:
+ - name: foo
+ description: Foo Chart
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - http://example.com/foo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-querystring-index.yaml b/helm/pkg/downloader/testdata/repository/testing-querystring-index.yaml
new file mode 100644
index 000000000..fa319abdd
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-querystring-index.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+entries:
+ alpine:
+ - name: alpine
+ urls:
+ - alpine-1.2.3.tgz
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ home: https://helm.sh/helm
+ sources:
+ - https://github.com/helm/helm
+ version: 1.2.3
+ description: Deploy a basic Alpine Linux pod
+ keywords: []
+ maintainers: []
+ icon: ""
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-relative-index.yaml b/helm/pkg/downloader/testdata/repository/testing-relative-index.yaml
new file mode 100644
index 000000000..9524daf6e
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-relative-index.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+entries:
+ foo:
+ - name: foo
+ description: Foo Chart With Relative Path
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - charts/foo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
+ bar:
+ - name: bar
+ description: Bar Chart With Relative Path
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - bar-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
+ baz:
+ - name: baz
+ description: Baz Chart With Absolute Path
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - /path/to/baz-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/repository/testing-relative-trailing-slash-index.yaml b/helm/pkg/downloader/testdata/repository/testing-relative-trailing-slash-index.yaml
new file mode 100644
index 000000000..ba27ed257
--- /dev/null
+++ b/helm/pkg/downloader/testdata/repository/testing-relative-trailing-slash-index.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+entries:
+ foo:
+ - name: foo
+ description: Foo Chart With Relative Path
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - charts/foo-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
+ bar:
+ - name: bar
+ description: Bar Chart With Relative Path
+ home: https://helm.sh/helm
+ keywords: []
+ maintainers: []
+ sources:
+ - https://github.com/helm/charts
+ urls:
+ - bar-1.2.3.tgz
+ version: 1.2.3
+ checksum: 0e6661f193211d7a5206918d42f5c2a9470b737d
+ apiVersion: v2
diff --git a/helm/pkg/downloader/testdata/signtest-0.1.0.tgz b/helm/pkg/downloader/testdata/signtest-0.1.0.tgz
new file mode 100644
index 000000000..c74e5b0ef
Binary files /dev/null and b/helm/pkg/downloader/testdata/signtest-0.1.0.tgz differ
diff --git a/helm/pkg/downloader/testdata/signtest-0.1.0.tgz.prov b/helm/pkg/downloader/testdata/signtest-0.1.0.tgz.prov
new file mode 100644
index 000000000..d325bb266
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest-0.1.0.tgz.prov
@@ -0,0 +1,21 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: signtest
+version: 0.1.0
+
+...
+files:
+ signtest-0.1.0.tgz: sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55
+-----BEGIN PGP SIGNATURE-----
+
+wsBcBAEBCgAQBQJcoosfCRCEO7+YH8GHYgAA220IALAs8T8NPgkcLvHu+5109cAN
+BOCNPSZDNsqLZW/2Dc9cKoBG7Jen4Qad+i5l9351kqn3D9Gm6eRfAWcjfggRobV/
+9daZ19h0nl4O1muQNAkjvdgZt8MOP3+PB3I3/Tu2QCYjI579SLUmuXlcZR5BCFPR
+PJy+e3QpV2PcdeU2KZLG4tjtlrq+3QC9ZHHEJLs+BVN9d46Dwo6CxJdHJrrrAkTw
+M8MhA92vbiTTPRSCZI9x5qDAwJYhoq0oxLflpuL2tIlo3qVoCsaTSURwMESEHO32
+XwYG7BaVDMELWhAorBAGBGBwWFbJ1677qQ2gd9CN0COiVhekWlFRcnn60800r84=
+=k9Y9
+-----END PGP SIGNATURE-----
\ No newline at end of file
diff --git a/helm/pkg/downloader/testdata/signtest/.helmignore b/helm/pkg/downloader/testdata/signtest/.helmignore
new file mode 100644
index 000000000..435b756d8
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/.helmignore
@@ -0,0 +1,5 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+.git
diff --git a/helm/pkg/downloader/testdata/signtest/Chart.yaml b/helm/pkg/downloader/testdata/signtest/Chart.yaml
new file mode 100644
index 000000000..f1f73723a
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: signtest
+version: 0.1.0
diff --git a/helm/pkg/downloader/testdata/signtest/alpine/Chart.yaml b/helm/pkg/downloader/testdata/signtest/alpine/Chart.yaml
new file mode 100644
index 000000000..eec261220
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/alpine/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: Deploy a basic Alpine Linux pod
+home: https://helm.sh/helm
+name: alpine
+sources:
+- https://github.com/helm/helm
+version: 0.1.0
diff --git a/helm/pkg/downloader/testdata/signtest/alpine/README.md b/helm/pkg/downloader/testdata/signtest/alpine/README.md
new file mode 100644
index 000000000..28bebae07
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/alpine/README.md
@@ -0,0 +1,9 @@
+This example was generated using the command `helm create alpine`.
+
+The `templates/` directory contains a very simple pod resource with a
+couple of parameters.
+
+The `values.yaml` file contains the default values for the
+`alpine-pod.yaml` template.
+
+You can install this example using `helm install ./alpine`.
diff --git a/helm/pkg/downloader/testdata/signtest/alpine/templates/alpine-pod.yaml b/helm/pkg/downloader/testdata/signtest/alpine/templates/alpine-pod.yaml
new file mode 100644
index 000000000..5bbae10af
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/alpine/templates/alpine-pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.Release.Name}}-{{.Chart.Name}}
+ labels:
+ app.kubernetes.io/managed-by: {{.Release.Service}}
+ chartName: {{.Chart.Name}}
+ chartVersion: {{.Chart.Version | quote}}
+spec:
+ restartPolicy: {{default "Never" .restart_policy}}
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/downloader/testdata/signtest/alpine/values.yaml b/helm/pkg/downloader/testdata/signtest/alpine/values.yaml
new file mode 100644
index 000000000..bb6c06ae4
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/alpine/values.yaml
@@ -0,0 +1,2 @@
+# The pod name
+name: my-alpine
diff --git a/helm/pkg/downloader/testdata/signtest/templates/pod.yaml b/helm/pkg/downloader/testdata/signtest/templates/pod.yaml
new file mode 100644
index 000000000..9b00ccaf7
--- /dev/null
+++ b/helm/pkg/downloader/testdata/signtest/templates/pod.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: signtest
+spec:
+ restartPolicy: Never
+ containers:
+ - name: waiter
+ image: "alpine:3.3"
+ command: ["/bin/sleep","9000"]
diff --git a/helm/pkg/downloader/testdata/signtest/values.yaml b/helm/pkg/downloader/testdata/signtest/values.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/engine/doc.go b/helm/pkg/engine/doc.go
new file mode 100644
index 000000000..e764a829a
--- /dev/null
+++ b/helm/pkg/engine/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package engine implements the Go text template engine as needed for Helm.
+
+When Helm renders templates it does so with additional functions and different
+modes (e.g., strict, lint mode). This package handles the helm specific
+implementation.
+*/
+package engine // import "helm.sh/helm/v4/pkg/engine"
diff --git a/helm/pkg/engine/engine.go b/helm/pkg/engine/engine.go
new file mode 100644
index 000000000..f5db7e158
--- /dev/null
+++ b/helm/pkg/engine/engine.go
@@ -0,0 +1,595 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "maps"
+ "path"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "text/template"
+
+ "k8s.io/client-go/rest"
+
+ ci "helm.sh/helm/v4/pkg/chart"
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// Engine is an implementation of the Helm rendering implementation for templates.
+type Engine struct {
+ // If strict is enabled, template rendering will fail if a template references
+ // a value that was not passed in.
+ Strict bool
+ // In LintMode, some 'required' template values may be missing, so don't fail
+ LintMode bool
+ // optional provider of clients to talk to the Kubernetes API
+ clientProvider *ClientProvider
+ // EnableDNS tells the engine to allow DNS lookups when rendering templates
+ EnableDNS bool
+ // CustomTemplateFuncs is defined by users to provide custom template funcs
+ CustomTemplateFuncs template.FuncMap
+}
+
+// New creates a new instance of Engine using the passed in rest config.
+func New(config *rest.Config) Engine {
+ var clientProvider ClientProvider = clientProviderFromConfig{config}
+ return Engine{
+ clientProvider: &clientProvider,
+ }
+}
+
+// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates.
+//
+// Render can be called repeatedly on the same engine.
+//
+// This will look in the chart's 'templates' data (e.g. the 'templates/' directory)
+// and attempt to render the templates there using the values passed in.
+//
+// Values are scoped to their templates. A dependency template will not have
+// access to the values set for its parent. If chart "foo" includes chart "bar",
+// "bar" will not have access to the values for "foo".
+//
+// Values should be prepared with something like `chartutils.ReadValues`.
+//
+// Values are passed through the templates according to scope. If the top layer
+// chart includes the chart foo, which includes the chart bar, the values map
+// will be examined for a table called "foo". If "foo" is found in vals,
+// that section of the values will be passed into the "foo" chart. And if that
+// section contains a value named "bar", that value will be passed on to the
+// bar chart during render time.
+func (e Engine) Render(chrt ci.Charter, values common.Values) (map[string]string, error) {
+ tmap := allTemplates(chrt, values)
+ return e.render(tmap)
+}
+
+// Render takes a chart, optional values, and value overrides, and attempts to
+// render the Go templates using the default options.
+func Render(chrt ci.Charter, values common.Values) (map[string]string, error) {
+ return new(Engine).Render(chrt, values)
+}
+
+// RenderWithClient takes a chart, optional values, and value overrides, and attempts to
+// render the Go templates using the default options. This engine is client aware and so can have template
+// functions that interact with the client.
+func RenderWithClient(chrt ci.Charter, values common.Values, config *rest.Config) (map[string]string, error) {
+ var clientProvider ClientProvider = clientProviderFromConfig{config}
+ return Engine{
+ clientProvider: &clientProvider,
+ }.Render(chrt, values)
+}
+
+// RenderWithClientProvider takes a chart, optional values, and value overrides, and attempts to
+// render the Go templates using the default options. This engine is client aware and so can have template
+// functions that interact with the client.
+// This function differs from RenderWithClient in that it lets you customize the way a dynamic client is constructed.
+func RenderWithClientProvider(chrt ci.Charter, values common.Values, clientProvider ClientProvider) (map[string]string, error) {
+ return Engine{
+ clientProvider: &clientProvider,
+ }.Render(chrt, values)
+}
+
+// renderable is an object that can be rendered.
+type renderable struct {
+ // tpl is the current template.
+ tpl string
+ // vals are the values to be supplied to the template.
+ vals common.Values
+ // namespace prefix to the templates of the current chart
+ basePath string
+}
+
+const warnStartDelim = "HELM_ERR_START"
+const warnEndDelim = "HELM_ERR_END"
+const recursionMaxNums = 1000
+
+var warnRegex = regexp.MustCompile(warnStartDelim + `((?s).*)` + warnEndDelim)
+
+func warnWrap(warn string) string {
+ return warnStartDelim + warn + warnEndDelim
+}
+
+// 'include' needs to be defined in the scope of a 'tpl' template as
+// well as regular file-loaded templates.
+func includeFun(t *template.Template, includedNames map[string]int) func(string, interface{}) (string, error) {
+ return func(name string, data interface{}) (string, error) {
+ var buf strings.Builder
+ if v, ok := includedNames[name]; ok {
+ if v > recursionMaxNums {
+ return "", fmt.Errorf(
+ "rendering template has a nested reference name: %s: %w",
+ name, errors.New("unable to execute template"))
+ }
+ includedNames[name]++
+ } else {
+ includedNames[name] = 1
+ }
+ err := t.ExecuteTemplate(&buf, name, data)
+ includedNames[name]--
+ return buf.String(), err
+ }
+}
+
+// As does 'tpl', so that nested calls to 'tpl' see the templates
+// defined by their enclosing contexts.
+func tplFun(parent *template.Template, includedNames map[string]int, strict bool) func(string, interface{}) (string, error) {
+ return func(tpl string, vals interface{}) (string, error) {
+ t, err := parent.Clone()
+ if err != nil {
+ return "", fmt.Errorf("cannot clone template: %w", err)
+ }
+
+ // Re-inject the missingkey option, see text/template issue https://github.com/golang/go/issues/43022
+ // We have to go by strict from our engine configuration, as the option fields are private in Template.
+ // TODO: Remove workaround (and the strict parameter) once we build only with golang versions with a fix.
+ if strict {
+ t.Option("missingkey=error")
+ } else {
+ t.Option("missingkey=zero")
+ }
+
+ // Re-inject 'include' so that it can close over our clone of t;
+ // this lets any 'define's inside tpl be 'include'd.
+ t.Funcs(template.FuncMap{
+ "include": includeFun(t, includedNames),
+ "tpl": tplFun(t, includedNames, strict),
+ })
+
+ // We need a .New template, as template text which is just blanks
+ // or comments after parsing out defines just adds new named
+ // template definitions without changing the main template.
+ // https://pkg.go.dev/text/template#Template.Parse
+ // Use the parent's name for lack of a better way to identify the tpl
+ // text string. (Maybe we could use a hash appended to the name?)
+ t, err = t.New(parent.Name()).Parse(tpl)
+ if err != nil {
+ return "", fmt.Errorf("cannot parse template %q: %w", tpl, err)
+ }
+
+ var buf strings.Builder
+ if err := t.Execute(&buf, vals); err != nil {
+ return "", fmt.Errorf("error during tpl function execution for %q: %w", tpl, err)
+ }
+
+ // See comment in renderWithReferences explaining the hack.
+ return strings.ReplaceAll(buf.String(), "", ""), nil
+ }
+}
+
+// initFunMap creates the Engine's FuncMap and adds context-specific functions.
+func (e Engine) initFunMap(t *template.Template) {
+ funcMap := funcMap()
+ includedNames := make(map[string]int)
+
+ // Add the template-rendering functions here so we can close over t.
+ funcMap["include"] = includeFun(t, includedNames)
+ funcMap["tpl"] = tplFun(t, includedNames, e.Strict)
+
+ // Add the `required` function here so we can use lintMode
+ funcMap["required"] = func(warn string, val interface{}) (interface{}, error) {
+ if val == nil {
+ if e.LintMode {
+ // Don't fail on missing required values when linting
+ slog.Warn("missing required value", "message", warn)
+ return "", nil
+ }
+ return val, errors.New(warnWrap(warn))
+ } else if _, ok := val.(string); ok {
+ if val == "" {
+ if e.LintMode {
+ // Don't fail on missing required values when linting
+ slog.Warn("missing required values", "message", warn)
+ return "", nil
+ }
+ return val, errors.New(warnWrap(warn))
+ }
+ }
+ return val, nil
+ }
+
+ // Override sprig fail function for linting and wrapping message
+ funcMap["fail"] = func(msg string) (string, error) {
+ if e.LintMode {
+ // Don't fail when linting
+ slog.Info("funcMap fail", "message", msg)
+ return "", nil
+ }
+ return "", errors.New(warnWrap(msg))
+ }
+
+ // If we are not linting and have a cluster connection, provide a Kubernetes-backed
+ // implementation.
+ if !e.LintMode && e.clientProvider != nil {
+ funcMap["lookup"] = newLookupFunction(*e.clientProvider)
+ }
+
+ // When DNS lookups are not enabled override the sprig function and return
+ // an empty string.
+ if !e.EnableDNS {
+ funcMap["getHostByName"] = func(_ string) string {
+ return ""
+ }
+ }
+
+ // Set custom template funcs
+ maps.Copy(funcMap, e.CustomTemplateFuncs)
+
+ t.Funcs(funcMap)
+}
+
+// render takes a map of templates/values and renders them.
+func (e Engine) render(tpls map[string]renderable) (rendered map[string]string, err error) {
+ // Basically, what we do here is start with an empty parent template and then
+ // build up a list of templates -- one for each file. Once all of the templates
+ // have been parsed, we loop through again and execute every template.
+ //
+ // The idea with this process is to make it possible for more complex templates
+ // to share common blocks, but to make the entire thing feel like a file-based
+ // template engine.
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("rendering template failed: %v", r)
+ }
+ }()
+ t := template.New("gotpl")
+ if e.Strict {
+ t.Option("missingkey=error")
+ } else {
+ // Not that zero will attempt to add default values for types it knows,
+ // but will still emit for others. We mitigate that later.
+ t.Option("missingkey=zero")
+ }
+
+ e.initFunMap(t)
+
+ // We want to parse the templates in a predictable order. The order favors
+ // higher-level (in file system) templates over deeply nested templates.
+ keys := sortTemplates(tpls)
+
+ for _, filename := range keys {
+ r := tpls[filename]
+ if _, err := t.New(filename).Parse(r.tpl); err != nil {
+ return map[string]string{}, cleanupParseError(filename, err)
+ }
+ }
+
+ rendered = make(map[string]string, len(keys))
+ for _, filename := range keys {
+ // Don't render partials. We don't care out the direct output of partials.
+ // They are only included from other templates.
+ if strings.HasPrefix(path.Base(filename), "_") {
+ continue
+ }
+ // At render time, add information about the template that is being rendered.
+ vals := tpls[filename].vals
+ vals["Template"] = common.Values{"Name": filename, "BasePath": tpls[filename].basePath}
+ var buf strings.Builder
+ if err := t.ExecuteTemplate(&buf, filename, vals); err != nil {
+ return map[string]string{}, reformatExecErrorMsg(filename, err)
+ }
+
+ // Work around the issue where Go will emit "" even if Options(missing=zero)
+ // is set. Since missing=error will never get here, we do not need to handle
+ // the Strict case.
+ rendered[filename] = strings.ReplaceAll(buf.String(), "", "")
+ }
+
+ return rendered, nil
+}
+
+func cleanupParseError(filename string, err error) error {
+ tokens := strings.Split(err.Error(), ": ")
+ if len(tokens) == 1 {
+ // This might happen if a non-templating error occurs
+ return fmt.Errorf("parse error in (%s): %s", filename, err)
+ }
+ // The first token is "template"
+ // The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+ location := tokens[1]
+ // The remaining tokens make up a stacktrace-like chain, ending with the relevant error
+ errMsg := tokens[len(tokens)-1]
+ return fmt.Errorf("parse error at (%s): %s", location, errMsg)
+}
+
+type TraceableError struct {
+ location string
+ message string
+ executedFunction string
+}
+
+func (t TraceableError) String() string {
+ var errorString strings.Builder
+ if t.location != "" {
+ _, _ = fmt.Fprintf(&errorString, "%s\n ", t.location)
+ }
+ if t.executedFunction != "" {
+ _, _ = fmt.Fprintf(&errorString, "%s\n ", t.executedFunction)
+ }
+ if t.message != "" {
+ _, _ = fmt.Fprintf(&errorString, "%s\n", t.message)
+ }
+ return errorString.String()
+}
+
+// parseTemplateExecErrorString parses a template execution error string from text/template
+// without using regular expressions. It returns a TraceableError and true if parsing succeeded.
+func parseTemplateExecErrorString(s string) (TraceableError, bool) {
+ const prefix = "template: "
+ if !strings.HasPrefix(s, prefix) {
+ return TraceableError{}, false
+ }
+ remainder := s[len(prefix):]
+
+ // Special case: "template: no template %q associated with template %q"
+ // Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=191
+ traceableError, done := parseTemplateNoTemplateError(s, remainder)
+ if done {
+ return traceableError, true
+ }
+
+ // Executing form: ": executing \"\" at <>: [ template:...]"
+ // Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=141
+ traceableError, done = parseTemplateExecutingAtErrorType(remainder)
+ if done {
+ return traceableError, true
+ }
+
+ // Simple form: ": "
+ // Use LastIndex to avoid splitting colons within line:col info.
+ // Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=138
+ traceableError, done = parseTemplateSimpleErrorString(remainder)
+ if done {
+ return traceableError, true
+ }
+
+ return TraceableError{}, false
+}
+
+// Special case: "template: no template %q associated with template %q"
+// Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=191
+func parseTemplateNoTemplateError(s string, remainder string) (TraceableError, bool) {
+ if strings.HasPrefix(remainder, "no template ") {
+ return TraceableError{message: s}, true
+ }
+ return TraceableError{}, false
+}
+
+// Simple form: ": "
+// Use LastIndex to avoid splitting colons within line:col info.
+// Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=138
+func parseTemplateSimpleErrorString(remainder string) (TraceableError, bool) {
+ if sep := strings.LastIndex(remainder, ": "); sep != -1 {
+ templateName := remainder[:sep]
+ errMsg := remainder[sep+2:]
+ if cut := strings.Index(errMsg, " template:"); cut != -1 {
+ errMsg = errMsg[:cut]
+ }
+ return TraceableError{location: templateName, message: errMsg}, true
+ }
+ return TraceableError{}, false
+}
+
+// Executing form: ": executing \"\" at <>: [ template:...]"
+// Matches https://cs.opensource.google/go/go/+/refs/tags/go1.23.6:src/text/template/exec.go;l=141
+func parseTemplateExecutingAtErrorType(remainder string) (TraceableError, bool) {
+ if idx := strings.Index(remainder, ": executing "); idx != -1 {
+ templateName := remainder[:idx]
+ after := remainder[idx+len(": executing "):]
+ if len(after) == 0 || after[0] != '"' {
+ return TraceableError{}, false
+ }
+ // find closing quote for function name
+ endQuote := strings.IndexByte(after[1:], '"')
+ if endQuote == -1 {
+ return TraceableError{}, false
+ }
+ endQuote++ // account for offset we started at 1
+ functionName := after[1:endQuote]
+ afterFunc := after[endQuote+1:]
+
+ // expect: " at <" then location then ">: " then message
+ const atPrefix = " at <"
+ if !strings.HasPrefix(afterFunc, atPrefix) {
+ return TraceableError{}, false
+ }
+ afterAt := afterFunc[len(atPrefix):]
+ endLoc := strings.Index(afterAt, ">: ")
+ if endLoc == -1 {
+ return TraceableError{}, false
+ }
+ locationName := afterAt[:endLoc]
+ errMsg := afterAt[endLoc+len(">: "):]
+
+ // trim chained next error starting with space + "template:" if present
+ if cut := strings.Index(errMsg, " template:"); cut != -1 {
+ errMsg = errMsg[:cut]
+ }
+ return TraceableError{
+ location: templateName,
+ message: errMsg,
+ executedFunction: "executing \"" + functionName + "\" at <" + locationName + ">:",
+ }, true
+ }
+ return TraceableError{}, false
+}
+
+// reformatExecErrorMsg takes an error message for template rendering and formats it into a formatted
+// multi-line error string
+func reformatExecErrorMsg(filename string, err error) error {
+ // This function parses the error message produced by text/template package.
+ // If it can parse out details from that error message such as the line number, template it failed on,
+ // and error description, then it will construct a new error that displays these details in a structured way.
+ // If there are issues with parsing the error message, the err passed into the function should return instead.
+ var execError template.ExecError
+ if !errors.As(err, &execError) {
+ return err
+ }
+
+ tokens := strings.SplitN(err.Error(), ": ", 3)
+ if len(tokens) != 3 {
+ // This might happen if a non-templating error occurs
+ return fmt.Errorf("execution error in (%s): %s", filename, err)
+ }
+
+ // The first token is "template"
+ // The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+ location := tokens[1]
+
+ parts := warnRegex.FindStringSubmatch(tokens[2])
+ if len(parts) >= 2 {
+ return fmt.Errorf("execution error at (%s): %s", location, parts[1])
+ }
+ current := err
+ var fileLocations []TraceableError
+ for current != nil {
+ if tr, ok := parseTemplateExecErrorString(current.Error()); ok {
+ if len(fileLocations) == 0 || fileLocations[len(fileLocations)-1] != tr {
+ fileLocations = append(fileLocations, tr)
+ }
+ } else {
+ return err
+ }
+ current = errors.Unwrap(current)
+ }
+
+ var finalErrorString strings.Builder
+ for _, fileLocation := range fileLocations {
+ _, _ = fmt.Fprintf(&finalErrorString, "%s", fileLocation.String())
+ }
+
+ return errors.New(strings.TrimSpace(finalErrorString.String()))
+}
+
+func sortTemplates(tpls map[string]renderable) []string {
+ keys := make([]string, len(tpls))
+ i := 0
+ for key := range tpls {
+ keys[i] = key
+ i++
+ }
+ sort.Sort(sort.Reverse(byPathLen(keys)))
+ return keys
+}
+
+type byPathLen []string
+
+func (p byPathLen) Len() int { return len(p) }
+func (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] }
+func (p byPathLen) Less(i, j int) bool {
+ a, b := p[i], p[j]
+ ca, cb := strings.Count(a, "/"), strings.Count(b, "/")
+ if ca == cb {
+ return strings.Compare(a, b) == -1
+ }
+ return ca < cb
+}
+
+// allTemplates returns all templates for a chart and its dependencies.
+//
+// As it goes, it also prepares the values in a scope-sensitive manner.
+func allTemplates(c ci.Charter, vals common.Values) map[string]renderable {
+ templates := make(map[string]renderable)
+ recAllTpls(c, templates, vals)
+ return templates
+}
+
+// recAllTpls recurses through the templates in a chart.
+//
+// As it recurses, it also sets the values to be appropriate for the template
+// scope.
+func recAllTpls(c ci.Charter, templates map[string]renderable, values common.Values) map[string]interface{} {
+ vals := values.AsMap()
+ subCharts := make(map[string]interface{})
+ accessor, err := ci.NewAccessor(c)
+ if err != nil {
+ slog.Error("error accessing chart", "error", err)
+ }
+ chartMetaData := accessor.MetadataAsMap()
+ chartMetaData["IsRoot"] = accessor.IsRoot()
+
+ next := map[string]interface{}{
+ "Chart": chartMetaData,
+ "Files": newFiles(accessor.Files()),
+ "Release": vals["Release"],
+ "Capabilities": vals["Capabilities"],
+ "Values": make(common.Values),
+ "Subcharts": subCharts,
+ }
+
+ // If there is a {{.Values.ThisChart}} in the parent metadata,
+ // copy that into the {{.Values}} for this template.
+ if accessor.IsRoot() {
+ next["Values"] = vals["Values"]
+ } else if vs, err := values.Table("Values." + accessor.Name()); err == nil {
+ next["Values"] = vs
+ }
+
+ for _, child := range accessor.Dependencies() {
+ // TODO: Handle error
+ sub, _ := ci.NewAccessor(child)
+ subCharts[sub.Name()] = recAllTpls(child, templates, next)
+ }
+
+ newParentID := accessor.ChartFullPath()
+ for _, t := range accessor.Templates() {
+ if t == nil {
+ continue
+ }
+ if !isTemplateValid(accessor, t.Name) {
+ continue
+ }
+ templates[path.Join(newParentID, t.Name)] = renderable{
+ tpl: string(t.Data),
+ vals: next,
+ basePath: path.Join(newParentID, "templates"),
+ }
+ }
+
+ return next
+}
+
+// isTemplateValid returns true if the template is valid for the chart type
+func isTemplateValid(accessor ci.Accessor, templateName string) bool {
+ if accessor.IsLibraryChart() {
+ return strings.HasPrefix(filepath.Base(templateName), "_")
+ }
+ return true
+}
diff --git a/helm/pkg/engine/engine_test.go b/helm/pkg/engine/engine_test.go
new file mode 100644
index 000000000..c9cdf79c3
--- /dev/null
+++ b/helm/pkg/engine/engine_test.go
@@ -0,0 +1,1504 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+ "testing"
+ "text/template"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/dynamic/fake"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ "helm.sh/helm/v4/pkg/chart/common/util"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+func TestSortTemplates(t *testing.T) {
+ tpls := map[string]renderable{
+ "/mychart/templates/foo.tpl": {},
+ "/mychart/templates/charts/foo/charts/bar/templates/foo.tpl": {},
+ "/mychart/templates/bar.tpl": {},
+ "/mychart/templates/charts/foo/templates/bar.tpl": {},
+ "/mychart/templates/_foo.tpl": {},
+ "/mychart/templates/charts/foo/templates/foo.tpl": {},
+ "/mychart/templates/charts/bar/templates/foo.tpl": {},
+ }
+ got := sortTemplates(tpls)
+ if len(got) != len(tpls) {
+ t.Fatal("Sorted results are missing templates")
+ }
+
+ expect := []string{
+ "/mychart/templates/charts/foo/charts/bar/templates/foo.tpl",
+ "/mychart/templates/charts/foo/templates/foo.tpl",
+ "/mychart/templates/charts/foo/templates/bar.tpl",
+ "/mychart/templates/charts/bar/templates/foo.tpl",
+ "/mychart/templates/foo.tpl",
+ "/mychart/templates/bar.tpl",
+ "/mychart/templates/_foo.tpl",
+ }
+ for i, e := range expect {
+ if got[i] != e {
+ t.Fatalf("\n\tExp:\n%s\n\tGot:\n%s",
+ strings.Join(expect, "\n"),
+ strings.Join(got, "\n"),
+ )
+ }
+ }
+}
+
+func TestFuncMap(t *testing.T) {
+ fns := funcMap()
+ forbidden := []string{"env", "expandenv"}
+ for _, f := range forbidden {
+ if _, ok := fns[f]; ok {
+ t.Errorf("Forbidden function %s exists in FuncMap.", f)
+ }
+ }
+
+ // Test for Engine-specific template functions.
+ expect := []string{"include", "required", "tpl", "toYaml", "fromYaml", "toToml", "fromToml", "toJson", "fromJson", "lookup"}
+ for _, f := range expect {
+ if _, ok := fns[f]; !ok {
+ t.Errorf("Expected add-on function %q", f)
+ }
+ }
+}
+
+func TestRender(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "moby",
+ Version: "1.2.3",
+ },
+ Templates: []*common.File{
+ {Name: "templates/test1", ModTime: modTime, Data: []byte("{{.Values.outer | title }} {{.Values.inner | title}}")},
+ {Name: "templates/test2", ModTime: modTime, Data: []byte("{{.Values.global.callme | lower }}")},
+ {Name: "templates/test3", ModTime: modTime, Data: []byte("{{.noValue}}")},
+ {Name: "templates/test4", ModTime: modTime, Data: []byte("{{toJson .Values}}")},
+ {Name: "templates/test5", ModTime: modTime, Data: []byte("{{getHostByName \"helm.sh\"}}")},
+ },
+ Values: map[string]interface{}{"outer": "DEFAULT", "inner": "DEFAULT"},
+ }
+
+ vals := map[string]interface{}{
+ "Values": map[string]interface{}{
+ "outer": "spouter",
+ "inner": "inn",
+ "global": map[string]interface{}{
+ "callme": "Ishmael",
+ },
+ },
+ }
+
+ v, err := util.CoalesceValues(c, vals)
+ if err != nil {
+ t.Fatalf("Failed to coalesce values: %s", err)
+ }
+ out, err := Render(c, v)
+ if err != nil {
+ t.Errorf("Failed to render templates: %s", err)
+ }
+
+ expect := map[string]string{
+ "moby/templates/test1": "Spouter Inn",
+ "moby/templates/test2": "ishmael",
+ "moby/templates/test3": "",
+ "moby/templates/test4": `{"global":{"callme":"Ishmael"},"inner":"inn","outer":"spouter"}`,
+ "moby/templates/test5": "",
+ }
+
+ for name, data := range expect {
+ if out[name] != data {
+ t.Errorf("Expected %q, got %q", data, out[name])
+ }
+ }
+}
+
+func TestRenderRefsOrdering(t *testing.T) {
+ modTime := time.Now()
+
+ parentChart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "parent",
+ Version: "1.2.3",
+ },
+ Templates: []*common.File{
+ {Name: "templates/_helpers.tpl", ModTime: modTime, Data: []byte(`{{- define "test" -}}parent value{{- end -}}`)},
+ {Name: "templates/test.yaml", ModTime: modTime, Data: []byte(`{{ tpl "{{ include \"test\" . }}" . }}`)},
+ },
+ }
+ childChart := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "child",
+ Version: "1.2.3",
+ },
+ Templates: []*common.File{
+ {Name: "templates/_helpers.tpl", ModTime: modTime, Data: []byte(`{{- define "test" -}}child value{{- end -}}`)},
+ },
+ }
+ parentChart.AddDependency(childChart)
+
+ expect := map[string]string{
+ "parent/templates/test.yaml": "parent value",
+ }
+
+ for i := range 100 {
+ out, err := Render(parentChart, common.Values{})
+ if err != nil {
+ t.Fatalf("Failed to render templates: %s", err)
+ }
+
+ for name, data := range expect {
+ if out[name] != data {
+ t.Fatalf("Expected %q, got %q (iteration %d)", data, out[name], i+1)
+ }
+ }
+ }
+}
+
+func TestRenderInternals(t *testing.T) {
+ // Test the internals of the rendering tool.
+
+ vals := common.Values{"Name": "one", "Value": "two"}
+ tpls := map[string]renderable{
+ "one": {tpl: `Hello {{title .Name}}`, vals: vals},
+ "two": {tpl: `Goodbye {{upper .Value}}`, vals: vals},
+ // Test whether a template can reliably reference another template
+ // without regard for ordering.
+ "three": {tpl: `{{template "two" dict "Value" "three"}}`, vals: vals},
+ }
+
+ out, err := new(Engine).render(tpls)
+ if err != nil {
+ t.Fatalf("Failed template rendering: %s", err)
+ }
+
+ if len(out) != 3 {
+ t.Fatalf("Expected 3 templates, got %d", len(out))
+ }
+
+ if out["one"] != "Hello One" {
+ t.Errorf("Expected 'Hello One', got %q", out["one"])
+ }
+
+ if out["two"] != "Goodbye TWO" {
+ t.Errorf("Expected 'Goodbye TWO'. got %q", out["two"])
+ }
+
+ if out["three"] != "Goodbye THREE" {
+ t.Errorf("Expected 'Goodbye THREE'. got %q", out["two"])
+ }
+}
+
+func TestRenderWithDNS(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "moby",
+ Version: "1.2.3",
+ },
+ Templates: []*common.File{
+ {Name: "templates/test1", ModTime: time.Now(), Data: []byte("{{getHostByName \"helm.sh\"}}")},
+ },
+ Values: map[string]interface{}{},
+ }
+
+ vals := map[string]interface{}{
+ "Values": map[string]interface{}{},
+ }
+
+ v, err := util.CoalesceValues(c, vals)
+ if err != nil {
+ t.Fatalf("Failed to coalesce values: %s", err)
+ }
+
+ var e Engine
+ e.EnableDNS = true
+ out, err := e.Render(c, v)
+ if err != nil {
+ t.Errorf("Failed to render templates: %s", err)
+ }
+
+ for _, val := range c.Templates {
+ fp := path.Join("moby", val.Name)
+ if out[fp] == "" {
+ t.Errorf("Expected IP address, got %q", out[fp])
+ }
+ }
+}
+
+type kindProps struct {
+ shouldErr error
+ gvr schema.GroupVersionResource
+ namespaced bool
+}
+
+type testClientProvider struct {
+ t *testing.T
+ scheme map[string]kindProps
+ objects []runtime.Object
+}
+
+func (p *testClientProvider) GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) {
+ props := p.scheme[path.Join(apiVersion, kind)]
+ if props.shouldErr != nil {
+ return nil, false, props.shouldErr
+ }
+ return fake.NewSimpleDynamicClient(runtime.NewScheme(), p.objects...).Resource(props.gvr), props.namespaced, nil
+}
+
+var _ ClientProvider = &testClientProvider{}
+
+// makeUnstructured is a convenience function for single-line creation of Unstructured objects.
+func makeUnstructured(apiVersion, kind, name, namespace string) *unstructured.Unstructured {
+ ret := &unstructured.Unstructured{Object: map[string]interface{}{
+ "apiVersion": apiVersion,
+ "kind": kind,
+ "metadata": map[string]interface{}{
+ "name": name,
+ },
+ }}
+ if namespace != "" {
+ ret.Object["metadata"].(map[string]interface{})["namespace"] = namespace
+ }
+ return ret
+}
+
+func TestRenderWithClientProvider(t *testing.T) {
+ provider := &testClientProvider{
+ t: t,
+ scheme: map[string]kindProps{
+ "v1/Namespace": {
+ gvr: schema.GroupVersionResource{
+ Version: "v1",
+ Resource: "namespaces",
+ },
+ },
+ "v1/Pod": {
+ gvr: schema.GroupVersionResource{
+ Version: "v1",
+ Resource: "pods",
+ },
+ namespaced: true,
+ },
+ },
+ objects: []runtime.Object{
+ makeUnstructured("v1", "Namespace", "default", ""),
+ makeUnstructured("v1", "Pod", "pod1", "default"),
+ makeUnstructured("v1", "Pod", "pod2", "ns1"),
+ makeUnstructured("v1", "Pod", "pod3", "ns1"),
+ },
+ }
+
+ type testCase struct {
+ template string
+ output string
+ }
+ cases := map[string]testCase{
+ "ns-single": {
+ template: `{{ (lookup "v1" "Namespace" "" "default").metadata.name }}`,
+ output: "default",
+ },
+ "ns-list": {
+ template: `{{ (lookup "v1" "Namespace" "" "").items | len }}`,
+ output: "1",
+ },
+ "ns-missing": {
+ template: `{{ (lookup "v1" "Namespace" "" "absent") }}`,
+ output: "map[]",
+ },
+ "pod-single": {
+ template: `{{ (lookup "v1" "Pod" "default" "pod1").metadata.name }}`,
+ output: "pod1",
+ },
+ "pod-list": {
+ template: `{{ (lookup "v1" "Pod" "ns1" "").items | len }}`,
+ output: "2",
+ },
+ "pod-all": {
+ template: `{{ (lookup "v1" "Pod" "" "").items | len }}`,
+ output: "3",
+ },
+ "pod-missing": {
+ template: `{{ (lookup "v1" "Pod" "" "ns2") }}`,
+ output: "map[]",
+ },
+ }
+
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "moby",
+ Version: "1.2.3",
+ },
+ Values: map[string]interface{}{},
+ }
+
+ modTime := time.Now()
+ for name, exp := range cases {
+ c.Templates = append(c.Templates, &common.File{
+ Name: path.Join("templates", name),
+ ModTime: modTime,
+ Data: []byte(exp.template),
+ })
+ }
+
+ vals := map[string]interface{}{
+ "Values": map[string]interface{}{},
+ }
+
+ v, err := util.CoalesceValues(c, vals)
+ if err != nil {
+ t.Fatalf("Failed to coalesce values: %s", err)
+ }
+
+ out, err := RenderWithClientProvider(c, v, provider)
+ if err != nil {
+ t.Errorf("Failed to render templates: %s", err)
+ }
+
+ for name, want := range cases {
+ t.Run(name, func(t *testing.T) {
+ key := path.Join("moby/templates", name)
+ if out[key] != want.output {
+ t.Errorf("Expected %q, got %q", want, out[key])
+ }
+ })
+ }
+}
+
+func TestRenderWithClientProvider_error(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "moby",
+ Version: "1.2.3",
+ },
+ Templates: []*common.File{
+ {Name: "templates/error", ModTime: time.Now(), Data: []byte(`{{ lookup "v1" "Error" "" "" }}`)},
+ },
+ Values: map[string]interface{}{},
+ }
+
+ vals := map[string]interface{}{
+ "Values": map[string]interface{}{},
+ }
+
+ v, err := util.CoalesceValues(c, vals)
+ if err != nil {
+ t.Fatalf("Failed to coalesce values: %s", err)
+ }
+
+ provider := &testClientProvider{
+ t: t,
+ scheme: map[string]kindProps{
+ "v1/Error": {
+ shouldErr: fmt.Errorf("kaboom"),
+ },
+ },
+ }
+ _, err = RenderWithClientProvider(c, v, provider)
+ if err == nil || !strings.Contains(err.Error(), "kaboom") {
+ t.Errorf("Expected error from client provider when rendering, got %q", err)
+ }
+}
+
+func TestParallelRenderInternals(t *testing.T) {
+ // Make sure that we can use one Engine to run parallel template renders.
+ e := new(Engine)
+ var wg sync.WaitGroup
+ for i := range 20 {
+ wg.Add(1)
+ go func(i int) {
+ tt := fmt.Sprintf("expect-%d", i)
+ tpls := map[string]renderable{
+ "t": {
+ tpl: `{{.val}}`,
+ vals: map[string]interface{}{"val": tt},
+ },
+ }
+ out, err := e.render(tpls)
+ if err != nil {
+ t.Errorf("Failed to render %s: %s", tt, err)
+ }
+ if out["t"] != tt {
+ t.Errorf("Expected %q, got %q", tt, out["t"])
+ }
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+}
+
+func TestParseErrors(t *testing.T) {
+ vals := common.Values{"Values": map[string]interface{}{}}
+
+ tplsUndefinedFunction := map[string]renderable{
+ "undefined_function": {tpl: `{{foo}}`, vals: vals},
+ }
+ _, err := new(Engine).render(tplsUndefinedFunction)
+ if err == nil {
+ t.Fatalf("Expected failures while rendering: %s", err)
+ }
+ expected := `parse error at (undefined_function:1): function "foo" not defined`
+ if err.Error() != expected {
+ t.Errorf("Expected '%s', got %q", expected, err.Error())
+ }
+}
+
+func TestExecErrors(t *testing.T) {
+ vals := common.Values{"Values": map[string]interface{}{}}
+ cases := []struct {
+ name string
+ tpls map[string]renderable
+ expected string
+ }{
+ {
+ name: "MissingRequired",
+ tpls: map[string]renderable{
+ "missing_required": {tpl: `{{required "foo is required" .Values.foo}}`, vals: vals},
+ },
+ expected: `execution error at (missing_required:1:2): foo is required`,
+ },
+ {
+ name: "MissingRequiredWithColons",
+ tpls: map[string]renderable{
+ "missing_required_with_colons": {tpl: `{{required ":this: message: has many: colons:" .Values.foo}}`, vals: vals},
+ },
+ expected: `execution error at (missing_required_with_colons:1:2): :this: message: has many: colons:`,
+ },
+ {
+ name: "Issue6044",
+ tpls: map[string]renderable{
+ "issue6044": {
+ vals: vals,
+ tpl: `{{ $someEmptyValue := "" }}
+{{ $myvar := "abc" }}
+{{- required (printf "%s: something is missing" $myvar) $someEmptyValue | repeat 0 }}`,
+ },
+ },
+ expected: `execution error at (issue6044:3:4): abc: something is missing`,
+ },
+ {
+ name: "MissingRequiredWithNewlines",
+ tpls: map[string]renderable{
+ "issue9981": {tpl: `{{required "foo is required\nmore info after the break" .Values.foo}}`, vals: vals},
+ },
+ expected: `execution error at (issue9981:1:2): foo is required
+more info after the break`,
+ },
+ {
+ name: "FailWithNewlines",
+ tpls: map[string]renderable{
+ "issue9981": {tpl: `{{fail "something is wrong\nlinebreak"}}`, vals: vals},
+ },
+ expected: `execution error at (issue9981:1:2): something is wrong
+linebreak`,
+ },
+ }
+
+ for _, tt := range cases {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := new(Engine).render(tt.tpls)
+ if err == nil {
+ t.Fatalf("Expected failures while rendering: %s", err)
+ }
+ if err.Error() != tt.expected {
+ t.Errorf("Expected %q, got %q", tt.expected, err.Error())
+ }
+ })
+ }
+}
+
+func TestFailErrors(t *testing.T) {
+ vals := common.Values{"Values": map[string]interface{}{}}
+
+ failtpl := `All your base are belong to us{{ fail "This is an error" }}`
+ tplsFailed := map[string]renderable{
+ "failtpl": {tpl: failtpl, vals: vals},
+ }
+ _, err := new(Engine).render(tplsFailed)
+ if err == nil {
+ t.Fatalf("Expected failures while rendering: %s", err)
+ }
+ expected := `execution error at (failtpl:1:33): This is an error`
+ if err.Error() != expected {
+ t.Errorf("Expected '%s', got %q", expected, err.Error())
+ }
+
+ var e Engine
+ e.LintMode = true
+ out, err := e.render(tplsFailed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectStr := "All your base are belong to us"
+ if gotStr := out["failtpl"]; gotStr != expectStr {
+ t.Errorf("Expected %q, got %q (%v)", expectStr, gotStr, out)
+ }
+}
+
+func TestAllTemplates(t *testing.T) {
+ modTime := time.Now()
+ ch1 := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "ch1"},
+ Templates: []*common.File{
+ {Name: "templates/foo", ModTime: modTime, Data: []byte("foo")},
+ {Name: "templates/bar", ModTime: modTime, Data: []byte("bar")},
+ },
+ }
+ dep1 := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "laboratory mice"},
+ Templates: []*common.File{
+ {Name: "templates/pinky", ModTime: modTime, Data: []byte("pinky")},
+ {Name: "templates/brain", ModTime: modTime, Data: []byte("brain")},
+ },
+ }
+ ch1.AddDependency(dep1)
+
+ dep2 := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "same thing we do every night"},
+ Templates: []*common.File{
+ {Name: "templates/innermost", ModTime: modTime, Data: []byte("innermost")},
+ },
+ }
+ dep1.AddDependency(dep2)
+
+ tpls := allTemplates(ch1, common.Values{})
+ if len(tpls) != 5 {
+ t.Errorf("Expected 5 charts, got %d", len(tpls))
+ }
+}
+
+func TestChartValuesContainsIsRoot(t *testing.T) {
+ modTime := time.Now()
+ ch1 := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "parent"},
+ Templates: []*common.File{
+ {Name: "templates/isroot", ModTime: modTime, Data: []byte("{{.Chart.IsRoot}}")},
+ },
+ }
+ dep1 := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "child"},
+ Templates: []*common.File{
+ {Name: "templates/isroot", ModTime: modTime, Data: []byte("{{.Chart.IsRoot}}")},
+ },
+ }
+ ch1.AddDependency(dep1)
+
+ out, err := Render(ch1, common.Values{})
+ if err != nil {
+ t.Fatalf("failed to render templates: %s", err)
+ }
+ expects := map[string]string{
+ "parent/charts/child/templates/isroot": "false",
+ "parent/templates/isroot": "true",
+ }
+ for file, expect := range expects {
+ if out[file] != expect {
+ t.Errorf("Expected %q, got %q", expect, out[file])
+ }
+ }
+}
+
+func TestRenderDependency(t *testing.T) {
+ deptpl := `{{define "myblock"}}World{{end}}`
+ toptpl := `Hello {{template "myblock"}}`
+ modTime := time.Now()
+ ch := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "outerchart"},
+ Templates: []*common.File{
+ {Name: "templates/outer", ModTime: modTime, Data: []byte(toptpl)},
+ },
+ }
+ ch.AddDependency(&chart.Chart{
+ Metadata: &chart.Metadata{Name: "innerchart"},
+ Templates: []*common.File{
+ {Name: "templates/inner", ModTime: modTime, Data: []byte(deptpl)},
+ },
+ })
+
+ out, err := Render(ch, map[string]interface{}{})
+ if err != nil {
+ t.Fatalf("failed to render chart: %s", err)
+ }
+
+ if len(out) != 2 {
+ t.Errorf("Expected 2, got %d", len(out))
+ }
+
+ expect := "Hello World"
+ if out["outerchart/templates/outer"] != expect {
+ t.Errorf("Expected %q, got %q", expect, out["outer"])
+ }
+
+}
+
+func TestRenderNestedValues(t *testing.T) {
+ innerpath := "templates/inner.tpl"
+ outerpath := "templates/outer.tpl"
+ // Ensure namespacing rules are working.
+ deepestpath := "templates/inner.tpl"
+ checkrelease := "templates/release.tpl"
+ // Ensure subcharts scopes are working.
+ subchartspath := "templates/subcharts.tpl"
+
+ modTime := time.Now()
+ deepest := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "deepest"},
+ Templates: []*common.File{
+ {Name: deepestpath, ModTime: modTime, Data: []byte(`And this same {{.Values.what}} that smiles {{.Values.global.when}}`)},
+ {Name: checkrelease, ModTime: modTime, Data: []byte(`Tomorrow will be {{default "happy" .Release.Name }}`)},
+ },
+ Values: map[string]interface{}{"what": "milkshake", "where": "here"},
+ }
+
+ inner := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "herrick"},
+ Templates: []*common.File{
+ {Name: innerpath, ModTime: modTime, Data: []byte(`Old {{.Values.who}} is still a-flyin'`)},
+ },
+ Values: map[string]interface{}{"who": "Robert", "what": "glasses"},
+ }
+ inner.AddDependency(deepest)
+
+ outer := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "top"},
+ Templates: []*common.File{
+ {Name: outerpath, ModTime: modTime, Data: []byte(`Gather ye {{.Values.what}} while ye may`)},
+ {Name: subchartspath, ModTime: modTime, Data: []byte(`The glorious Lamp of {{.Subcharts.herrick.Subcharts.deepest.Values.where}}, the {{.Subcharts.herrick.Values.what}}`)},
+ },
+ Values: map[string]interface{}{
+ "what": "stinkweed",
+ "who": "me",
+ "herrick": map[string]interface{}{
+ "who": "time",
+ "what": "Sun",
+ },
+ },
+ }
+ outer.AddDependency(inner)
+
+ injValues := map[string]interface{}{
+ "what": "rosebuds",
+ "herrick": map[string]interface{}{
+ "deepest": map[string]interface{}{
+ "what": "flower",
+ "where": "Heaven",
+ },
+ },
+ "global": map[string]interface{}{
+ "when": "to-day",
+ },
+ }
+
+ tmp, err := util.CoalesceValues(outer, injValues)
+ if err != nil {
+ t.Fatalf("Failed to coalesce values: %s", err)
+ }
+
+ inject := common.Values{
+ "Values": tmp,
+ "Chart": outer.Metadata,
+ "Release": common.Values{
+ "Name": "dyin",
+ },
+ }
+
+ t.Logf("Calculated values: %v", inject)
+
+ out, err := Render(outer, inject)
+ if err != nil {
+ t.Fatalf("failed to render templates: %s", err)
+ }
+
+ fullouterpath := "top/" + outerpath
+ if out[fullouterpath] != "Gather ye rosebuds while ye may" {
+ t.Errorf("Unexpected outer: %q", out[fullouterpath])
+ }
+
+ fullinnerpath := "top/charts/herrick/" + innerpath
+ if out[fullinnerpath] != "Old time is still a-flyin'" {
+ t.Errorf("Unexpected inner: %q", out[fullinnerpath])
+ }
+
+ fulldeepestpath := "top/charts/herrick/charts/deepest/" + deepestpath
+ if out[fulldeepestpath] != "And this same flower that smiles to-day" {
+ t.Errorf("Unexpected deepest: %q", out[fulldeepestpath])
+ }
+
+ fullcheckrelease := "top/charts/herrick/charts/deepest/" + checkrelease
+ if out[fullcheckrelease] != "Tomorrow will be dyin" {
+ t.Errorf("Unexpected release: %q", out[fullcheckrelease])
+ }
+
+ fullchecksubcharts := "top/" + subchartspath
+ if out[fullchecksubcharts] != "The glorious Lamp of Heaven, the Sun" {
+ t.Errorf("Unexpected subcharts: %q", out[fullchecksubcharts])
+ }
+}
+
+func TestRenderBuiltinValues(t *testing.T) {
+ modTime := time.Now()
+ inner := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "Latium", APIVersion: chart.APIVersionV2},
+ Templates: []*common.File{
+ {Name: "templates/Lavinia", ModTime: modTime, Data: []byte(`{{.Template.Name}}{{.Chart.Name}}{{.Release.Name}}`)},
+ {Name: "templates/From", ModTime: modTime, Data: []byte(`{{.Files.author | printf "%s"}} {{.Files.Get "book/title.txt"}}`)},
+ },
+ Files: []*common.File{
+ {Name: "author", ModTime: modTime, Data: []byte("Virgil")},
+ {Name: "book/title.txt", ModTime: modTime, Data: []byte("Aeneid")},
+ },
+ }
+
+ outer := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "Troy", APIVersion: chart.APIVersionV2},
+ Templates: []*common.File{
+ {Name: "templates/Aeneas", ModTime: modTime, Data: []byte(`{{.Template.Name}}{{.Chart.Name}}{{.Release.Name}}`)},
+ {Name: "templates/Amata", ModTime: modTime, Data: []byte(`{{.Subcharts.Latium.Chart.Name}} {{.Subcharts.Latium.Files.author | printf "%s"}}`)},
+ },
+ }
+ outer.AddDependency(inner)
+
+ inject := common.Values{
+ "Values": "",
+ "Chart": outer.Metadata,
+ "Release": common.Values{
+ "Name": "Aeneid",
+ },
+ }
+
+ t.Logf("Calculated values: %v", outer)
+
+ out, err := Render(outer, inject)
+ if err != nil {
+ t.Fatalf("failed to render templates: %s", err)
+ }
+
+ expects := map[string]string{
+ "Troy/charts/Latium/templates/Lavinia": "Troy/charts/Latium/templates/LaviniaLatiumAeneid",
+ "Troy/templates/Aeneas": "Troy/templates/AeneasTroyAeneid",
+ "Troy/templates/Amata": "Latium Virgil",
+ "Troy/charts/Latium/templates/From": "Virgil Aeneid",
+ }
+ for file, expect := range expects {
+ if out[file] != expect {
+ t.Errorf("Expected %q, got %q", expect, out[file])
+ }
+ }
+
+}
+
+func TestAlterFuncMap_include(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "conrad"},
+ Templates: []*common.File{
+ {Name: "templates/quote", ModTime: modTime, Data: []byte(`{{include "conrad/templates/_partial" . | indent 2}} dead.`)},
+ {Name: "templates/_partial", ModTime: modTime, Data: []byte(`{{.Release.Name}} - he`)},
+ },
+ }
+
+ // Check nested reference in include FuncMap
+ d := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "nested"},
+ Templates: []*common.File{
+ {Name: "templates/quote", ModTime: modTime, Data: []byte(`{{include "nested/templates/quote" . | indent 2}} dead.`)},
+ {Name: "templates/_partial", ModTime: modTime, Data: []byte(`{{.Release.Name}} - he`)},
+ },
+ }
+
+ v := common.Values{
+ "Values": "",
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "Mistah Kurtz",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := " Mistah Kurtz - he dead."
+ if got := out["conrad/templates/quote"]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, out)
+ }
+
+ _, err = Render(d, v)
+ expectErrName := "nested/templates/quote"
+ if err == nil {
+ t.Errorf("Expected err of nested reference name: %v", expectErrName)
+ }
+}
+
+func TestAlterFuncMap_require(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "conan"},
+ Templates: []*common.File{
+ {Name: "templates/quote", ModTime: modTime, Data: []byte(`All your base are belong to {{ required "A valid 'who' is required" .Values.who }}`)},
+ {Name: "templates/bases", ModTime: modTime, Data: []byte(`All {{ required "A valid 'bases' is required" .Values.bases }} of them!`)},
+ },
+ }
+
+ v := common.Values{
+ "Values": common.Values{
+ "who": "us",
+ "bases": 2,
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "That 90s meme",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectStr := "All your base are belong to us"
+ if gotStr := out["conan/templates/quote"]; gotStr != expectStr {
+ t.Errorf("Expected %q, got %q (%v)", expectStr, gotStr, out)
+ }
+ expectNum := "All 2 of them!"
+ if gotNum := out["conan/templates/bases"]; gotNum != expectNum {
+ t.Errorf("Expected %q, got %q (%v)", expectNum, gotNum, out)
+ }
+
+ // test required without passing in needed values with lint mode on
+ // verifies lint replaces required with an empty string (should not fail)
+ lintValues := common.Values{
+ "Values": common.Values{
+ "who": "us",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "That 90s meme",
+ },
+ }
+ var e Engine
+ e.LintMode = true
+ out, err = e.Render(c, lintValues)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectStr = "All your base are belong to us"
+ if gotStr := out["conan/templates/quote"]; gotStr != expectStr {
+ t.Errorf("Expected %q, got %q (%v)", expectStr, gotStr, out)
+ }
+ expectNum = "All of them!"
+ if gotNum := out["conan/templates/bases"]; gotNum != expectNum {
+ t.Errorf("Expected %q, got %q (%v)", expectNum, gotNum, out)
+ }
+}
+
+func TestAlterFuncMap_tpl(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplFunction"},
+ Templates: []*common.File{
+ {Name: "templates/base", ModTime: time.Now(), Data: []byte(`Evaluate tpl {{tpl "Value: {{ .Values.value}}" .}}`)},
+ },
+ }
+
+ v := common.Values{
+ "Values": common.Values{
+ "value": "myvalue",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := "Evaluate tpl Value: myvalue"
+ if got := out["TplFunction/templates/base"]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, out)
+ }
+}
+
+func TestAlterFuncMap_tplfunc(t *testing.T) {
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplFunction"},
+ Templates: []*common.File{
+ {Name: "templates/base", ModTime: time.Now(), Data: []byte(`Evaluate tpl {{tpl "Value: {{ .Values.value | quote}}" .}}`)},
+ },
+ }
+
+ v := common.Values{
+ "Values": common.Values{
+ "value": "myvalue",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := "Evaluate tpl Value: \"myvalue\""
+ if got := out["TplFunction/templates/base"]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, out)
+ }
+}
+
+func TestAlterFuncMap_tplinclude(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplFunction"},
+ Templates: []*common.File{
+ {Name: "templates/base", ModTime: modTime, Data: []byte(`{{ tpl "{{include ` + "`" + `TplFunction/templates/_partial` + "`" + ` . | quote }}" .}}`)},
+ {Name: "templates/_partial", ModTime: modTime, Data: []byte(`{{.Template.Name}}`)},
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{
+ "value": "myvalue",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := "\"TplFunction/templates/base\""
+ if got := out["TplFunction/templates/base"]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, out)
+ }
+
+}
+
+func TestRenderRecursionLimit(t *testing.T) {
+ modTime := time.Now()
+
+ // endless recursion should produce an error
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "bad"},
+ Templates: []*common.File{
+ {Name: "templates/base", ModTime: modTime, Data: []byte(`{{include "recursion" . }}`)},
+ {Name: "templates/recursion", ModTime: modTime, Data: []byte(`{{define "recursion"}}{{include "recursion" . }}{{end}}`)},
+ },
+ }
+ v := common.Values{
+ "Values": "",
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+ expectErr := "rendering template has a nested reference name: recursion: unable to execute template"
+
+ _, err := Render(c, v)
+ if err == nil || !strings.HasSuffix(err.Error(), expectErr) {
+ t.Errorf("Expected err with suffix: %s", expectErr)
+ }
+
+ // calling the same function many times is ok
+ times := 4000
+ phrase := "All work and no play makes Jack a dull boy"
+ printFunc := `{{define "overlook"}}{{printf "` + phrase + `\n"}}{{end}}`
+ var repeatedIncl strings.Builder
+ for range times {
+ repeatedIncl.WriteString(`{{include "overlook" . }}`)
+ }
+
+ d := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "overlook"},
+ Templates: []*common.File{
+ {Name: "templates/quote", ModTime: modTime, Data: []byte(repeatedIncl.String())},
+ {Name: "templates/_function", ModTime: modTime, Data: []byte(printFunc)},
+ },
+ }
+
+ out, err := Render(d, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var expect string
+ for range times {
+ expect += phrase + "\n"
+ }
+ if got := out["overlook/templates/quote"]; got != expect {
+ t.Errorf("Expected %q, got %q (%v)", expect, got, out)
+ }
+
+}
+
+func TestRenderLoadTemplateForTplFromFile(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplLoadFromFile"},
+ Templates: []*common.File{
+ {Name: "templates/base", ModTime: modTime, Data: []byte(`{{ tpl (.Files.Get .Values.filename) . }}`)},
+ {Name: "templates/_function", ModTime: modTime, Data: []byte(`{{define "test-function"}}test-function{{end}}`)},
+ },
+ Files: []*common.File{
+ {Name: "test", ModTime: modTime, Data: []byte(`{{ tpl (.Files.Get .Values.filename2) .}}`)},
+ {Name: "test2", ModTime: modTime, Data: []byte(`{{include "test-function" .}}{{define "nested-define"}}nested-define-content{{end}} {{include "nested-define" .}}`)},
+ },
+ }
+
+ v := common.Values{
+ "Values": common.Values{
+ "filename": "test",
+ "filename2": "test2",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expect := "test-function nested-define-content"
+ if got := out["TplLoadFromFile/templates/base"]; got != expect {
+ t.Fatalf("Expected %q, got %q", expect, got)
+ }
+}
+
+func TestRenderTplEmpty(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplEmpty"},
+ Templates: []*common.File{
+ {Name: "templates/empty-string", ModTime: modTime, Data: []byte(`{{tpl "" .}}`)},
+ {Name: "templates/empty-action", ModTime: modTime, Data: []byte(`{{tpl "{{ \"\"}}" .}}`)},
+ {Name: "templates/only-defines", ModTime: modTime, Data: []byte(`{{tpl "{{define \"not-invoked\"}}not-rendered{{end}}" .}}`)},
+ },
+ }
+ v := common.Values{
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expects := map[string]string{
+ "TplEmpty/templates/empty-string": "",
+ "TplEmpty/templates/empty-action": "",
+ "TplEmpty/templates/only-defines": "",
+ }
+ for file, expect := range expects {
+ if out[file] != expect {
+ t.Errorf("Expected %q, got %q", expect, out[file])
+ }
+ }
+}
+
+func TestRenderTplTemplateNames(t *testing.T) {
+ modTime := time.Now()
+ // .Template.BasePath and .Name make it through
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplTemplateNames"},
+ Templates: []*common.File{
+ {Name: "templates/default-basepath", ModTime: modTime, Data: []byte(`{{tpl "{{ .Template.BasePath }}" .}}`)},
+ {Name: "templates/default-name", ModTime: modTime, Data: []byte(`{{tpl "{{ .Template.Name }}" .}}`)},
+ {Name: "templates/modified-basepath", ModTime: modTime, Data: []byte(`{{tpl "{{ .Template.BasePath }}" .Values.dot}}`)},
+ {Name: "templates/modified-name", ModTime: modTime, Data: []byte(`{{tpl "{{ .Template.Name }}" .Values.dot}}`)},
+ {Name: "templates/modified-field", ModTime: modTime, Data: []byte(`{{tpl "{{ .Template.Field }}" .Values.dot}}`)},
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{
+ "dot": common.Values{
+ "Template": common.Values{
+ "BasePath": "path/to/template",
+ "Name": "name-of-template",
+ "Field": "extra-field",
+ },
+ },
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expects := map[string]string{
+ "TplTemplateNames/templates/default-basepath": "TplTemplateNames/templates",
+ "TplTemplateNames/templates/default-name": "TplTemplateNames/templates/default-name",
+ "TplTemplateNames/templates/modified-basepath": "path/to/template",
+ "TplTemplateNames/templates/modified-name": "name-of-template",
+ "TplTemplateNames/templates/modified-field": "extra-field",
+ }
+ for file, expect := range expects {
+ if out[file] != expect {
+ t.Errorf("Expected %q, got %q", expect, out[file])
+ }
+ }
+}
+
+func TestRenderTplRedefines(t *testing.T) {
+ modTime := time.Now()
+ // Redefining a template inside 'tpl' does not affect the outer definition
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplRedefines"},
+ Templates: []*common.File{
+ {Name: "templates/_partials", ModTime: modTime, Data: []byte(`{{define "partial"}}original-in-partial{{end}}`)},
+ {Name: "templates/partial", ModTime: modTime, Data: []byte(
+ `before: {{include "partial" .}}\n{{tpl .Values.partialText .}}\nafter: {{include "partial" .}}`,
+ )},
+ {Name: "templates/manifest", Data: []byte(
+ `{{define "manifest"}}original-in-manifest{{end}}` +
+ `before: {{include "manifest" .}}\n{{tpl .Values.manifestText .}}\nafter: {{include "manifest" .}}`,
+ )},
+ {Name: "templates/manifest-only", Data: []byte(
+ `{{define "manifest-only"}}only-in-manifest{{end}}` +
+ `before: {{include "manifest-only" .}}\n{{tpl .Values.manifestOnlyText .}}\nafter: {{include "manifest-only" .}}`,
+ )},
+ {Name: "templates/nested", Data: []byte(
+ `{{define "nested"}}original-in-manifest{{end}}` +
+ `{{define "nested-outer"}}original-outer-in-manifest{{end}}` +
+ `before: {{include "nested" .}} {{include "nested-outer" .}}\n` +
+ `{{tpl .Values.nestedText .}}\n` +
+ `after: {{include "nested" .}} {{include "nested-outer" .}}`,
+ )},
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{
+ "partialText": `{{define "partial"}}redefined-in-tpl{{end}}tpl: {{include "partial" .}}`,
+ "manifestText": `{{define "manifest"}}redefined-in-tpl{{end}}tpl: {{include "manifest" .}}`,
+ "manifestOnlyText": `tpl: {{include "manifest-only" .}}`,
+ "nestedText": `{{define "nested"}}redefined-in-tpl{{end}}` +
+ `{{define "nested-outer"}}redefined-outer-in-tpl{{end}}` +
+ `before-inner-tpl: {{include "nested" .}} {{include "nested-outer" . }}\n` +
+ `{{tpl .Values.innerText .}}\n` +
+ `after-inner-tpl: {{include "nested" .}} {{include "nested-outer" . }}`,
+ "innerText": `{{define "nested"}}redefined-in-inner-tpl{{end}}inner-tpl: {{include "nested" .}} {{include "nested-outer" . }}`,
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expects := map[string]string{
+ "TplRedefines/templates/partial": `before: original-in-partial\ntpl: redefined-in-tpl\nafter: original-in-partial`,
+ "TplRedefines/templates/manifest": `before: original-in-manifest\ntpl: redefined-in-tpl\nafter: original-in-manifest`,
+ "TplRedefines/templates/manifest-only": `before: only-in-manifest\ntpl: only-in-manifest\nafter: only-in-manifest`,
+ "TplRedefines/templates/nested": `before: original-in-manifest original-outer-in-manifest\n` +
+ `before-inner-tpl: redefined-in-tpl redefined-outer-in-tpl\n` +
+ `inner-tpl: redefined-in-inner-tpl redefined-outer-in-tpl\n` +
+ `after-inner-tpl: redefined-in-tpl redefined-outer-in-tpl\n` +
+ `after: original-in-manifest original-outer-in-manifest`,
+ }
+ for file, expect := range expects {
+ if out[file] != expect {
+ t.Errorf("Expected %q, got %q", expect, out[file])
+ }
+ }
+}
+
+func TestRenderTplMissingKey(t *testing.T) {
+ // Rendering a missing key results in empty/zero output.
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplMissingKey"},
+ Templates: []*common.File{
+ {Name: "templates/manifest", ModTime: time.Now(), Data: []byte(
+ `missingValue: {{tpl "{{.Values.noSuchKey}}" .}}`,
+ )},
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{},
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ out, err := Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expects := map[string]string{
+ "TplMissingKey/templates/manifest": `missingValue: `,
+ }
+ for file, expect := range expects {
+ if out[file] != expect {
+ t.Errorf("Expected %q, got %q", expect, out[file])
+ }
+ }
+}
+
+func TestRenderTplMissingKeyString(t *testing.T) {
+ // Rendering a missing key results in error
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "TplMissingKeyStrict"},
+ Templates: []*common.File{
+ {Name: "templates/manifest", ModTime: time.Now(), Data: []byte(
+ `missingValue: {{tpl "{{.Values.noSuchKey}}" .}}`,
+ )},
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{},
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ e := new(Engine)
+ e.Strict = true
+
+ out, err := e.Render(c, v)
+ if err == nil {
+ t.Errorf("Expected error, got %v", out)
+ return
+ }
+ errTxt := fmt.Sprint(err)
+ if !strings.Contains(errTxt, "noSuchKey") {
+ t.Errorf("Expected error to contain 'noSuchKey', got %s", errTxt)
+ }
+
+}
+
+func TestNestedHelpersProducesMultilineStacktrace(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "NestedHelperFunctions"},
+ Templates: []*common.File{
+ {Name: "templates/svc.yaml", ModTime: modTime, Data: []byte(
+ `name: {{ include "nested_helper.name" . }}`,
+ )},
+ {Name: "templates/_helpers_1.tpl", ModTime: modTime, Data: []byte(
+ `{{- define "nested_helper.name" -}}{{- include "common.names.get_name" . -}}{{- end -}}`,
+ )},
+ {Name: "charts/common/templates/_helpers_2.tpl", ModTime: modTime, Data: []byte(
+ `{{- define "common.names.get_name" -}}{{- .Values.nonexistant.key | trunc 63 | trimSuffix "-" -}}{{- end -}}`,
+ )},
+ },
+ }
+
+ expectedErrorMessage := `NestedHelperFunctions/templates/svc.yaml:1:9
+ executing "NestedHelperFunctions/templates/svc.yaml" at :
+ error calling include:
+NestedHelperFunctions/templates/_helpers_1.tpl:1:39
+ executing "nested_helper.name" at :
+ error calling include:
+NestedHelperFunctions/charts/common/templates/_helpers_2.tpl:1:49
+ executing "common.names.get_name" at <.Values.nonexistant.key>:
+ nil pointer evaluating interface {}.key`
+
+ v := common.Values{}
+
+ val, _ := util.CoalesceValues(c, v)
+ vals := map[string]interface{}{
+ "Values": val.AsMap(),
+ }
+ _, err := Render(c, vals)
+
+ assert.NotNil(t, err)
+ assert.Equal(t, expectedErrorMessage, err.Error())
+}
+
+func TestMultilineNoTemplateAssociatedError(t *testing.T) {
+ modTime := time.Now()
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "multiline"},
+ Templates: []*common.File{
+ {Name: "templates/svc.yaml", ModTime: modTime, Data: []byte(
+ `name: {{ include "nested_helper.name" . }}`,
+ )},
+ {Name: "templates/test.yaml", ModTime: modTime, Data: []byte(
+ `{{ toYaml .Values }}`,
+ )},
+ {Name: "charts/common/templates/_helpers_2.tpl", ModTime: modTime, Data: []byte(
+ `{{ toYaml .Values }}`,
+ )},
+ },
+ }
+
+ expectedErrorMessage := `multiline/templates/svc.yaml:1:9
+ executing "multiline/templates/svc.yaml" at :
+ error calling include:
+template: no template "nested_helper.name" associated with template "gotpl"`
+
+ v := common.Values{}
+
+ val, _ := util.CoalesceValues(c, v)
+ vals := map[string]interface{}{
+ "Values": val.AsMap(),
+ }
+ _, err := Render(c, vals)
+
+ assert.NotNil(t, err)
+ assert.Equal(t, expectedErrorMessage, err.Error())
+}
+
+func TestRenderCustomTemplateFuncs(t *testing.T) {
+ modTime := time.Now()
+
+ // Create a chart with two templates that use custom functions
+ c := &chart.Chart{
+ Metadata: &chart.Metadata{Name: "CustomFunc"},
+ Templates: []*common.File{
+ {
+ Name: "templates/manifest",
+ ModTime: modTime,
+ Data: []byte(`{{exclaim .Values.message}}`),
+ },
+ {
+ Name: "templates/override",
+ ModTime: modTime,
+ Data: []byte(`{{ upper .Values.message }}`),
+ },
+ },
+ }
+ v := common.Values{
+ "Values": common.Values{
+ "message": "hello",
+ },
+ "Chart": c.Metadata,
+ "Release": common.Values{
+ "Name": "TestRelease",
+ },
+ }
+
+ // Define a custom template function "exclaim" that appends "!!!" to a string and override "upper" function
+ customFuncs := template.FuncMap{
+ "exclaim": func(input string) string {
+ return input + "!!!"
+ },
+ "upper": func(s string) string {
+ return "custom:" + s
+ },
+ }
+
+ // Create an engine instance and set the CustomTemplateFuncs.
+ e := new(Engine)
+ e.CustomTemplateFuncs = customFuncs
+
+ // Render the chart.
+ out, err := e.Render(c, v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Expected output should be "hello!!!".
+ expected := "hello!!!"
+ key := "CustomFunc/templates/manifest"
+ if rendered, ok := out[key]; !ok || rendered != expected {
+ t.Errorf("Expected %q, got %q", expected, rendered)
+ }
+
+ // Verify that the rendered template used the custom "upper" function.
+ expected = "custom:hello"
+ key = "CustomFunc/templates/override"
+ if rendered, ok := out[key]; !ok || rendered != expected {
+ t.Errorf("Expected %q, got %q", expected, rendered)
+ }
+}
+
+func TestTraceableError_SimpleForm(t *testing.T) {
+ testStrings := []string{
+ "function_not_found/templates/secret.yaml: error calling include",
+ }
+ for _, errString := range testStrings {
+ trace, done := parseTemplateSimpleErrorString(errString)
+ if !done {
+ t.Errorf("Expected parse to pass but did not")
+ }
+ if trace.message != "error calling include" {
+ t.Errorf("Expected %q, got %q", errString, trace.message)
+ }
+ }
+}
+func TestTraceableError_ExecutingForm(t *testing.T) {
+ testStrings := [][]string{
+ {"function_not_found/templates/secret.yaml:6:11: executing \"function_not_found/templates/secret.yaml\" at : ", "function_not_found/templates/secret.yaml:6:11"},
+ {"divide_by_zero/templates/secret.yaml:6:11: executing \"divide_by_zero/templates/secret.yaml\" at : ", "divide_by_zero/templates/secret.yaml:6:11"},
+ }
+ for _, errTuple := range testStrings {
+ errString := errTuple[0]
+ expectedLocation := errTuple[1]
+ trace, done := parseTemplateExecutingAtErrorType(errString)
+ if !done {
+ t.Errorf("Expected parse to pass but did not")
+ }
+ if trace.location != expectedLocation {
+ t.Errorf("Expected %q, got %q", expectedLocation, trace.location)
+ }
+ }
+}
+
+func TestTraceableError_NoTemplateForm(t *testing.T) {
+ testStrings := []string{
+ "no template \"common.names.get_name\" associated with template \"gotpl\"",
+ }
+ for _, errString := range testStrings {
+ trace, done := parseTemplateNoTemplateError(errString, errString)
+ if !done {
+ t.Errorf("Expected parse to pass but did not")
+ }
+ if trace.message != errString {
+ t.Errorf("Expected %q, got %q", errString, trace.message)
+ }
+ }
+}
diff --git a/helm/pkg/engine/files.go b/helm/pkg/engine/files.go
new file mode 100644
index 000000000..7834cac2c
--- /dev/null
+++ b/helm/pkg/engine/files.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "encoding/base64"
+ "path"
+ "strings"
+
+ "github.com/gobwas/glob"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+)
+
+// files is a map of files in a chart that can be accessed from a template.
+type files map[string][]byte
+
+// NewFiles creates a new files from chart files.
+// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files.
+func newFiles(from []*common.File) files {
+ files := make(map[string][]byte)
+ for _, f := range from {
+ files[f.Name] = f.Data
+ }
+ return files
+}
+
+// GetBytes gets a file by path.
+//
+// The returned data is raw. In a template context, this is identical to calling
+// {{index .Files $path}}.
+//
+// This is intended to be accessed from within a template, so a missed key returns
+// an empty []byte.
+func (f files) GetBytes(name string) []byte {
+ if v, ok := f[name]; ok {
+ return v
+ }
+ return []byte{}
+}
+
+// Get returns a string representation of the given file.
+//
+// Fetch the contents of a file as a string. It is designed to be called in a
+// template.
+//
+// {{.Files.Get "foo"}}
+func (f files) Get(name string) string {
+ return string(f.GetBytes(name))
+}
+
+// Glob takes a glob pattern and returns another files object only containing
+// matched files.
+//
+// This is designed to be called from a template.
+//
+// {{ range $name, $content := .Files.Glob("foo/**") }}
+// {{ $name }}: |
+// {{ .Files.Get($name) | indent 4 }}{{ end }}
+func (f files) Glob(pattern string) files {
+ g, err := glob.Compile(pattern, '/')
+ if err != nil {
+ g, _ = glob.Compile("**")
+ }
+
+ nf := newFiles(nil)
+ for name, contents := range f {
+ if g.Match(name) {
+ nf[name] = contents
+ }
+ }
+
+ return nf
+}
+
+// AsConfig turns a Files group and flattens it to a YAML map suitable for
+// including in the 'data' section of a Kubernetes ConfigMap definition.
+// Duplicate keys will be overwritten, so be aware that your file names
+// (regardless of path) should be unique.
+//
+// This is designed to be called from a template, and will return empty string
+// (via toYAML function) if it cannot be serialized to YAML, or if the Files
+// object is nil.
+//
+// The output will not be indented, so you will want to pipe this to the
+// 'indent' template function.
+//
+// data:
+//
+// {{ .Files.Glob("config/**").AsConfig() | indent 4 }}
+func (f files) AsConfig() string {
+ if f == nil {
+ return ""
+ }
+
+ m := make(map[string]string)
+
+ // Explicitly convert to strings, and file names
+ for k, v := range f {
+ m[path.Base(k)] = string(v)
+ }
+
+ return toYAML(m)
+}
+
+// AsSecrets returns the base64-encoded value of a Files object suitable for
+// including in the 'data' section of a Kubernetes Secret definition.
+// Duplicate keys will be overwritten, so be aware that your file names
+// (regardless of path) should be unique.
+//
+// This is designed to be called from a template, and will return empty string
+// (via toYAML function) if it cannot be serialized to YAML, or if the Files
+// object is nil.
+//
+// The output will not be indented, so you will want to pipe this to the
+// 'indent' template function.
+//
+// data:
+//
+// {{ .Files.Glob("secrets/*").AsSecrets() | indent 4 }}
+func (f files) AsSecrets() string {
+ if f == nil {
+ return ""
+ }
+
+ m := make(map[string]string)
+
+ for k, v := range f {
+ m[path.Base(k)] = base64.StdEncoding.EncodeToString(v)
+ }
+
+ return toYAML(m)
+}
+
+// Lines returns each line of a named file (split by "\n") as a slice, so it can
+// be ranged over in your templates.
+//
+// This is designed to be called from a template.
+//
+// {{ range .Files.Lines "foo/bar.html" }}
+// {{ . }}{{ end }}
+func (f files) Lines(path string) []string {
+ if f == nil || f[path] == nil {
+ return []string{}
+ }
+ s := string(f[path])
+ if s[len(s)-1] == '\n' {
+ s = s[:len(s)-1]
+ }
+ return strings.Split(s, "\n")
+}
diff --git a/helm/pkg/engine/files_test.go b/helm/pkg/engine/files_test.go
new file mode 100644
index 000000000..e53263c76
--- /dev/null
+++ b/helm/pkg/engine/files_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var cases = []struct {
+ path, data string
+}{
+ {"ship/captain.txt", "The Captain"},
+ {"ship/stowaway.txt", "Legatt"},
+ {"story/name.txt", "The Secret Sharer"},
+ {"story/author.txt", "Joseph Conrad"},
+ {"multiline/test.txt", "bar\nfoo\n"},
+ {"multiline/test_with_blank_lines.txt", "bar\nfoo\n\n\n"},
+}
+
+func getTestFiles() files {
+ a := make(files, len(cases))
+ for _, c := range cases {
+ a[c.path] = []byte(c.data)
+ }
+ return a
+}
+
+func TestNewFiles(t *testing.T) {
+ files := getTestFiles()
+ if len(files) != len(cases) {
+ t.Errorf("Expected len() = %d, got %d", len(cases), len(files))
+ }
+
+ for i, f := range cases {
+ if got := string(files.GetBytes(f.path)); got != f.data {
+ t.Errorf("%d: expected %q, got %q", i, f.data, got)
+ }
+ if got := files.Get(f.path); got != f.data {
+ t.Errorf("%d: expected %q, got %q", i, f.data, got)
+ }
+ }
+}
+
+func TestFileGlob(t *testing.T) {
+ as := assert.New(t)
+
+ f := getTestFiles()
+
+ matched := f.Glob("story/**")
+
+ as.Len(matched, 2, "Should be two files in glob story/**")
+ as.Equal("Joseph Conrad", matched.Get("story/author.txt"))
+}
+
+func TestToConfig(t *testing.T) {
+ as := assert.New(t)
+
+ f := getTestFiles()
+ out := f.Glob("**/captain.txt").AsConfig()
+ as.Equal("captain.txt: The Captain", out)
+
+ out = f.Glob("ship/**").AsConfig()
+ as.Equal("captain.txt: The Captain\nstowaway.txt: Legatt", out)
+}
+
+func TestToSecret(t *testing.T) {
+ as := assert.New(t)
+
+ f := getTestFiles()
+
+ out := f.Glob("ship/**").AsSecrets()
+ as.Equal("captain.txt: VGhlIENhcHRhaW4=\nstowaway.txt: TGVnYXR0", out)
+}
+
+func TestLines(t *testing.T) {
+ as := assert.New(t)
+
+ f := getTestFiles()
+
+ out := f.Lines("multiline/test.txt")
+ as.Len(out, 2)
+
+ as.Equal("bar", out[0])
+}
+
+func TestBlankLines(t *testing.T) {
+ as := assert.New(t)
+
+ f := getTestFiles()
+
+ out := f.Lines("multiline/test_with_blank_lines.txt")
+ as.Len(out, 4)
+
+ as.Equal("bar", out[0])
+ as.Equal("", out[3])
+}
diff --git a/helm/pkg/engine/funcs.go b/helm/pkg/engine/funcs.go
new file mode 100644
index 000000000..a97f8f104
--- /dev/null
+++ b/helm/pkg/engine/funcs.go
@@ -0,0 +1,234 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "bytes"
+ "encoding/json"
+ "maps"
+ "strings"
+ "text/template"
+
+ "github.com/BurntSushi/toml"
+ "github.com/Masterminds/sprig/v3"
+ "sigs.k8s.io/yaml"
+ goYaml "sigs.k8s.io/yaml/goyaml.v3"
+)
+
+// funcMap returns a mapping of all of the functions that Engine has.
+//
+// Because some functions are late-bound (e.g. contain context-sensitive
+// data), the functions may not all perform identically outside of an Engine
+// as they will inside of an Engine.
+//
+// Known late-bound functions:
+//
+// - "include"
+// - "tpl"
+//
+// These are late-bound in Engine.Render(). The
+// version included in the FuncMap is a placeholder.
+func funcMap() template.FuncMap {
+ f := sprig.TxtFuncMap()
+ delete(f, "env")
+ delete(f, "expandenv")
+
+ // Add some extra functionality
+ extra := template.FuncMap{
+ "toToml": toTOML,
+ "fromToml": fromTOML,
+ "toYaml": toYAML,
+ "mustToYaml": mustToYAML,
+ "toYamlPretty": toYAMLPretty,
+ "fromYaml": fromYAML,
+ "fromYamlArray": fromYAMLArray,
+ "toJson": toJSON,
+ "mustToJson": mustToJSON,
+ "fromJson": fromJSON,
+ "fromJsonArray": fromJSONArray,
+
+ // This is a placeholder for the "include" function, which is
+ // late-bound to a template. By declaring it here, we preserve the
+ // integrity of the linter.
+ "include": func(string, interface{}) string { return "not implemented" },
+ "tpl": func(string, interface{}) interface{} { return "not implemented" },
+ "required": func(string, interface{}) (interface{}, error) { return "not implemented", nil },
+ // Provide a placeholder for the "lookup" function, which requires a kubernetes
+ // connection.
+ "lookup": func(string, string, string, string) (map[string]interface{}, error) {
+ return map[string]interface{}{}, nil
+ },
+ }
+
+ maps.Copy(f, extra)
+
+ return f
+}
+
+// toYAML takes an interface, marshals it to yaml, and returns a string. It will
+// always return a string, even on marshal error (empty string).
+//
+// This is designed to be called from a template.
+func toYAML(v interface{}) string {
+ data, err := yaml.Marshal(v)
+ if err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ return strings.TrimSuffix(string(data), "\n")
+}
+
+// mustToYAML takes an interface, marshals it to yaml, and returns a string.
+// It will panic if there is an error.
+//
+// This is designed to be called from a template when need to ensure that the
+// output YAML is valid.
+func mustToYAML(v interface{}) string {
+ data, err := yaml.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ return strings.TrimSuffix(string(data), "\n")
+}
+
+func toYAMLPretty(v interface{}) string {
+ var data bytes.Buffer
+ encoder := goYaml.NewEncoder(&data)
+ encoder.SetIndent(2)
+ err := encoder.Encode(v)
+
+ if err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ return strings.TrimSuffix(data.String(), "\n")
+}
+
+// fromYAML converts a YAML document into a map[string]interface{}.
+//
+// This is not a general-purpose YAML parser, and will not parse all valid
+// YAML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string into
+// m["Error"] in the returned map.
+func fromYAML(str string) map[string]interface{} {
+ m := map[string]interface{}{}
+
+ if err := yaml.Unmarshal([]byte(str), &m); err != nil {
+ m["Error"] = err.Error()
+ }
+ return m
+}
+
+// fromYAMLArray converts a YAML array into a []interface{}.
+//
+// This is not a general-purpose YAML parser, and will not parse all valid
+// YAML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string as
+// the first and only item in the returned array.
+func fromYAMLArray(str string) []interface{} {
+ a := []interface{}{}
+
+ if err := yaml.Unmarshal([]byte(str), &a); err != nil {
+ a = []interface{}{err.Error()}
+ }
+ return a
+}
+
+// toTOML takes an interface, marshals it to toml, and returns a string. It will
+// always return a string, even on marshal error (empty string).
+//
+// This is designed to be called from a template.
+func toTOML(v interface{}) string {
+ b := bytes.NewBuffer(nil)
+ e := toml.NewEncoder(b)
+ err := e.Encode(v)
+ if err != nil {
+ return err.Error()
+ }
+ return b.String()
+}
+
+// fromTOML converts a TOML document into a map[string]interface{}.
+//
+// This is not a general-purpose TOML parser, and will not parse all valid
+// TOML documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string into
+// m["Error"] in the returned map.
+func fromTOML(str string) map[string]interface{} {
+ m := make(map[string]interface{})
+
+ if err := toml.Unmarshal([]byte(str), &m); err != nil {
+ m["Error"] = err.Error()
+ }
+ return m
+}
+
+// toJSON takes an interface, marshals it to json, and returns a string. It will
+// always return a string, even on marshal error (empty string).
+//
+// This is designed to be called from a template.
+func toJSON(v interface{}) string {
+ data, err := json.Marshal(v)
+ if err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ return string(data)
+}
+
+// mustToJSON takes an interface, marshals it to json, and returns a string.
+// It will panic if there is an error.
+//
+// This is designed to be called from a template when need to ensure that the
+// output JSON is valid.
+func mustToJSON(v interface{}) string {
+ data, err := json.Marshal(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
+// fromJSON converts a JSON document into a map[string]interface{}.
+//
+// This is not a general-purpose JSON parser, and will not parse all valid
+// JSON documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string into
+// m["Error"] in the returned map.
+func fromJSON(str string) map[string]interface{} {
+ m := make(map[string]interface{})
+
+ if err := json.Unmarshal([]byte(str), &m); err != nil {
+ m["Error"] = err.Error()
+ }
+ return m
+}
+
+// fromJSONArray converts a JSON array into a []interface{}.
+//
+// This is not a general-purpose JSON parser, and will not parse all valid
+// JSON documents. Additionally, because its intended use is within templates
+// it tolerates errors. It will insert the returned error message string as
+// the first and only item in the returned array.
+func fromJSONArray(str string) []interface{} {
+ a := []interface{}{}
+
+ if err := json.Unmarshal([]byte(str), &a); err != nil {
+ a = []interface{}{err.Error()}
+ }
+ return a
+}
diff --git a/helm/pkg/engine/funcs_test.go b/helm/pkg/engine/funcs_test.go
new file mode 100644
index 000000000..71a72e2e4
--- /dev/null
+++ b/helm/pkg/engine/funcs_test.go
@@ -0,0 +1,243 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "strings"
+ "testing"
+ "text/template"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFuncs(t *testing.T) {
+ //TODO write tests for failure cases
+ tests := []struct {
+ tpl, expect string
+ vars interface{}
+ }{{
+ tpl: `{{ toYaml . }}`,
+ expect: `foo: bar`,
+ vars: map[string]interface{}{"foo": "bar"},
+ }, {
+ tpl: `{{ toYamlPretty . }}`,
+ expect: "baz:\n - 1\n - 2\n - 3",
+ vars: map[string]interface{}{"baz": []int{1, 2, 3}},
+ }, {
+ tpl: `{{ toToml . }}`,
+ expect: "foo = \"bar\"\n",
+ vars: map[string]interface{}{"foo": "bar"},
+ }, {
+ tpl: `{{ fromToml . }}`,
+ expect: "map[hello:world]",
+ vars: `hello = "world"`,
+ }, {
+ tpl: `{{ fromToml . }}`,
+ expect: "map[table:map[keyInTable:valueInTable subtable:map[keyInSubtable:valueInSubTable]]]",
+ vars: `
+[table]
+keyInTable = "valueInTable"
+[table.subtable]
+keyInSubtable = "valueInSubTable"`,
+ }, {
+ tpl: `{{ fromToml . }}`,
+ expect: "map[tableArray:[map[keyInElement0:valueInElement0] map[keyInElement1:valueInElement1]]]",
+ vars: `
+[[tableArray]]
+keyInElement0 = "valueInElement0"
+[[tableArray]]
+keyInElement1 = "valueInElement1"`,
+ }, {
+ tpl: `{{ fromToml . }}`,
+ expect: "map[Error:toml: line 1: unexpected EOF; expected key separator '=']",
+ vars: "one",
+ }, {
+ tpl: `{{ toJson . }}`,
+ expect: `{"foo":"bar"}`,
+ vars: map[string]interface{}{"foo": "bar"},
+ }, {
+ tpl: `{{ fromYaml . }}`,
+ expect: "map[hello:world]",
+ vars: `hello: world`,
+ }, {
+ tpl: `{{ fromYamlArray . }}`,
+ expect: "[one 2 map[name:helm]]",
+ vars: "- one\n- 2\n- name: helm\n",
+ }, {
+ tpl: `{{ fromYamlArray . }}`,
+ expect: "[one 2 map[name:helm]]",
+ vars: `["one", 2, { "name": "helm" }]`,
+ }, {
+ // Regression for https://github.com/helm/helm/issues/2271
+ tpl: `{{ toToml . }}`,
+ expect: "[mast]\n sail = \"white\"\n",
+ vars: map[string]map[string]string{"mast": {"sail": "white"}},
+ }, {
+ tpl: `{{ fromYaml . }}`,
+ expect: "map[Error:error unmarshaling JSON: while decoding JSON: json: cannot unmarshal array into Go value of type map[string]interface {}]",
+ vars: "- one\n- two\n",
+ }, {
+ tpl: `{{ fromJson .}}`,
+ expect: `map[hello:world]`,
+ vars: `{"hello":"world"}`,
+ }, {
+ tpl: `{{ fromJson . }}`,
+ expect: `map[Error:json: cannot unmarshal array into Go value of type map[string]interface {}]`,
+ vars: `["one", "two"]`,
+ }, {
+ tpl: `{{ fromJsonArray . }}`,
+ expect: `[one 2 map[name:helm]]`,
+ vars: `["one", 2, { "name": "helm" }]`,
+ }, {
+ tpl: `{{ fromJsonArray . }}`,
+ expect: `[json: cannot unmarshal object into Go value of type []interface {}]`,
+ vars: `{"hello": "world"}`,
+ }, {
+ tpl: `{{ merge .dict (fromYaml .yaml) }}`,
+ expect: `map[a:map[b:c]]`,
+ vars: map[string]interface{}{"dict": map[string]interface{}{"a": map[string]interface{}{"b": "c"}}, "yaml": `{"a":{"b":"d"}}`},
+ }, {
+ tpl: `{{ merge (fromYaml .yaml) .dict }}`,
+ expect: `map[a:map[b:d]]`,
+ vars: map[string]interface{}{"dict": map[string]interface{}{"a": map[string]interface{}{"b": "c"}}, "yaml": `{"a":{"b":"d"}}`},
+ }, {
+ tpl: `{{ fromYaml . }}`,
+ expect: `map[Error:error unmarshaling JSON: while decoding JSON: json: cannot unmarshal array into Go value of type map[string]interface {}]`,
+ vars: `["one", "two"]`,
+ }, {
+ tpl: `{{ fromYamlArray . }}`,
+ expect: `[error unmarshaling JSON: while decoding JSON: json: cannot unmarshal object into Go value of type []interface {}]`,
+ vars: `hello: world`,
+ }, {
+ // This should never result in a network lookup. Regression for #7955
+ tpl: `{{ lookup "v1" "Namespace" "" "unlikelynamespace99999999" }}`,
+ expect: `map[]`,
+ vars: `["one", "two"]`,
+ }}
+
+ for _, tt := range tests {
+ var b strings.Builder
+ err := template.Must(template.New("test").Funcs(funcMap()).Parse(tt.tpl)).Execute(&b, tt.vars)
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expect, b.String(), tt.tpl)
+ }
+
+ loopMap := map[string]interface{}{
+ "foo": "bar",
+ }
+ loopMap["loop"] = []interface{}{loopMap}
+
+ mustFuncsTests := []struct {
+ tpl string
+ expect interface{}
+ vars interface{}
+ }{{
+ tpl: `{{ mustToYaml . }}`,
+ vars: loopMap,
+ }, {
+ tpl: `{{ mustToJson . }}`,
+ vars: loopMap,
+ }, {
+ tpl: `{{ toYaml . }}`,
+ expect: "", // should return empty string and swallow error
+ vars: loopMap,
+ }, {
+ tpl: `{{ toJson . }}`,
+ expect: "", // should return empty string and swallow error
+ vars: loopMap,
+ },
+ }
+
+ for _, tt := range mustFuncsTests {
+ var b strings.Builder
+ err := template.Must(template.New("test").Funcs(funcMap()).Parse(tt.tpl)).Execute(&b, tt.vars)
+ if tt.expect != nil {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expect, b.String(), tt.tpl)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+}
+
+// This test to check a function provided by sprig is due to a change in a
+// dependency of sprig. mergo in v0.3.9 changed the way it merges and only does
+// public fields (i.e. those starting with a capital letter). This test, from
+// sprig, fails in the new version. This is a behavior change for mergo that
+// impacts sprig and Helm users. This test will help us to not update to a
+// version of mergo (even accidentally) that causes a breaking change. See
+// sprig changelog and notes for more details.
+// Note, Go modules assume semver is never broken. So, there is no way to tell
+// the tooling to not update to a minor or patch version. `go install` could
+// be used to accidentally update mergo. This test and message should catch
+// the problem and explain why it's happening.
+func TestMerge(t *testing.T) {
+ dict := map[string]interface{}{
+ "src2": map[string]interface{}{
+ "h": 10,
+ "i": "i",
+ "j": "j",
+ },
+ "src1": map[string]interface{}{
+ "a": 1,
+ "b": 2,
+ "d": map[string]interface{}{
+ "e": "four",
+ },
+ "g": []int{6, 7},
+ "i": "aye",
+ "j": "jay",
+ "k": map[string]interface{}{
+ "l": false,
+ },
+ },
+ "dst": map[string]interface{}{
+ "a": "one",
+ "c": 3,
+ "d": map[string]interface{}{
+ "f": 5,
+ },
+ "g": []int{8, 9},
+ "i": "eye",
+ "k": map[string]interface{}{
+ "l": true,
+ },
+ },
+ }
+ tpl := `{{merge .dst .src1 .src2}}`
+ var b strings.Builder
+ err := template.Must(template.New("test").Funcs(funcMap()).Parse(tpl)).Execute(&b, dict)
+ assert.NoError(t, err)
+
+ expected := map[string]interface{}{
+ "a": "one", // key overridden
+ "b": 2, // merged from src1
+ "c": 3, // merged from dst
+ "d": map[string]interface{}{ // deep merge
+ "e": "four",
+ "f": 5,
+ },
+ "g": []int{8, 9}, // overridden - arrays are not merged
+ "h": 10, // merged from src2
+ "i": "eye", // overridden twice
+ "j": "jay", // overridden and merged
+ "k": map[string]interface{}{
+ "l": true, // overridden
+ },
+ }
+ assert.Equal(t, expected, dict["dst"])
+}
diff --git a/helm/pkg/engine/lookup_func.go b/helm/pkg/engine/lookup_func.go
new file mode 100644
index 000000000..c6ad8d252
--- /dev/null
+++ b/helm/pkg/engine/lookup_func.go
@@ -0,0 +1,148 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "strings"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/rest"
+)
+
+type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error)
+
+// NewLookupFunction returns a function for looking up objects in the cluster.
+//
+// If the resource does not exist, no error is raised.
+func NewLookupFunction(config *rest.Config) lookupFunc { //nolint:revive
+ return newLookupFunction(clientProviderFromConfig{config: config})
+}
+
+type ClientProvider interface {
+ // GetClientFor returns a dynamic.NamespaceableResourceInterface suitable for interacting with resources
+ // corresponding to the provided apiVersion and kind, as well as a boolean indicating whether the resources
+ // are namespaced.
+ GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error)
+}
+
+type clientProviderFromConfig struct {
+ config *rest.Config
+}
+
+func (c clientProviderFromConfig) GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) {
+ return getDynamicClientOnKind(apiVersion, kind, c.config)
+}
+
+func newLookupFunction(clientProvider ClientProvider) lookupFunc {
+ return func(apiversion string, kind string, namespace string, name string) (map[string]interface{}, error) {
+ var client dynamic.ResourceInterface
+ c, namespaced, err := clientProvider.GetClientFor(apiversion, kind)
+ if err != nil {
+ return map[string]interface{}{}, err
+ }
+ if namespaced && namespace != "" {
+ client = c.Namespace(namespace)
+ } else {
+ client = c
+ }
+ if name != "" {
+ // this will return a single object
+ obj, err := client.Get(context.Background(), name, metav1.GetOptions{})
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ // Just return an empty interface when the object was not found.
+ // That way, users can use `if not (lookup ...)` in their templates.
+ return map[string]interface{}{}, nil
+ }
+ return map[string]interface{}{}, err
+ }
+ return obj.UnstructuredContent(), nil
+ }
+ // this will return a list
+ obj, err := client.List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ // Just return an empty interface when the object was not found.
+ // That way, users can use `if not (lookup ...)` in their templates.
+ return map[string]interface{}{}, nil
+ }
+ return map[string]interface{}{}, err
+ }
+ return obj.UnstructuredContent(), nil
+ }
+}
+
+// getDynamicClientOnKind returns a dynamic client on an Unstructured type. This client can be further namespaced.
+func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) {
+ gvk := schema.FromAPIVersionAndKind(apiversion, kind)
+ apiRes, err := getAPIResourceForGVK(gvk, config)
+ if err != nil {
+ slog.Error(
+ "unable to get apiresource",
+ slog.String("groupVersionKind", gvk.String()),
+ slog.Any("error", err),
+ )
+ return nil, false, fmt.Errorf("unable to get apiresource from unstructured: %s: %w", gvk.String(), err)
+ }
+ gvr := schema.GroupVersionResource{
+ Group: apiRes.Group,
+ Version: apiRes.Version,
+ Resource: apiRes.Name,
+ }
+ intf, err := dynamic.NewForConfig(config)
+ if err != nil {
+ slog.Error("unable to get dynamic client", slog.Any("error", err))
+ return nil, false, err
+ }
+ res := intf.Resource(gvr)
+ return res, apiRes.Namespaced, nil
+}
+
+func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (metav1.APIResource, error) {
+ res := metav1.APIResource{}
+ discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
+ if err != nil {
+ slog.Error("unable to create discovery client", slog.Any("error", err))
+ return res, err
+ }
+ resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
+ if err != nil {
+ slog.Error(
+ "unable to retrieve resource list",
+ slog.String("GroupVersion", gvk.GroupVersion().String()),
+ slog.Any("error", err),
+ )
+ return res, err
+ }
+ for _, resource := range resList.APIResources {
+ // if a resource contains a "/" it's referencing a subresource. we don't support subresource for now.
+ if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") {
+ res = resource
+ res.Group = gvk.Group
+ res.Version = gvk.Version
+ break
+ }
+ }
+ return res, nil
+}
diff --git a/helm/pkg/gates/doc.go b/helm/pkg/gates/doc.go
new file mode 100644
index 000000000..6592cf4d4
--- /dev/null
+++ b/helm/pkg/gates/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package gates provides a general tool for working with experimental feature gates.
+
+This provides convenience methods where the user can determine if certain experimental features are enabled.
+*/
+package gates
diff --git a/helm/pkg/gates/gates.go b/helm/pkg/gates/gates.go
new file mode 100644
index 000000000..69559219e
--- /dev/null
+++ b/helm/pkg/gates/gates.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gates
+
+import (
+ "fmt"
+ "os"
+)
+
+// Gate is the name of the feature gate.
+type Gate string
+
+// String returns the string representation of this feature gate.
+func (g Gate) String() string {
+ return string(g)
+}
+
+// IsEnabled determines whether a certain feature gate is enabled.
+func (g Gate) IsEnabled() bool {
+ return os.Getenv(string(g)) != ""
+}
+
+func (g Gate) Error() error {
+ return fmt.Errorf("this feature has been marked as experimental and is not enabled by default. Please set %s=1 in your environment to use this feature", g.String())
+}
diff --git a/helm/pkg/gates/gates_test.go b/helm/pkg/gates/gates_test.go
new file mode 100644
index 000000000..4d77199e6
--- /dev/null
+++ b/helm/pkg/gates/gates_test.go
@@ -0,0 +1,55 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gates
+
+import (
+ "os"
+ "testing"
+)
+
+const name string = "HELM_EXPERIMENTAL_FEATURE"
+
+func TestIsEnabled(t *testing.T) {
+ g := Gate(name)
+
+ if g.IsEnabled() {
+ t.Errorf("feature gate shows as available, but the environment variable %s was not set", name)
+ }
+
+ t.Setenv(name, "1")
+
+ if !g.IsEnabled() {
+ t.Errorf("feature gate shows as disabled, but the environment variable %s was set", name)
+ }
+}
+
+func TestError(t *testing.T) {
+ os.Unsetenv(name)
+ g := Gate(name)
+
+ if g.Error().Error() != "this feature has been marked as experimental and is not enabled by default. Please set HELM_EXPERIMENTAL_FEATURE=1 in your environment to use this feature" {
+ t.Errorf("incorrect error message. Received %s", g.Error().Error())
+ }
+}
+
+func TestString(t *testing.T) {
+ os.Unsetenv(name)
+ g := Gate(name)
+
+ if g.String() != "HELM_EXPERIMENTAL_FEATURE" {
+ t.Errorf("incorrect string representation. Received %s", g.String())
+ }
+}
diff --git a/helm/pkg/getter/doc.go b/helm/pkg/getter/doc.go
new file mode 100644
index 000000000..11cf6153e
--- /dev/null
+++ b/helm/pkg/getter/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package getter provides a generalize tool for fetching data by scheme.
+
+This provides a method by which the plugin system can load arbitrary protocol
+handlers based upon a URL scheme.
+*/
+package getter
diff --git a/helm/pkg/getter/getter.go b/helm/pkg/getter/getter.go
new file mode 100644
index 000000000..a2d0f0ee2
--- /dev/null
+++ b/helm/pkg/getter/getter.go
@@ -0,0 +1,232 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "slices"
+ "time"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// getterOptions are generic parameters to be provided to the getter during instantiation.
+//
+// Getters may or may not ignore these parameters as they are passed in.
+// TODO what is the difference between this and schema.GetterOptionsV1?
+type getterOptions struct {
+ url string
+ certFile string
+ keyFile string
+ caFile string
+ unTar bool
+ insecureSkipVerifyTLS bool
+ plainHTTP bool
+ acceptHeader string
+ username string
+ password string
+ passCredentialsAll bool
+ userAgent string
+ version string
+ registryClient *registry.Client
+ timeout time.Duration
+ transport *http.Transport
+ artifactType string
+}
+
+// Option allows specifying various settings configurable by the user for overriding the defaults
+// used when performing Get operations with the Getter.
+type Option func(*getterOptions)
+
+// WithURL informs the getter the server name that will be used when fetching objects. Used in conjunction with
+// WithTLSClientConfig to set the TLSClientConfig's server name.
+func WithURL(url string) Option {
+ return func(opts *getterOptions) {
+ opts.url = url
+ }
+}
+
+// WithAcceptHeader sets the request's Accept header as some REST APIs serve multiple content types
+func WithAcceptHeader(header string) Option {
+ return func(opts *getterOptions) {
+ opts.acceptHeader = header
+ }
+}
+
+// WithBasicAuth sets the request's Authorization header to use the provided credentials
+func WithBasicAuth(username, password string) Option {
+ return func(opts *getterOptions) {
+ opts.username = username
+ opts.password = password
+ }
+}
+
+func WithPassCredentialsAll(pass bool) Option {
+ return func(opts *getterOptions) {
+ opts.passCredentialsAll = pass
+ }
+}
+
+// WithUserAgent sets the request's User-Agent header to use the provided agent name.
+func WithUserAgent(userAgent string) Option {
+ return func(opts *getterOptions) {
+ opts.userAgent = userAgent
+ }
+}
+
+// WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked
+func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option {
+ return func(opts *getterOptions) {
+ opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS
+ }
+}
+
+// WithTLSClientConfig sets the client auth with the provided credentials.
+func WithTLSClientConfig(certFile, keyFile, caFile string) Option {
+ return func(opts *getterOptions) {
+ opts.certFile = certFile
+ opts.keyFile = keyFile
+ opts.caFile = caFile
+ }
+}
+
+func WithPlainHTTP(plainHTTP bool) Option {
+ return func(opts *getterOptions) {
+ opts.plainHTTP = plainHTTP
+ }
+}
+
+// WithTimeout sets the timeout for requests
+func WithTimeout(timeout time.Duration) Option {
+ return func(opts *getterOptions) {
+ opts.timeout = timeout
+ }
+}
+
+func WithTagName(tagname string) Option {
+ return func(opts *getterOptions) {
+ opts.version = tagname
+ }
+}
+
+func WithRegistryClient(client *registry.Client) Option {
+ return func(opts *getterOptions) {
+ opts.registryClient = client
+ }
+}
+
+func WithUntar() Option {
+ return func(opts *getterOptions) {
+ opts.unTar = true
+ }
+}
+
+// WithTransport sets the http.Transport to allow overwriting the HTTPGetter default.
+func WithTransport(transport *http.Transport) Option {
+ return func(opts *getterOptions) {
+ opts.transport = transport
+ }
+}
+
+// WithArtifactType sets the type of OCI artifact ("chart" or "plugin")
+func WithArtifactType(artifactType string) Option {
+ return func(opts *getterOptions) {
+ opts.artifactType = artifactType
+ }
+}
+
+// Getter is an interface to support GET to the specified URL.
+type Getter interface {
+ // Get file content by url string
+ Get(url string, options ...Option) (*bytes.Buffer, error)
+}
+
+// Constructor is the function for every getter which creates a specific instance
+// according to the configuration
+type Constructor func(options ...Option) (Getter, error)
+
+// Provider represents any getter and the schemes that it supports.
+//
+// For example, an HTTP provider may provide one getter that handles both
+// 'http' and 'https' schemes.
+type Provider struct {
+ Schemes []string
+ New Constructor
+}
+
+// Provides returns true if the given scheme is supported by this Provider.
+func (p Provider) Provides(scheme string) bool {
+ return slices.Contains(p.Schemes, scheme)
+}
+
+// Providers is a collection of Provider objects.
+type Providers []Provider
+
+// ByScheme returns a Provider that handles the given scheme.
+//
+// If no provider handles this scheme, this will return an error.
+func (p Providers) ByScheme(scheme string) (Getter, error) {
+ for _, pp := range p {
+ if pp.Provides(scheme) {
+ return pp.New()
+ }
+ }
+ return nil, fmt.Errorf("scheme %q not supported", scheme)
+}
+
+const (
+ // The cost timeout references curl's default connection timeout.
+ // https://github.com/curl/curl/blob/master/lib/connect.h#L40C21-L40C21
+ // The helm commands are usually executed manually. Considering the acceptable waiting time, we reduced the entire request time to 120s.
+ DefaultHTTPTimeout = 120
+)
+
+var defaultOptions = []Option{WithTimeout(time.Second * DefaultHTTPTimeout)}
+
+func Getters(extraOpts ...Option) Providers {
+ return Providers{
+ Provider{
+ Schemes: []string{"http", "https"},
+ New: func(options ...Option) (Getter, error) {
+ options = append(options, defaultOptions...)
+ options = append(options, extraOpts...)
+ return NewHTTPGetter(options...)
+ },
+ },
+ Provider{
+ Schemes: []string{registry.OCIScheme},
+ New: func(options ...Option) (Getter, error) {
+ options = append(options, defaultOptions...)
+ options = append(options, extraOpts...)
+ return NewOCIGetter(options...)
+ },
+ },
+ }
+}
+
+// All finds all of the registered getters as a list of Provider instances.
+// Currently, the built-in getters and the discovered plugins with downloader
+// notations are collected.
+func All(settings *cli.EnvSettings, opts ...Option) Providers {
+ result := Getters(opts...)
+ pluginDownloaders, _ := collectGetterPlugins(settings)
+ result = append(result, pluginDownloaders...)
+ return result
+}
diff --git a/helm/pkg/getter/getter_test.go b/helm/pkg/getter/getter_test.go
new file mode 100644
index 000000000..83920e809
--- /dev/null
+++ b/helm/pkg/getter/getter_test.go
@@ -0,0 +1,98 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+const pluginDir = "testdata/plugins"
+
+func TestProvider(t *testing.T) {
+ p := Provider{
+ []string{"one", "three"},
+ func(_ ...Option) (Getter, error) { return nil, nil },
+ }
+
+ if !p.Provides("three") {
+ t.Error("Expected provider to provide three")
+ }
+}
+
+func TestProviders(t *testing.T) {
+ ps := Providers{
+ {[]string{"one", "three"}, func(_ ...Option) (Getter, error) { return nil, nil }},
+ {[]string{"two", "four"}, func(_ ...Option) (Getter, error) { return nil, nil }},
+ }
+
+ if _, err := ps.ByScheme("one"); err != nil {
+ t.Error(err)
+ }
+ if _, err := ps.ByScheme("four"); err != nil {
+ t.Error(err)
+ }
+
+ if _, err := ps.ByScheme("five"); err == nil {
+ t.Error("Did not expect handler for five")
+ }
+}
+
+func TestProvidersWithTimeout(t *testing.T) {
+ want := time.Hour
+ getters := Getters(WithTimeout(want))
+ getter, err := getters.ByScheme("http")
+ if err != nil {
+ t.Error(err)
+ }
+ client, err := getter.(*HTTPGetter).httpClient()
+ if err != nil {
+ t.Error(err)
+ }
+ got := client.Timeout
+ if got != want {
+ t.Errorf("Expected %q, got %q", want, got)
+ }
+}
+
+func TestAll(t *testing.T) {
+ env := cli.New()
+ env.PluginsDirectory = pluginDir
+
+ all := All(env)
+ if len(all) != 4 {
+ t.Errorf("expected 4 providers (default plus three plugins), got %d", len(all))
+ }
+
+ if _, err := all.ByScheme("test2"); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestByScheme(t *testing.T) {
+ env := cli.New()
+ env.PluginsDirectory = pluginDir
+
+ g := All(env)
+ if _, err := g.ByScheme("test"); err != nil {
+ t.Error(err)
+ }
+ if _, err := g.ByScheme("https"); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/helm/pkg/getter/httpgetter.go b/helm/pkg/getter/httpgetter.go
new file mode 100644
index 000000000..110f45c54
--- /dev/null
+++ b/helm/pkg/getter/httpgetter.go
@@ -0,0 +1,160 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sync"
+
+ "helm.sh/helm/v4/internal/tlsutil"
+ "helm.sh/helm/v4/internal/version"
+)
+
+// HTTPGetter is the default HTTP(/S) backend handler
+type HTTPGetter struct {
+ opts getterOptions
+ transport *http.Transport
+ once sync.Once
+}
+
+// Get performs a Get from repo.Getter and returns the body.
+func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error) {
+ for _, opt := range options {
+ opt(&g.opts)
+ }
+ return g.get(href)
+}
+
+func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) {
+ // Set a helm specific user agent so that a repo server and metrics can
+ // separate helm calls from other tools interacting with repos.
+ req, err := http.NewRequest(http.MethodGet, href, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if g.opts.acceptHeader != "" {
+ req.Header.Set("Accept", g.opts.acceptHeader)
+ }
+
+ req.Header.Set("User-Agent", version.GetUserAgent())
+ if g.opts.userAgent != "" {
+ req.Header.Set("User-Agent", g.opts.userAgent)
+ }
+
+ // Before setting the basic auth credentials, make sure the URL associated
+ // with the basic auth is the one being fetched.
+ u1, err := url.Parse(g.opts.url)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse getter URL: %w", err)
+ }
+ u2, err := url.Parse(href)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse URL getting from: %w", err)
+ }
+
+ // Host on URL (returned from url.Parse) contains the port if present.
+ // This check ensures credentials are not passed between different
+ // services on different ports.
+ if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) {
+ if g.opts.username != "" && g.opts.password != "" {
+ req.SetBasicAuth(g.opts.username, g.opts.password)
+ }
+ }
+
+ client, err := g.httpClient()
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to fetch %s : %s", href, resp.Status)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ _, err = io.Copy(buf, resp.Body)
+ return buf, err
+}
+
+// NewHTTPGetter constructs a valid http/https client as a Getter
+func NewHTTPGetter(options ...Option) (Getter, error) {
+ var client HTTPGetter
+
+ for _, opt := range options {
+ opt(&client.opts)
+ }
+
+ return &client, nil
+}
+
+func (g *HTTPGetter) httpClient() (*http.Client, error) {
+ if g.opts.transport != nil {
+ return &http.Client{
+ Transport: g.opts.transport,
+ Timeout: g.opts.timeout,
+ }, nil
+ }
+
+ g.once.Do(func() {
+ g.transport = &http.Transport{
+ DisableCompression: true,
+ Proxy: http.ProxyFromEnvironment,
+ // Being nil would cause the tls.Config default to be used
+ // "NewTLSConfig" modifies an empty TLS config, not the default one
+ TLSClientConfig: &tls.Config{},
+ }
+ })
+
+ if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS {
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(g.opts.insecureSkipVerifyTLS),
+ tlsutil.WithCertKeyPairFiles(g.opts.certFile, g.opts.keyFile),
+ tlsutil.WithCAFile(g.opts.caFile),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("can't create TLS config for client: %w", err)
+ }
+
+ g.transport.TLSClientConfig = tlsConf
+ }
+
+ if g.opts.insecureSkipVerifyTLS {
+ if g.transport.TLSClientConfig == nil {
+ g.transport.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: true,
+ }
+ } else {
+ g.transport.TLSClientConfig.InsecureSkipVerify = true
+ }
+ }
+
+ client := &http.Client{
+ Transport: g.transport,
+ Timeout: g.opts.timeout,
+ }
+
+ return client, nil
+}
diff --git a/helm/pkg/getter/httpgetter_test.go b/helm/pkg/getter/httpgetter_test.go
new file mode 100644
index 000000000..b27b9f5d2
--- /dev/null
+++ b/helm/pkg/getter/httpgetter_test.go
@@ -0,0 +1,680 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/internal/tlsutil"
+ "helm.sh/helm/v4/internal/version"
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+func TestHTTPGetter(t *testing.T) {
+ g, err := NewHTTPGetter(WithURL("http://example.com"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := g.(*HTTPGetter); !ok {
+ t.Fatal("Expected NewHTTPGetter to produce an *HTTPGetter")
+ }
+
+ cd := "../../testdata"
+ join := filepath.Join
+ ca, pub, priv := join(cd, "rootca.crt"), join(cd, "crt.pem"), join(cd, "key.pem")
+ insecure := false
+ timeout := time.Second * 5
+ transport := &http.Transport{}
+
+ // Test with getterOptions
+ g, err = NewHTTPGetter(
+ WithBasicAuth("I", "Am"),
+ WithPassCredentialsAll(false),
+ WithUserAgent("Groot"),
+ WithTLSClientConfig(pub, priv, ca),
+ WithInsecureSkipVerifyTLS(insecure),
+ WithTimeout(timeout),
+ WithTransport(transport),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hg, ok := g.(*HTTPGetter)
+ if !ok {
+ t.Fatal("expected NewHTTPGetter to produce an *HTTPGetter")
+ }
+
+ if hg.opts.username != "I" {
+ t.Errorf("Expected NewHTTPGetter to contain %q as the username, got %q", "I", hg.opts.username)
+ }
+
+ if hg.opts.password != "Am" {
+ t.Errorf("Expected NewHTTPGetter to contain %q as the password, got %q", "Am", hg.opts.password)
+ }
+
+ if hg.opts.passCredentialsAll != false {
+ t.Errorf("Expected NewHTTPGetter to contain %t as PassCredentialsAll, got %t", false, hg.opts.passCredentialsAll)
+ }
+
+ if hg.opts.userAgent != "Groot" {
+ t.Errorf("Expected NewHTTPGetter to contain %q as the user agent, got %q", "Groot", hg.opts.userAgent)
+ }
+
+ if hg.opts.certFile != pub {
+ t.Errorf("Expected NewHTTPGetter to contain %q as the public key file, got %q", pub, hg.opts.certFile)
+ }
+
+ if hg.opts.keyFile != priv {
+ t.Errorf("Expected NewHTTPGetter to contain %q as the private key file, got %q", priv, hg.opts.keyFile)
+ }
+
+ if hg.opts.caFile != ca {
+ t.Errorf("Expected NewHTTPGetter to contain %q as the CA file, got %q", ca, hg.opts.caFile)
+ }
+
+ if hg.opts.insecureSkipVerifyTLS != insecure {
+ t.Errorf("Expected NewHTTPGetter to contain %t as InsecureSkipVerifyTLs flag, got %t", false, hg.opts.insecureSkipVerifyTLS)
+ }
+
+ if hg.opts.timeout != timeout {
+ t.Errorf("Expected NewHTTPGetter to contain %s as Timeout flag, got %s", timeout, hg.opts.timeout)
+ }
+
+ if hg.opts.transport != transport {
+ t.Errorf("Expected NewHTTPGetter to contain %p as Transport, got %p", transport, hg.opts.transport)
+ }
+
+ // Test if setting insecureSkipVerifyTLS is being passed to the ops
+ insecure = true
+
+ g, err = NewHTTPGetter(
+ WithInsecureSkipVerifyTLS(insecure),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hg, ok = g.(*HTTPGetter)
+ if !ok {
+ t.Fatal("expected NewHTTPGetter to produce an *HTTPGetter")
+ }
+
+ if hg.opts.insecureSkipVerifyTLS != insecure {
+ t.Errorf("Expected NewHTTPGetter to contain %t as InsecureSkipVerifyTLs flag, got %t", insecure, hg.opts.insecureSkipVerifyTLS)
+ }
+
+ // Checking false by default
+ if hg.opts.passCredentialsAll != false {
+ t.Errorf("Expected NewHTTPGetter to contain %t as PassCredentialsAll, got %t", false, hg.opts.passCredentialsAll)
+ }
+
+ // Test setting PassCredentialsAll
+ g, err = NewHTTPGetter(
+ WithBasicAuth("I", "Am"),
+ WithPassCredentialsAll(true),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hg, ok = g.(*HTTPGetter)
+ if !ok {
+ t.Fatal("expected NewHTTPGetter to produce an *HTTPGetter")
+ }
+ if hg.opts.passCredentialsAll != true {
+ t.Errorf("Expected NewHTTPGetter to contain %t as PassCredentialsAll, got %t", true, hg.opts.passCredentialsAll)
+ }
+}
+
+func TestDownload(t *testing.T) {
+ expect := "Call me Ishmael"
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defaultUserAgent := version.GetUserAgent()
+ if r.UserAgent() != defaultUserAgent {
+ t.Errorf("Expected '%s', got '%s'", defaultUserAgent, r.UserAgent())
+ }
+ fmt.Fprint(w, expect)
+ }))
+ defer srv.Close()
+
+ g, err := All(cli.New()).ByScheme("http")
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := g.Get(srv.URL, WithURL(srv.URL))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got.String() != expect {
+ t.Errorf("Expected %q, got %q", expect, got.String())
+ }
+
+ // test with http server
+ const expectedUserAgent = "I am Groot"
+ basicAuthSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ username, password, ok := r.BasicAuth()
+ if !ok || username != "username" || password != "password" {
+ t.Errorf("Expected request to use basic auth and for username == 'username' and password == 'password', got '%v', '%s', '%s'", ok, username, password)
+ }
+ if r.UserAgent() != expectedUserAgent {
+ t.Errorf("Expected '%s', got '%s'", expectedUserAgent, r.UserAgent())
+ }
+ fmt.Fprint(w, expect)
+ }))
+
+ defer basicAuthSrv.Close()
+
+ u, _ := url.ParseRequestURI(basicAuthSrv.URL)
+ httpgetter, err := NewHTTPGetter(
+ WithURL(u.String()),
+ WithBasicAuth("username", "password"),
+ WithPassCredentialsAll(false),
+ WithUserAgent(expectedUserAgent),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = httpgetter.Get(u.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got.String() != expect {
+ t.Errorf("Expected %q, got %q", expect, got.String())
+ }
+
+ // test with Get URL differing from withURL
+ crossAuthSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ username, password, ok := r.BasicAuth()
+ if ok || username == "username" || password == "password" {
+ t.Errorf("Expected request to not include but got '%v', '%s', '%s'", ok, username, password)
+ }
+ fmt.Fprint(w, expect)
+ }))
+
+ defer crossAuthSrv.Close()
+
+ u, _ = url.ParseRequestURI(crossAuthSrv.URL)
+
+ // A different host is provided for the WithURL from the one used for Get
+ u2, _ := url.ParseRequestURI(crossAuthSrv.URL)
+ host := strings.Split(u2.Host, ":")
+ host[0] = host[0] + "a"
+ u2.Host = strings.Join(host, ":")
+ httpgetter, err = NewHTTPGetter(
+ WithURL(u2.String()),
+ WithBasicAuth("username", "password"),
+ WithPassCredentialsAll(false),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = httpgetter.Get(u.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got.String() != expect {
+ t.Errorf("Expected %q, got %q", expect, got.String())
+ }
+
+ // test with Get URL differing from withURL and should pass creds
+ crossAuthSrv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ username, password, ok := r.BasicAuth()
+ if !ok || username != "username" || password != "password" {
+ t.Errorf("Expected request to use basic auth and for username == 'username' and password == 'password', got '%v', '%s', '%s'", ok, username, password)
+ }
+ fmt.Fprint(w, expect)
+ }))
+
+ defer crossAuthSrv.Close()
+
+ u, _ = url.ParseRequestURI(crossAuthSrv.URL)
+
+ // A different host is provided for the WithURL from the one used for Get
+ u2, _ = url.ParseRequestURI(crossAuthSrv.URL)
+ host = strings.Split(u2.Host, ":")
+ host[0] = host[0] + "a"
+ u2.Host = strings.Join(host, ":")
+ httpgetter, err = NewHTTPGetter(
+ WithURL(u2.String()),
+ WithBasicAuth("username", "password"),
+ WithPassCredentialsAll(true),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = httpgetter.Get(u.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got.String() != expect {
+ t.Errorf("Expected %q, got %q", expect, got.String())
+ }
+
+ // test server with varied Accept Header
+ const expectedAcceptHeader = "application/gzip,application/octet-stream"
+ acceptHeaderSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Accept") != expectedAcceptHeader {
+ t.Errorf("Expected '%s', got '%s'", expectedAcceptHeader, r.Header.Get("Accept"))
+ }
+ fmt.Fprint(w, expect)
+ }))
+
+ defer acceptHeaderSrv.Close()
+
+ u, _ = url.ParseRequestURI(acceptHeaderSrv.URL)
+ httpgetter, err = NewHTTPGetter(
+ WithAcceptHeader(expectedAcceptHeader),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = httpgetter.Get(u.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDownloadTLS(t *testing.T) {
+ cd := "../../testdata"
+ ca, pub, priv := filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "crt.pem"), filepath.Join(cd, "key.pem")
+ insecureSkipTLSVerify := false
+
+ tlsSrv := httptest.NewUnstartedServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(insecureSkipTLSVerify),
+ tlsutil.WithCertKeyPairFiles(pub, priv),
+ tlsutil.WithCAFile(ca),
+ )
+ if err != nil {
+ t.Fatal(fmt.Errorf("can't create TLS config for client: %w", err))
+ }
+ tlsConf.ServerName = "helm.sh"
+ tlsSrv.TLS = tlsConf
+ tlsSrv.StartTLS()
+ defer tlsSrv.Close()
+
+ u, _ := url.ParseRequestURI(tlsSrv.URL)
+ g, err := NewHTTPGetter(
+ WithURL(u.String()),
+ WithTLSClientConfig(pub, priv, ca),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := g.Get(u.String()); err != nil {
+ t.Error(err)
+ }
+
+ // now test with TLS config being passed along in .Get (see #6635)
+ g, err = NewHTTPGetter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig(pub, priv, ca)); err != nil {
+ t.Error(err)
+ }
+
+ // test with only the CA file (see also #6635)
+ g, err = NewHTTPGetter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig("", "", ca)); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDownloadTLSWithRedirect(t *testing.T) {
+ cd := "../../testdata"
+ srv2Resp := "hello"
+ insecureSkipTLSVerify := false
+
+ // Server 2 that will actually fulfil the request.
+ ca, pub, priv := filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "localhost-crt.pem"), filepath.Join(cd, "key.pem")
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithCAFile(ca),
+ tlsutil.WithCertKeyPairFiles(pub, priv),
+ tlsutil.WithInsecureSkipVerify(insecureSkipTLSVerify),
+ )
+
+ if err != nil {
+ t.Fatal(fmt.Errorf("can't create TLS config for client: %w", err))
+ }
+
+ tlsSrv2 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
+ rw.Header().Set("Content-Type", "text/plain")
+ rw.Write([]byte(srv2Resp))
+ }))
+
+ tlsSrv2.TLS = tlsConf
+ tlsSrv2.StartTLS()
+ defer tlsSrv2.Close()
+
+ // Server 1 responds with a redirect to Server 2.
+ ca, pub, priv = filepath.Join(cd, "rootca.crt"), filepath.Join(cd, "crt.pem"), filepath.Join(cd, "key.pem")
+ tlsConf, err = tlsutil.NewTLSConfig(
+ tlsutil.WithCAFile(ca),
+ tlsutil.WithCertKeyPairFiles(pub, priv),
+ tlsutil.WithInsecureSkipVerify(insecureSkipTLSVerify),
+ )
+
+ if err != nil {
+ t.Fatal(fmt.Errorf("can't create TLS config for client: %w", err))
+ }
+
+ tlsSrv1 := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ u, _ := url.ParseRequestURI(tlsSrv2.URL)
+
+ // Make the request using the hostname 'localhost' (to which 'localhost-crt.pem' is issued)
+ // to verify that a successful TLS connection is made even if the client doesn't specify
+ // the hostname (SNI) in `tls.Config.ServerName`. By default the hostname is derived from the
+ // request URL for every request (including redirects). Setting `tls.Config.ServerName` on the
+ // client just overrides the remote endpoint's hostname.
+ // See https://github.com/golang/go/blob/3979fb9/src/net/http/transport.go#L1505-L1513.
+ u.Host = fmt.Sprintf("localhost:%s", u.Port())
+
+ http.Redirect(rw, r, u.String(), http.StatusTemporaryRedirect)
+ }))
+
+ tlsSrv1.TLS = tlsConf
+ tlsSrv1.StartTLS()
+ defer tlsSrv1.Close()
+
+ u, _ := url.ParseRequestURI(tlsSrv1.URL)
+
+ t.Run("Test with TLS", func(t *testing.T) {
+ g, err := NewHTTPGetter(
+ WithURL(u.String()),
+ WithTLSClientConfig(pub, priv, ca),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf, err := g.Get(u.String())
+ if err != nil {
+ t.Error(err)
+ }
+
+ b, err := io.ReadAll(buf)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(b) != srv2Resp {
+ t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
+ }
+ })
+
+ t.Run("Test with TLS config being passed along in .Get (see #6635)", func(t *testing.T) {
+ g, err := NewHTTPGetter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig(pub, priv, ca))
+ if err != nil {
+ t.Error(err)
+ }
+
+ b, err := io.ReadAll(buf)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(b) != srv2Resp {
+ t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
+ }
+ })
+
+ t.Run("Test with only the CA file (see also #6635)", func(t *testing.T) {
+ g, err := NewHTTPGetter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf, err := g.Get(u.String(), WithURL(u.String()), WithTLSClientConfig("", "", ca))
+ if err != nil {
+ t.Error(err)
+ }
+
+ b, err := io.ReadAll(buf)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if string(b) != srv2Resp {
+ t.Errorf("expected response from Server2 to be '%s', instead got: %s", srv2Resp, string(b))
+ }
+ })
+}
+
+func TestDownloadInsecureSkipTLSVerify(t *testing.T) {
+ ts := httptest.NewTLSServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
+ defer ts.Close()
+
+ u, _ := url.ParseRequestURI(ts.URL)
+
+ // Ensure the default behavior did not change
+ g, err := NewHTTPGetter(
+ WithURL(u.String()),
+ )
+ if err != nil {
+ t.Error(err)
+ }
+
+ if _, err := g.Get(u.String()); err == nil {
+ t.Errorf("Expected Getter to throw an error, got %s", err)
+ }
+
+ // Test certificate check skip
+ g, err = NewHTTPGetter(
+ WithURL(u.String()),
+ WithInsecureSkipVerifyTLS(true),
+ )
+ if err != nil {
+ t.Error(err)
+ }
+ if _, err = g.Get(u.String()); err != nil {
+ t.Error(err)
+ }
+
+}
+
+func TestHTTPGetterTarDownload(t *testing.T) {
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ f, _ := os.Open("testdata/empty-0.0.1.tgz")
+ defer f.Close()
+
+ b := make([]byte, 512)
+ f.Read(b)
+ // Get the file size
+ FileStat, _ := f.Stat()
+ FileSize := strconv.FormatInt(FileStat.Size(), 10)
+
+ // Simulating improper header values from bitbucket
+ w.Header().Set("Content-Type", "application/x-tar")
+ w.Header().Set("Content-Encoding", "gzip")
+ w.Header().Set("Content-Length", FileSize)
+
+ f.Seek(0, 0)
+ io.Copy(w, f)
+ }))
+
+ defer srv.Close()
+
+ g, err := NewHTTPGetter(WithURL(srv.URL))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, _ := g.Get(srv.URL)
+ mimeType := http.DetectContentType(data.Bytes())
+
+ expectedMimeType := "application/x-gzip"
+ if mimeType != expectedMimeType {
+ t.Fatalf("Expected response with MIME type %s, but got %s", expectedMimeType, mimeType)
+ }
+}
+
+func TestHttpClientInsecureSkipVerify(t *testing.T) {
+ g := HTTPGetter{}
+ g.opts.url = "https://localhost"
+ verifyInsecureSkipVerify(t, &g, "Blank HTTPGetter", false)
+
+ g = HTTPGetter{}
+ g.opts.url = "https://localhost"
+ g.opts.caFile = "testdata/ca.crt"
+ verifyInsecureSkipVerify(t, &g, "HTTPGetter with ca file", false)
+
+ g = HTTPGetter{}
+ g.opts.url = "https://localhost"
+ g.opts.insecureSkipVerifyTLS = true
+ verifyInsecureSkipVerify(t, &g, "HTTPGetter with skip cert verification only", true)
+
+ g = HTTPGetter{}
+ g.opts.url = "https://localhost"
+ g.opts.certFile = "testdata/client.crt"
+ g.opts.keyFile = "testdata/client.key"
+ g.opts.insecureSkipVerifyTLS = true
+ transport := verifyInsecureSkipVerify(t, &g, "HTTPGetter with 2 way ssl", true)
+ if len(transport.TLSClientConfig.Certificates) <= 0 {
+ t.Fatal("transport.TLSClientConfig.Certificates is not present")
+ }
+}
+
+func verifyInsecureSkipVerify(t *testing.T, g *HTTPGetter, caseName string, expectedValue bool) *http.Transport {
+ t.Helper()
+ returnVal, err := g.httpClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if returnVal == nil { //nolint:staticcheck
+ t.Fatalf("Expected non nil value for http client")
+ }
+ transport := (returnVal.Transport).(*http.Transport) //nolint:staticcheck
+ gotValue := false
+ if transport.TLSClientConfig != nil {
+ gotValue = transport.TLSClientConfig.InsecureSkipVerify
+ }
+ if gotValue != expectedValue {
+ t.Fatalf("Case Name = %s\nInsecureSkipVerify did not come as expected. Expected = %t; Got = %v",
+ caseName, expectedValue, gotValue)
+ }
+ return transport
+}
+
+func TestDefaultHTTPTransportReuse(t *testing.T) {
+ g := HTTPGetter{}
+
+ httpClient1, err := g.httpClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if httpClient1 == nil { //nolint:staticcheck
+ t.Fatalf("Expected non nil value for http client")
+ }
+
+ transport1 := (httpClient1.Transport).(*http.Transport) //nolint:staticcheck
+
+ httpClient2, err := g.httpClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if httpClient2 == nil { //nolint:staticcheck
+ t.Fatalf("Expected non nil value for http client")
+ }
+
+ transport2 := (httpClient2.Transport).(*http.Transport) //nolint:staticcheck
+
+ if transport1 != transport2 {
+ t.Fatalf("Expected default transport to be reused")
+ }
+}
+
+func TestHTTPTransportOption(t *testing.T) {
+ transport := &http.Transport{}
+
+ g := HTTPGetter{}
+ g.opts.transport = transport
+ httpClient1, err := g.httpClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if httpClient1 == nil { //nolint:staticcheck
+ t.Fatalf("Expected non nil value for http client")
+ }
+
+ transport1 := (httpClient1.Transport).(*http.Transport) //nolint:staticcheck
+
+ if transport1 != transport {
+ t.Fatalf("Expected transport option to be applied")
+ }
+
+ httpClient2, err := g.httpClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if httpClient2 == nil { //nolint:staticcheck
+ t.Fatalf("Expected non nil value for http client")
+ }
+
+ transport2 := (httpClient2.Transport).(*http.Transport) //nolint:staticcheck
+
+ if transport1 != transport2 {
+ t.Fatalf("Expected applied transport to be reused")
+ }
+
+ g = HTTPGetter{}
+ g.opts.url = "https://localhost"
+ g.opts.certFile = "testdata/client.crt"
+ g.opts.keyFile = "testdata/client.key"
+ g.opts.insecureSkipVerifyTLS = true
+ g.opts.transport = transport
+ usedTransport := verifyInsecureSkipVerify(t, &g, "HTTPGetter with 2 way ssl", false)
+ if usedTransport.TLSClientConfig != nil {
+ t.Fatal("transport.TLSClientConfig should not be set")
+ }
+}
diff --git a/helm/pkg/getter/ocigetter.go b/helm/pkg/getter/ocigetter.go
new file mode 100644
index 000000000..24fc60c56
--- /dev/null
+++ b/helm/pkg/getter/ocigetter.go
@@ -0,0 +1,213 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "helm.sh/helm/v4/internal/tlsutil"
+ "helm.sh/helm/v4/internal/urlutil"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// OCIGetter is the default HTTP(/S) backend handler
+type OCIGetter struct {
+ opts getterOptions
+ transport *http.Transport
+ once sync.Once
+}
+
+// Get performs a Get from repo.Getter and returns the body.
+func (g *OCIGetter) Get(href string, options ...Option) (*bytes.Buffer, error) {
+ for _, opt := range options {
+ opt(&g.opts)
+ }
+ return g.get(href)
+}
+
+func (g *OCIGetter) get(href string) (*bytes.Buffer, error) {
+ client := g.opts.registryClient
+ // if the user has already provided a configured registry client, use it,
+ // this is particularly true when user has his own way of handling the client credentials.
+ if client == nil {
+ c, err := g.newRegistryClient()
+ if err != nil {
+ return nil, err
+ }
+ client = c
+ }
+
+ ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme))
+
+ if version := g.opts.version; version != "" && !strings.Contains(path.Base(ref), ":") {
+ ref = fmt.Sprintf("%s:%s", ref, version)
+ }
+ // Check if this is a plugin request
+ if g.opts.artifactType == "plugin" {
+ return g.getPlugin(client, ref)
+ }
+
+ // Default to chart behavior for backward compatibility
+ var pullOpts []registry.PullOption
+ requestingProv := strings.HasSuffix(ref, ".prov")
+ if requestingProv {
+ ref = strings.TrimSuffix(ref, ".prov")
+ pullOpts = append(pullOpts,
+ registry.PullOptWithChart(false),
+ registry.PullOptWithProv(true))
+ }
+
+ result, err := client.Pull(ref, pullOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if requestingProv {
+ return bytes.NewBuffer(result.Prov.Data), nil
+ }
+ return bytes.NewBuffer(result.Chart.Data), nil
+}
+
+// NewOCIGetter constructs a valid http/https client as a Getter
+func NewOCIGetter(ops ...Option) (Getter, error) {
+ var client OCIGetter
+
+ for _, opt := range ops {
+ opt(&client.opts)
+ }
+
+ return &client, nil
+}
+
+func (g *OCIGetter) newRegistryClient() (*registry.Client, error) {
+ if g.opts.transport != nil {
+ client, err := registry.NewClient(
+ registry.ClientOptHTTPClient(&http.Client{
+ Transport: g.opts.transport,
+ Timeout: g.opts.timeout,
+ }),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return client, nil
+ }
+
+ g.once.Do(func() {
+ g.transport = &http.Transport{
+ // From https://github.com/google/go-containerregistry/blob/31786c6cbb82d6ec4fb8eb79cd9387905130534e/pkg/v1/remote/options.go#L87
+ DisableCompression: true,
+ DialContext: (&net.Dialer{
+ // By default we wrap the transport in retries, so reduce the
+ // default dial timeout to 5s to avoid 5x 30s of connection
+ // timeouts when doing the "ping" on certain http registries.
+ Timeout: 5 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ Proxy: http.ProxyFromEnvironment,
+ // Being nil would cause the tls.Config default to be used
+ // "NewTLSConfig" modifies an empty TLS config, not the default one
+ TLSClientConfig: &tls.Config{},
+ }
+ })
+
+ if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS {
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(g.opts.insecureSkipVerifyTLS),
+ tlsutil.WithCertKeyPairFiles(g.opts.certFile, g.opts.keyFile),
+ tlsutil.WithCAFile(g.opts.caFile),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("can't create TLS config for client: %w", err)
+ }
+
+ sni, err := urlutil.ExtractHostname(g.opts.url)
+ if err != nil {
+ return nil, err
+ }
+ tlsConf.ServerName = sni
+
+ g.transport.TLSClientConfig = tlsConf
+ }
+
+ opts := []registry.ClientOption{registry.ClientOptHTTPClient(&http.Client{
+ Transport: g.transport,
+ Timeout: g.opts.timeout,
+ })}
+ if g.opts.plainHTTP {
+ opts = append(opts, registry.ClientOptPlainHTTP())
+ }
+
+ client, err := registry.NewClient(opts...)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+// getPlugin handles plugin-specific OCI pulls
+func (g *OCIGetter) getPlugin(client *registry.Client, ref string) (*bytes.Buffer, error) {
+ // Check if this is a provenance file request
+ requestingProv := strings.HasSuffix(ref, ".prov")
+ if requestingProv {
+ ref = strings.TrimSuffix(ref, ".prov")
+ }
+
+ // Extract plugin name from the reference
+ // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> "plugin-name"
+ parts := strings.Split(ref, "/")
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid OCI reference: %s", ref)
+ }
+ lastPart := parts[len(parts)-1]
+ pluginName := lastPart
+ if idx := strings.LastIndex(lastPart, ":"); idx > 0 {
+ pluginName = lastPart[:idx]
+ }
+ if idx := strings.LastIndex(lastPart, "@"); idx > 0 {
+ pluginName = lastPart[:idx]
+ }
+
+ var pullOpts []registry.PluginPullOption
+ if requestingProv {
+ pullOpts = append(pullOpts, registry.PullPluginOptWithProv(true))
+ }
+
+ result, err := client.PullPlugin(ref, pluginName, pullOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if requestingProv {
+ return bytes.NewBuffer(result.Prov.Data), nil
+ }
+ return bytes.NewBuffer(result.PluginData), nil
+}
diff --git a/helm/pkg/getter/ocigetter_test.go b/helm/pkg/getter/ocigetter_test.go
new file mode 100644
index 000000000..ef196afcc
--- /dev/null
+++ b/helm/pkg/getter/ocigetter_test.go
@@ -0,0 +1,151 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "net/http"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+func TestOCIGetter(t *testing.T) {
+ g, err := NewOCIGetter(WithURL("oci://example.com"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := g.(*OCIGetter); !ok {
+ t.Fatal("Expected NewOCIGetter to produce an *OCIGetter")
+ }
+
+ cd := "../../testdata"
+ join := filepath.Join
+ ca, pub, priv := join(cd, "rootca.crt"), join(cd, "crt.pem"), join(cd, "key.pem")
+ timeout := time.Second * 5
+ transport := &http.Transport{}
+ insecureSkipVerifyTLS := false
+ plainHTTP := false
+
+ // Test with getterOptions
+ g, err = NewOCIGetter(
+ WithBasicAuth("I", "Am"),
+ WithTLSClientConfig(pub, priv, ca),
+ WithTimeout(timeout),
+ WithTransport(transport),
+ WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS),
+ WithPlainHTTP(plainHTTP),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ og, ok := g.(*OCIGetter)
+ if !ok {
+ t.Fatal("expected NewOCIGetter to produce an *OCIGetter")
+ }
+
+ if og.opts.username != "I" {
+ t.Errorf("Expected NewOCIGetter to contain %q as the username, got %q", "I", og.opts.username)
+ }
+
+ if og.opts.password != "Am" {
+ t.Errorf("Expected NewOCIGetter to contain %q as the password, got %q", "Am", og.opts.password)
+ }
+
+ if og.opts.certFile != pub {
+ t.Errorf("Expected NewOCIGetter to contain %q as the public key file, got %q", pub, og.opts.certFile)
+ }
+
+ if og.opts.keyFile != priv {
+ t.Errorf("Expected NewOCIGetter to contain %q as the private key file, got %q", priv, og.opts.keyFile)
+ }
+
+ if og.opts.caFile != ca {
+ t.Errorf("Expected NewOCIGetter to contain %q as the CA file, got %q", ca, og.opts.caFile)
+ }
+
+ if og.opts.timeout != timeout {
+ t.Errorf("Expected NewOCIGetter to contain %s as Timeout flag, got %s", timeout, og.opts.timeout)
+ }
+
+ if og.opts.transport != transport {
+ t.Errorf("Expected NewOCIGetter to contain %p as Transport, got %p", transport, og.opts.transport)
+ }
+
+ if og.opts.plainHTTP != plainHTTP {
+ t.Errorf("Expected NewOCIGetter to have plainHTTP as %t, got %t", plainHTTP, og.opts.plainHTTP)
+ }
+
+ if og.opts.insecureSkipVerifyTLS != insecureSkipVerifyTLS {
+ t.Errorf("Expected NewOCIGetter to have insecureSkipVerifyTLS as %t, got %t", insecureSkipVerifyTLS, og.opts.insecureSkipVerifyTLS)
+ }
+
+ // Test if setting registryClient is being passed to the ops
+ registryClient, err := registry.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ g, err = NewOCIGetter(
+ WithRegistryClient(registryClient),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ og, ok = g.(*OCIGetter)
+ if !ok {
+ t.Fatal("expected NewOCIGetter to produce an *OCIGetter")
+ }
+
+ if og.opts.registryClient != registryClient {
+ t.Errorf("Expected NewOCIGetter to contain %p as RegistryClient, got %p", registryClient, og.opts.registryClient)
+ }
+}
+
+func TestOCIHTTPTransportReuse(t *testing.T) {
+ g := OCIGetter{}
+
+ _, err := g.newRegistryClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if g.transport == nil {
+ t.Fatalf("Expected non nil value for transport")
+ }
+
+ transport1 := g.transport
+
+ _, err = g.newRegistryClient()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if g.transport == nil {
+ t.Fatalf("Expected non nil value for transport")
+ }
+
+ transport2 := g.transport
+
+ if transport1 != transport2 {
+ t.Fatalf("Expected default transport to be reused")
+ }
+}
diff --git a/helm/pkg/getter/plugingetter.go b/helm/pkg/getter/plugingetter.go
new file mode 100644
index 000000000..ef8b87503
--- /dev/null
+++ b/helm/pkg/getter/plugingetter.go
@@ -0,0 +1,129 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "net/url"
+
+ "helm.sh/helm/v4/internal/plugin"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+// collectGetterPlugins scans for getter plugins.
+// This will load plugins according to the cli.
+func collectGetterPlugins(settings *cli.EnvSettings) (Providers, error) {
+ d := plugin.Descriptor{
+ Type: "getter/v1",
+ }
+ plgs, err := plugin.FindPlugins([]string{settings.PluginsDirectory}, d)
+ if err != nil {
+ return nil, err
+ }
+ env := plugin.FormatEnv(settings.EnvVars())
+ pluginConstructorBuilder := func(plg plugin.Plugin) Constructor {
+ return func(option ...Option) (Getter, error) {
+
+ return &getterPlugin{
+ options: append([]Option{}, option...),
+ plg: plg,
+ env: env,
+ }, nil
+ }
+ }
+ results := make([]Provider, 0, len(plgs))
+ for _, plg := range plgs {
+ if c, ok := plg.Metadata().Config.(*schema.ConfigGetterV1); ok {
+ results = append(results, Provider{
+ Schemes: c.Protocols,
+ New: pluginConstructorBuilder(plg),
+ })
+ }
+ }
+ return results, nil
+}
+
+func convertOptions(globalOptions, options []Option) schema.GetterOptionsV1 {
+ opts := getterOptions{}
+ for _, opt := range globalOptions {
+ opt(&opts)
+ }
+ for _, opt := range options {
+ opt(&opts)
+ }
+
+ result := schema.GetterOptionsV1{
+ URL: opts.url,
+ CertFile: opts.certFile,
+ KeyFile: opts.keyFile,
+ CAFile: opts.caFile,
+ UNTar: opts.unTar,
+ InsecureSkipVerifyTLS: opts.insecureSkipVerifyTLS,
+ PlainHTTP: opts.plainHTTP,
+ AcceptHeader: opts.acceptHeader,
+ Username: opts.username,
+ Password: opts.password,
+ PassCredentialsAll: opts.passCredentialsAll,
+ UserAgent: opts.userAgent,
+ Version: opts.version,
+ Timeout: opts.timeout,
+ }
+
+ return result
+}
+
+type getterPlugin struct {
+ options []Option
+ plg plugin.Plugin
+ env []string
+}
+
+func (g *getterPlugin) Get(href string, options ...Option) (*bytes.Buffer, error) {
+ opts := convertOptions(g.options, options)
+
+ // TODO optimization: pass this along to Get() instead of re-parsing here
+ u, err := url.Parse(href)
+ if err != nil {
+ return nil, err
+ }
+
+ input := &plugin.Input{
+ Message: schema.InputMessageGetterV1{
+ Href: href,
+ Options: opts,
+ Protocol: u.Scheme,
+ },
+ Env: g.env,
+ // TODO should we pass Stdin, Stdout, and Stderr through Input here to getter plugins?
+ // Stdout: os.Stdout,
+ }
+ output, err := g.plg.Invoke(context.Background(), input)
+ if err != nil {
+ return nil, fmt.Errorf("plugin %q failed to invoke: %w", g.plg, err)
+ }
+
+ outputMessage, ok := output.Message.(schema.OutputMessageGetterV1)
+ if !ok {
+ return nil, fmt.Errorf("invalid output message type from plugin %q", g.plg.Metadata().Name)
+ }
+
+ return bytes.NewBuffer(outputMessage.Data), nil
+}
diff --git a/helm/pkg/getter/plugingetter_test.go b/helm/pkg/getter/plugingetter_test.go
new file mode 100644
index 000000000..16af9eb31
--- /dev/null
+++ b/helm/pkg/getter/plugingetter_test.go
@@ -0,0 +1,170 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+ "context"
+
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/internal/plugin/schema"
+
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+func TestCollectPlugins(t *testing.T) {
+ env := cli.New()
+ env.PluginsDirectory = pluginDir
+
+ p, err := collectGetterPlugins(env)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(p) != 2 {
+ t.Errorf("Expected 2 plugins, got %d: %v", len(p), p)
+ }
+
+ if _, err := p.ByScheme("test2"); err != nil {
+ t.Error(err)
+ }
+
+ if _, err := p.ByScheme("test"); err != nil {
+ t.Error(err)
+ }
+
+ if _, err := p.ByScheme("nosuchthing"); err == nil {
+ t.Fatal("did not expect protocol handler for nosuchthing")
+ }
+}
+
+func TestConvertOptions(t *testing.T) {
+ opts := convertOptions(
+ []Option{
+ WithURL("example://foo"),
+ WithAcceptHeader("Accept-Header"),
+ WithBasicAuth("username", "password"),
+ WithPassCredentialsAll(true),
+ WithUserAgent("User-agent"),
+ WithInsecureSkipVerifyTLS(true),
+ WithTLSClientConfig("certFile.pem", "keyFile.pem", "caFile.pem"),
+ WithPlainHTTP(true),
+ WithTimeout(10),
+ WithTagName("1.2.3"),
+ WithUntar(),
+ },
+ []Option{
+ WithTimeout(20),
+ },
+ )
+
+ expected := schema.GetterOptionsV1{
+ URL: "example://foo",
+ CertFile: "certFile.pem",
+ KeyFile: "keyFile.pem",
+ CAFile: "caFile.pem",
+ UNTar: true,
+ Timeout: 20,
+ InsecureSkipVerifyTLS: true,
+ PlainHTTP: true,
+ AcceptHeader: "Accept-Header",
+ Username: "username",
+ Password: "password",
+ PassCredentialsAll: true,
+ UserAgent: "User-agent",
+ Version: "1.2.3",
+ }
+ assert.Equal(t, expected, opts)
+}
+
+type testPlugin struct {
+ t *testing.T
+ dir string
+}
+
+func (t *testPlugin) Dir() string {
+ return t.dir
+}
+
+func (t *testPlugin) Metadata() plugin.Metadata {
+ return plugin.Metadata{
+ Name: "fake-plugin",
+ Type: "cli/v1",
+ APIVersion: "v1",
+ Runtime: "subprocess",
+ Config: &schema.ConfigCLIV1{},
+ RuntimeConfig: &plugin.RuntimeConfigSubprocess{
+ PlatformCommand: []plugin.PlatformCommand{
+ {
+ Command: "echo fake-plugin",
+ },
+ },
+ },
+ }
+}
+
+func (t *testPlugin) Invoke(_ context.Context, _ *plugin.Input) (*plugin.Output, error) {
+ // Simulate a plugin invocation
+ output := &plugin.Output{
+ Message: schema.OutputMessageGetterV1{
+ Data: []byte("fake-plugin output"),
+ },
+ }
+ return output, nil
+}
+
+var _ plugin.Plugin = (*testPlugin)(nil)
+
+func TestGetterPlugin(t *testing.T) {
+ gp := getterPlugin{
+ options: []Option{},
+ plg: &testPlugin{t: t, dir: "fake/dir"},
+ }
+
+ buf, err := gp.Get("test://example.com", WithTimeout(5*time.Second))
+ require.NoError(t, err)
+
+ assert.Equal(t, "fake-plugin output", buf.String())
+}
+
+func TestCollectGetterPluginsPassesEnv(t *testing.T) {
+ env := cli.New()
+ env.PluginsDirectory = pluginDir
+ env.Debug = true
+
+ providers, err := collectGetterPlugins(env)
+ require.NoError(t, err)
+ require.NotEmpty(t, providers, "expected at least one plugin provider")
+
+ getter, err := providers.ByScheme("test")
+ require.NoError(t, err)
+
+ gp, ok := getter.(*getterPlugin)
+ require.True(t, ok, "expected getter to be a *getterPlugin")
+
+ require.NotEmpty(t, gp.env, "expected env to be set on getterPlugin")
+ envMap := plugin.ParseEnv(gp.env)
+
+ assert.Contains(t, envMap, "HELM_DEBUG", "expected HELM_DEBUG in env")
+ assert.Equal(t, "true", envMap["HELM_DEBUG"], "expected HELM_DEBUG to be true")
+ assert.Contains(t, envMap, "HELM_PLUGINS", "expected HELM_PLUGINS in env")
+ assert.Equal(t, pluginDir, envMap["HELM_PLUGINS"], "expected HELM_PLUGINS to match pluginsDirectory")
+}
diff --git a/helm/pkg/getter/testdata/ca.crt b/helm/pkg/getter/testdata/ca.crt
new file mode 100644
index 000000000..c17820085
--- /dev/null
+++ b/helm/pkg/getter/testdata/ca.crt
@@ -0,0 +1,25 @@
+-----BEGIN CERTIFICATE-----
+MIIEJDCCAwygAwIBAgIUcGE5xyj7IH7sZLntsHKxZHCd3awwDQYJKoZIhvcNAQEL
+BQAwYTELMAkGA1UEBhMCSU4xDzANBgNVBAgMBktlcmFsYTEOMAwGA1UEBwwFS29j
+aGkxGDAWBgNVBAoMD2NoYXJ0bXVzZXVtLmNvbTEXMBUGA1UEAwwOY2hhcnRtdXNl
+dW1fY2EwIBcNMjAxMjA0MDkxMjU4WhgPMjI5NDA5MTkwOTEyNThaMGExCzAJBgNV
+BAYTAklOMQ8wDQYDVQQIDAZLZXJhbGExDjAMBgNVBAcMBUtvY2hpMRgwFgYDVQQK
+DA9jaGFydG11c2V1bS5jb20xFzAVBgNVBAMMDmNoYXJ0bXVzZXVtX2NhMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqQJi/BRWzaXlkDP48kUAWgaLtD0Y
+72E30WBZDAw3S+BaYulRk1LWK1QM+ALiZQb1a6YgNvuERyywOv45pZaC2xtP6Bju
++59kwBrEtNCTNa2cSqs0hSw6NCDe+K8lpFKlTdh4c5sAkiDkMBr1R6uu7o4HvfO0
+iGMZ9VUdrbf4psZIyPVRdt/sAkAKqbjQfxr6VUmMktrZNND+mwPgrhS2kPL4P+JS
+zpxgpkuSUvg5DvJuypmCI0fDr6GwshqXM1ONHE0HT8MEVy1xZj9rVHt7sgQhjBX1
+PsFySZrq1lSz8R864c1l+tCGlk9+1ldQjc9tBzdvCjJB+nYfTTpBUk/VKwIDAQAB
+o4HRMIHOMB0GA1UdDgQWBBSv1IMZGHWsZVqJkJoPDzVLMcUivjCBngYDVR0jBIGW
+MIGTgBSv1IMZGHWsZVqJkJoPDzVLMcUivqFlpGMwYTELMAkGA1UEBhMCSU4xDzAN
+BgNVBAgMBktlcmFsYTEOMAwGA1UEBwwFS29jaGkxGDAWBgNVBAoMD2NoYXJ0bXVz
+ZXVtLmNvbTEXMBUGA1UEAwwOY2hhcnRtdXNldW1fY2GCFHBhOcco+yB+7GS57bBy
+sWRwnd2sMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI6Fg9F8cjB9
+2jJn1vZPpynSFs7XPlUBVh0YXBt+o6g7+nKInwFBPzPEQ7ZZotz3GIe4I7wYiQAn
+c6TU2nnqK+9TLbJIyv6NOfikLgwrTy+dAW8wrOiu+IIzA8Gdy8z8m3B7v9RUYVhx
+zoNoqCEvOIzCZKDH68PZDJrDVSuvPPK33Ywj3zxYeDNXU87BKGER0vjeVG4oTAcQ
+hKJURh4IRy/eW9NWiFqvNgst7k5MldOgLIOUBh1faaxlWkjuGpfdr/EBAAr491S5
+IPFU7TopsrgANnxldSzVbcgfo2nt0A976T3xZQHy3xpk1rIt55xVzT0W55NRAc7v
++9NTUOB10so=
+-----END CERTIFICATE-----
diff --git a/helm/pkg/getter/testdata/client.crt b/helm/pkg/getter/testdata/client.crt
new file mode 100644
index 000000000..f005f401d
--- /dev/null
+++ b/helm/pkg/getter/testdata/client.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDejCCAmKgAwIBAgIUfSn63/ldeo1prOaxXV8I0Id6HTEwDQYJKoZIhvcNAQEL
+BQAwYTELMAkGA1UEBhMCSU4xDzANBgNVBAgMBktlcmFsYTEOMAwGA1UEBwwFS29j
+aGkxGDAWBgNVBAoMD2NoYXJ0bXVzZXVtLmNvbTEXMBUGA1UEAwwOY2hhcnRtdXNl
+dW1fY2EwIBcNMjAxMjA0MDkxMzIwWhgPMjI5NDA5MTkwOTEzMjBaMFwxCzAJBgNV
+BAYTAklOMQ8wDQYDVQQIDAZLZXJhbGExDjAMBgNVBAcMBUtvY2hpMRgwFgYDVQQK
+DA9jaGFydG11c2V1bS5jb20xEjAQBgNVBAMMCTEyNy4wLjAuMTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKeCbADaK+7yrM9rQszF54334mGoSXbXY6Ca
+7FKdkgmKCjeeqZ+lr+i+6WQ+O+Tn0dhlyHier42IqUw5Rzzegvl7QrhiChd8C6sW
+pEqDK7Z1U+cv9gIabYd+qWDwFw67xiMNQfdZxwI/AgPzixlfsMw3ZNKM3Q0Vxtdz
+EEYdEDDNgZ34Cj+KXCPpYDi2i5hZnha4wzIfbL3+z2o7sPBBLBrrsOtPdVVkxysN
+HM4h7wp7w7QyOosndFvcTaX7yRA1ka0BoulCt2wdVc2ZBRPiPMySi893VCQ8zeHP
+QMFDL3rGmKVLbP1to2dgf9ZgckMEwE8chm2D8Ls87F9tsK9fVlUCAwEAAaMtMCsw
+EwYDVR0lBAwwCgYIKwYBBQUHAwIwFAYDVR0RBA0wC4IJMTI3LjAuMC4xMA0GCSqG
+SIb3DQEBCwUAA4IBAQCi7z5U9J5DkM6eYzyyH/8p32Azrunw+ZpwtxbKq3xEkpcX
+0XtbyTG2szegKF0eLr9NizgEN8M1nvaMO1zuxFMB6tCWO/MyNWH/0T4xvFnnVzJ4
+OKlGSvyIuMW3wofxCLRG4Cpw750iWpJ0GwjTOu2ep5tbnEMC5Ueg55WqCAE/yDrd
+nL1wZSGXy1bj5H6q8EM/4/yrzK80QkfdpbDR0NGkDO2mmAKL8d57NuASWljieyV3
+Ty5C8xXw5jF2JIESvT74by8ufozUOPKmgRqySgEPgAkNm0s5a05KAi5Cpyxgdylm
+CEvjni1LYGhJp9wXucF9ehKSdsw4qn9T5ire8YfI
+-----END CERTIFICATE-----
diff --git a/helm/pkg/getter/testdata/client.key b/helm/pkg/getter/testdata/client.key
new file mode 100644
index 000000000..4f676ba42
--- /dev/null
+++ b/helm/pkg/getter/testdata/client.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAp4JsANor7vKsz2tCzMXnjffiYahJdtdjoJrsUp2SCYoKN56p
+n6Wv6L7pZD475OfR2GXIeJ6vjYipTDlHPN6C+XtCuGIKF3wLqxakSoMrtnVT5y/2
+Ahpth36pYPAXDrvGIw1B91nHAj8CA/OLGV+wzDdk0ozdDRXG13MQRh0QMM2BnfgK
+P4pcI+lgOLaLmFmeFrjDMh9svf7Pajuw8EEsGuuw6091VWTHKw0cziHvCnvDtDI6
+iyd0W9xNpfvJEDWRrQGi6UK3bB1VzZkFE+I8zJKLz3dUJDzN4c9AwUMvesaYpUts
+/W2jZ2B/1mByQwTATxyGbYPwuzzsX22wr19WVQIDAQABAoIBABw7qUSDgUAm+uWC
+6KFnAd4115wqJye2qf4Z3pcWI9UjxREW1vQnkvyhoOjabHHqeL4GecGKzYAHdrF4
+Pf+OaXjvQ5GcRKMsrzLJACvm6+k24UtoFAjKt4dM2/OQw/IhyAWEaIfuQ9KnGAne
+dKV0MXJaK84pG+DmuLr7k9SddWskElEyxK2j0tvdyI5byRfjf5schac9M4i5ZAYV
+pT+PuXZQh8L8GEY2koE+uEMpXGOstD7yUxyV8zHFyBC7FVDkqF4S8IWY+RXQtVd6
+l8B8dRLjKSLBKDB+neStepcwNUyCDYiqyqsKfN7eVHDd0arPm6LeTuAsHKBw2OoN
+YdAmUUkCgYEA0vb9mxsMgr0ORTZ14vWghz9K12oKPk9ajYuLTQVn8GQazp0XTIi5
+Mil2I78Qj87ApzGqOyHbkEgpg0C9/mheYLOYNHC4of83kNF+pHbDi1TckwxIaIK0
+rZLb3Az3zZQ2rAWZ2IgSyoeVO9RxYK/RuvPFp+UBeucuXiYoI0YlEXcCgYEAy0Sk
+LTiYuLsnk21RIFK01iq4Y+4112K1NGTKu8Wm6wPaPsnLznP6339cEkbiSgbRgERE
+jgOfa/CiSw5CVT9dWZuQ3OoQ83pMRb7IB0TobPmhBS/HQZ8Ocbfb6PnxQ3o1Bx7I
+QuIpZFxzuTP80p1p2DMDxEl+r/DCvT/wgBKX6ZMCgYAdw1bYMSK8tytyPFK5aGnz
+asyGQ6GaVNuzqIJIpYCae6UEjUkiNQ/bsdnHBUey4jpv3CPmH8q4OlYQ/GtRnyvh
+fLT2gQirYjRWrBev4EmKOLi9zjfQ9s/CxTtbekDjsgtcjZW85MWx6Rr2y+wK9gMi
+2w2BuF9TFZaHFd8Hyvej1QKBgAoFbU6pbqYU3AOhrRE54p54ZrTOhqsCu8pEedY+
+DVeizfyweDLKdwDTx5dDFV7u7R80vmh99zscFvQ6VLzdLd4AFGk/xOwsCFyb5kKt
+fAP7Xpvh2iH7FHw4w0e+Is3f1YNvWhIqEj5XbIEh9gHwLsqw4SupL+y+ousvnszB
+nemvAoGBAJa7bYG8MMCFJ4OFAmkpgQzHSzq7dzOR6O4GKsQQhiZ/0nRK5l3sLcDO
+9viuRfhRepJGbcQ/Hw0AVIRWU01y4mejbuxfUE/FgWBoBBvpbot2zfuJgeFAIvkY
+iFsZwuxPQUFobTu2hj6gh0gOKj/LpNXHkZGbZ2zTXmK3GDYlf6bR
+-----END RSA PRIVATE KEY-----
diff --git a/helm/pkg/getter/testdata/empty-0.0.1.tgz b/helm/pkg/getter/testdata/empty-0.0.1.tgz
new file mode 100644
index 000000000..6c4c3d205
Binary files /dev/null and b/helm/pkg/getter/testdata/empty-0.0.1.tgz differ
diff --git a/helm/pkg/getter/testdata/plugins/testgetter/plugin.yaml b/helm/pkg/getter/testdata/plugins/testgetter/plugin.yaml
new file mode 100644
index 000000000..ca11b95ea
--- /dev/null
+++ b/helm/pkg/getter/testdata/plugins/testgetter/plugin.yaml
@@ -0,0 +1,13 @@
+name: "testgetter"
+version: "0.1.0"
+type: getter/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ protocols:
+ - "test"
+runtimeConfig:
+ protocolCommands:
+ - command: "echo"
+ protocols:
+ - "test"
diff --git a/helm/pkg/getter/testdata/plugins/testgetter2/plugin.yaml b/helm/pkg/getter/testdata/plugins/testgetter2/plugin.yaml
new file mode 100644
index 000000000..1c944a7c7
--- /dev/null
+++ b/helm/pkg/getter/testdata/plugins/testgetter2/plugin.yaml
@@ -0,0 +1,13 @@
+name: "testgetter2"
+version: "0.1.0"
+type: getter/v1
+apiVersion: v1
+runtime: subprocess
+config:
+ protocols:
+ - "test2"
+runtimeConfig:
+ protocolCommands:
+ - command: "echo"
+ protocols:
+ - "test2"
diff --git a/helm/pkg/getter/testdata/repository/local/index.yaml b/helm/pkg/getter/testdata/repository/local/index.yaml
new file mode 100644
index 000000000..efcf30c21
--- /dev/null
+++ b/helm/pkg/getter/testdata/repository/local/index.yaml
@@ -0,0 +1,3 @@
+apiVersion: v1
+entries: {}
+generated: 2017-04-28T12:34:38.900985501-06:00
diff --git a/helm/pkg/getter/testdata/repository/repositories.yaml b/helm/pkg/getter/testdata/repository/repositories.yaml
new file mode 100644
index 000000000..14ae6a8eb
--- /dev/null
+++ b/helm/pkg/getter/testdata/repository/repositories.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+generated: 2017-04-28T12:34:38.551693035-06:00
+repositories:
+- caFile: ""
+ cache: repository/cache/stable-index.yaml
+ certFile: ""
+ keyFile: ""
+ name: stable
+ url: https://charts.helm.sh/stable
+- caFile: ""
+ cache: repository/cache/local-index.yaml
+ certFile: ""
+ keyFile: ""
+ name: local
+ url: http://127.0.0.1:8879/charts
diff --git a/helm/pkg/helmpath/home.go b/helm/pkg/helmpath/home.go
new file mode 100644
index 000000000..bd43e8890
--- /dev/null
+++ b/helm/pkg/helmpath/home.go
@@ -0,0 +1,44 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package helmpath calculates filesystem paths to Helm's configuration, cache and data.
+package helmpath
+
+// This helper builds paths to Helm's configuration, cache and data paths.
+const lp = lazypath("helm")
+
+// ConfigPath returns the path where Helm stores configuration.
+func ConfigPath(elem ...string) string { return lp.configPath(elem...) }
+
+// CachePath returns the path where Helm stores cached objects.
+func CachePath(elem ...string) string { return lp.cachePath(elem...) }
+
+// DataPath returns the path where Helm stores data.
+func DataPath(elem ...string) string { return lp.dataPath(elem...) }
+
+// CacheIndexFile returns the path to an index for the given named repository.
+func CacheIndexFile(name string) string {
+ if name != "" {
+ name += "-"
+ }
+ return name + "index.yaml"
+}
+
+// CacheChartsFile returns the path to a text file listing all the charts
+// within the given named repository.
+func CacheChartsFile(name string) string {
+ if name != "" {
+ name += "-"
+ }
+ return name + "charts.txt"
+}
diff --git a/helm/pkg/helmpath/home_unix_test.go b/helm/pkg/helmpath/home_unix_test.go
new file mode 100644
index 000000000..a64c9bcd6
--- /dev/null
+++ b/helm/pkg/helmpath/home_unix_test.go
@@ -0,0 +1,45 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package helmpath
+
+import (
+ "runtime"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+func TestHelmHome(t *testing.T) {
+ t.Setenv(xdg.CacheHomeEnvVar, "/cache")
+ t.Setenv(xdg.ConfigHomeEnvVar, "/config")
+ t.Setenv(xdg.DataHomeEnvVar, "/data")
+ isEq := func(t *testing.T, got, expected string) {
+ t.Helper()
+ if expected != got {
+ t.Error(runtime.GOOS)
+ t.Errorf("Expected %q, got %q", expected, got)
+ }
+ }
+
+ isEq(t, CachePath(), "/cache/helm")
+ isEq(t, ConfigPath(), "/config/helm")
+ isEq(t, DataPath(), "/data/helm")
+
+ // test to see if lazy-loading environment variables at runtime works
+ t.Setenv(xdg.CacheHomeEnvVar, "/cache2")
+
+ isEq(t, CachePath(), "/cache2/helm")
+}
diff --git a/helm/pkg/helmpath/home_windows_test.go b/helm/pkg/helmpath/home_windows_test.go
new file mode 100644
index 000000000..38fe5e4f1
--- /dev/null
+++ b/helm/pkg/helmpath/home_windows_test.go
@@ -0,0 +1,43 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package helmpath
+
+import (
+ "os"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+func TestHelmHome(t *testing.T) {
+ os.Setenv(xdg.CacheHomeEnvVar, "c:\\")
+ os.Setenv(xdg.ConfigHomeEnvVar, "d:\\")
+ os.Setenv(xdg.DataHomeEnvVar, "e:\\")
+ isEq := func(t *testing.T, a, b string) {
+ if a != b {
+ t.Errorf("Expected %q, got %q", b, a)
+ }
+ }
+
+ isEq(t, CachePath(), "c:\\helm")
+ isEq(t, ConfigPath(), "d:\\helm")
+ isEq(t, DataPath(), "e:\\helm")
+
+ // test to see if lazy-loading environment variables at runtime works
+ os.Setenv(xdg.CacheHomeEnvVar, "f:\\")
+
+ isEq(t, CachePath(), "f:\\helm")
+}
diff --git a/helm/pkg/helmpath/lazypath.go b/helm/pkg/helmpath/lazypath.go
new file mode 100644
index 000000000..c1f868754
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath.go
@@ -0,0 +1,72 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helmpath
+
+import (
+ "os"
+ "path/filepath"
+
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+const (
+ // CacheHomeEnvVar is the environment variable used by Helm
+ // for the cache directory. When no value is set a default is used.
+ CacheHomeEnvVar = "HELM_CACHE_HOME"
+
+ // ConfigHomeEnvVar is the environment variable used by Helm
+ // for the config directory. When no value is set a default is used.
+ ConfigHomeEnvVar = "HELM_CONFIG_HOME"
+
+ // DataHomeEnvVar is the environment variable used by Helm
+ // for the data directory. When no value is set a default is used.
+ DataHomeEnvVar = "HELM_DATA_HOME"
+)
+
+// lazypath is a lazy-loaded path buffer for the XDG base directory specification.
+type lazypath string
+
+func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string {
+
+ // There is an order to checking for a path.
+ // 1. See if a Helm specific environment variable has been set.
+ // 2. Check if an XDG environment variable is set
+ // 3. Fall back to a default
+ base := os.Getenv(helmEnvVar)
+ if base != "" {
+ return filepath.Join(base, filepath.Join(elem...))
+ }
+ base = os.Getenv(xdgEnvVar)
+ if base == "" {
+ base = defaultFn()
+ }
+ return filepath.Join(base, string(l), filepath.Join(elem...))
+}
+
+// cachePath defines the base directory relative to which user specific non-essential data files
+// should be stored.
+func (l lazypath) cachePath(elem ...string) string {
+ return l.path(CacheHomeEnvVar, xdg.CacheHomeEnvVar, cacheHome, filepath.Join(elem...))
+}
+
+// configPath defines the base directory relative to which user specific configuration files should
+// be stored.
+func (l lazypath) configPath(elem ...string) string {
+ return l.path(ConfigHomeEnvVar, xdg.ConfigHomeEnvVar, configHome, filepath.Join(elem...))
+}
+
+// dataPath defines the base directory relative to which user specific data files should be stored.
+func (l lazypath) dataPath(elem ...string) string {
+ return l.path(DataHomeEnvVar, xdg.DataHomeEnvVar, dataHome, filepath.Join(elem...))
+}
diff --git a/helm/pkg/helmpath/lazypath_darwin.go b/helm/pkg/helmpath/lazypath_darwin.go
new file mode 100644
index 000000000..eba6dde15
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath_darwin.go
@@ -0,0 +1,34 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+
+package helmpath
+
+import (
+ "path/filepath"
+
+ "k8s.io/client-go/util/homedir"
+)
+
+func dataHome() string {
+ return filepath.Join(homedir.HomeDir(), "Library")
+}
+
+func configHome() string {
+ return filepath.Join(homedir.HomeDir(), "Library", "Preferences")
+}
+
+func cacheHome() string {
+ return filepath.Join(homedir.HomeDir(), "Library", "Caches")
+}
diff --git a/helm/pkg/helmpath/lazypath_darwin_test.go b/helm/pkg/helmpath/lazypath_darwin_test.go
new file mode 100644
index 000000000..e3006d0d5
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath_darwin_test.go
@@ -0,0 +1,86 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+
+package helmpath
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "k8s.io/client-go/util/homedir"
+
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+const (
+ appName = "helm"
+ testFile = "test.txt"
+ lazy = lazypath(appName)
+)
+
+func TestDataPath(t *testing.T) {
+ os.Unsetenv(xdg.DataHomeEnvVar)
+
+ expected := filepath.Join(homedir.HomeDir(), "Library", appName, testFile)
+
+ if lazy.dataPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
+ }
+
+ t.Setenv(xdg.DataHomeEnvVar, "/tmp")
+
+ expected = filepath.Join("/tmp", appName, testFile)
+
+ if lazy.dataPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
+ }
+}
+
+func TestConfigPath(t *testing.T) {
+ os.Unsetenv(xdg.ConfigHomeEnvVar)
+
+ expected := filepath.Join(homedir.HomeDir(), "Library", "Preferences", appName, testFile)
+
+ if lazy.configPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
+ }
+
+ t.Setenv(xdg.ConfigHomeEnvVar, "/tmp")
+
+ expected = filepath.Join("/tmp", appName, testFile)
+
+ if lazy.configPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
+ }
+}
+
+func TestCachePath(t *testing.T) {
+ os.Unsetenv(xdg.CacheHomeEnvVar)
+
+ expected := filepath.Join(homedir.HomeDir(), "Library", "Caches", appName, testFile)
+
+ if lazy.cachePath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
+ }
+
+ t.Setenv(xdg.CacheHomeEnvVar, "/tmp")
+
+ expected = filepath.Join("/tmp", appName, testFile)
+
+ if lazy.cachePath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
+ }
+}
diff --git a/helm/pkg/helmpath/lazypath_unix.go b/helm/pkg/helmpath/lazypath_unix.go
new file mode 100644
index 000000000..82fb4b6f1
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath_unix.go
@@ -0,0 +1,45 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !darwin
+
+package helmpath
+
+import (
+ "path/filepath"
+
+ "k8s.io/client-go/util/homedir"
+)
+
+// dataHome defines the base directory relative to which user specific data files should be stored.
+//
+// If $XDG_DATA_HOME is either not set or empty, a default equal to $HOME/.local/share is used.
+func dataHome() string {
+ return filepath.Join(homedir.HomeDir(), ".local", "share")
+}
+
+// configHome defines the base directory relative to which user specific configuration files should
+// be stored.
+//
+// If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config is used.
+func configHome() string {
+ return filepath.Join(homedir.HomeDir(), ".config")
+}
+
+// cacheHome defines the base directory relative to which user specific non-essential data files
+// should be stored.
+//
+// If $XDG_CACHE_HOME is either not set or empty, a default equal to $HOME/.cache is used.
+func cacheHome() string {
+ return filepath.Join(homedir.HomeDir(), ".cache")
+}
diff --git a/helm/pkg/helmpath/lazypath_unix_test.go b/helm/pkg/helmpath/lazypath_unix_test.go
new file mode 100644
index 000000000..4b0f2429b
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath_unix_test.go
@@ -0,0 +1,79 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !darwin
+
+package helmpath
+
+import (
+ "path/filepath"
+ "testing"
+
+ "k8s.io/client-go/util/homedir"
+
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+const (
+ appName = "helm"
+ testFile = "test.txt"
+ lazy = lazypath(appName)
+)
+
+func TestDataPath(t *testing.T) {
+ expected := filepath.Join(homedir.HomeDir(), ".local", "share", appName, testFile)
+
+ if lazy.dataPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
+ }
+
+ t.Setenv(xdg.DataHomeEnvVar, "/tmp")
+
+ expected = filepath.Join("/tmp", appName, testFile)
+
+ if lazy.dataPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
+ }
+}
+
+func TestConfigPath(t *testing.T) {
+ expected := filepath.Join(homedir.HomeDir(), ".config", appName, testFile)
+
+ if lazy.configPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
+ }
+
+ t.Setenv(xdg.ConfigHomeEnvVar, "/tmp")
+
+ expected = filepath.Join("/tmp", appName, testFile)
+
+ if lazy.configPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
+ }
+}
+
+func TestCachePath(t *testing.T) {
+ expected := filepath.Join(homedir.HomeDir(), ".cache", appName, testFile)
+
+ if lazy.cachePath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
+ }
+
+ t.Setenv(xdg.CacheHomeEnvVar, "/tmp")
+
+ expected = filepath.Join("/tmp", appName, testFile)
+
+ if lazy.cachePath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
+ }
+}
diff --git a/helm/pkg/helmpath/lazypath_windows.go b/helm/pkg/helmpath/lazypath_windows.go
new file mode 100644
index 000000000..230aee2a9
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath_windows.go
@@ -0,0 +1,24 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package helmpath
+
+import "os"
+
+func dataHome() string { return configHome() }
+
+func configHome() string { return os.Getenv("APPDATA") }
+
+func cacheHome() string { return os.Getenv("TEMP") }
diff --git a/helm/pkg/helmpath/lazypath_windows_test.go b/helm/pkg/helmpath/lazypath_windows_test.go
new file mode 100644
index 000000000..ebd95e812
--- /dev/null
+++ b/helm/pkg/helmpath/lazypath_windows_test.go
@@ -0,0 +1,89 @@
+// Copyright The Helm Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package helmpath
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "k8s.io/client-go/util/homedir"
+
+ "helm.sh/helm/v4/pkg/helmpath/xdg"
+)
+
+const (
+ appName = "helm"
+ testFile = "test.txt"
+ lazy = lazypath(appName)
+)
+
+func TestDataPath(t *testing.T) {
+ os.Unsetenv(xdg.DataHomeEnvVar)
+ os.Setenv("APPDATA", filepath.Join(homedir.HomeDir(), "foo"))
+
+ expected := filepath.Join(homedir.HomeDir(), "foo", appName, testFile)
+
+ if lazy.dataPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
+ }
+
+ os.Setenv(xdg.DataHomeEnvVar, filepath.Join(homedir.HomeDir(), "xdg"))
+
+ expected = filepath.Join(homedir.HomeDir(), "xdg", appName, testFile)
+
+ if lazy.dataPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.dataPath(testFile))
+ }
+}
+
+func TestConfigPath(t *testing.T) {
+ os.Unsetenv(xdg.ConfigHomeEnvVar)
+ os.Setenv("APPDATA", filepath.Join(homedir.HomeDir(), "foo"))
+
+ expected := filepath.Join(homedir.HomeDir(), "foo", appName, testFile)
+
+ if lazy.configPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
+ }
+
+ os.Setenv(xdg.ConfigHomeEnvVar, filepath.Join(homedir.HomeDir(), "xdg"))
+
+ expected = filepath.Join(homedir.HomeDir(), "xdg", appName, testFile)
+
+ if lazy.configPath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.configPath(testFile))
+ }
+}
+
+func TestCachePath(t *testing.T) {
+ os.Unsetenv(xdg.CacheHomeEnvVar)
+ os.Setenv("TEMP", filepath.Join(homedir.HomeDir(), "foo"))
+
+ expected := filepath.Join(homedir.HomeDir(), "foo", appName, testFile)
+
+ if lazy.cachePath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
+ }
+
+ os.Setenv(xdg.CacheHomeEnvVar, filepath.Join(homedir.HomeDir(), "xdg"))
+
+ expected = filepath.Join(homedir.HomeDir(), "xdg", appName, testFile)
+
+ if lazy.cachePath(testFile) != expected {
+ t.Errorf("expected '%s', got '%s'", expected, lazy.cachePath(testFile))
+ }
+}
diff --git a/helm/pkg/helmpath/xdg/xdg.go b/helm/pkg/helmpath/xdg/xdg.go
new file mode 100644
index 000000000..eaa3e6864
--- /dev/null
+++ b/helm/pkg/helmpath/xdg/xdg.go
@@ -0,0 +1,34 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package xdg holds constants pertaining to XDG Base Directory Specification.
+//
+// The XDG Base Directory Specification https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+// specifies the environment variables that define user-specific base directories for various categories of files.
+package xdg
+
+const (
+ // CacheHomeEnvVar is the environment variable used by the
+ // XDG base directory specification for the cache directory.
+ CacheHomeEnvVar = "XDG_CACHE_HOME"
+
+ // ConfigHomeEnvVar is the environment variable used by the
+ // XDG base directory specification for the config directory.
+ ConfigHomeEnvVar = "XDG_CONFIG_HOME"
+
+ // DataHomeEnvVar is the environment variable used by the
+ // XDG base directory specification for the data directory.
+ DataHomeEnvVar = "XDG_DATA_HOME"
+)
diff --git a/helm/pkg/ignore/doc.go b/helm/pkg/ignore/doc.go
new file mode 100644
index 000000000..a66066eb2
--- /dev/null
+++ b/helm/pkg/ignore/doc.go
@@ -0,0 +1,68 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package ignore provides tools for writing ignore files (a la .gitignore).
+
+This provides both an ignore parser and a file-aware processor.
+
+The format of ignore files closely follows, but does not exactly match, the
+format for .gitignore files (https://git-scm.com/docs/gitignore).
+
+The formatting rules are as follows:
+
+ - Parsing is line-by-line
+ - Empty lines are ignored
+ - Lines that begin with # (comments) will be ignored
+ - Leading and trailing spaces are always ignored
+ - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment)
+ - There is no support for multi-line patterns
+ - Shell glob patterns are supported. See Go's "path/filepath".Match
+ - If a pattern begins with a leading !, the match will be negated.
+ - If a pattern begins with a leading /, only paths relatively rooted will match.
+ - If the pattern ends with a trailing /, only directories will match
+ - If a pattern contains no slashes, file basenames are tested (not paths)
+ - The pattern sequence "**", while legal in a glob, will cause an error here
+ (to indicate incompatibility with .gitignore).
+
+Example:
+
+ # Match any file named foo.txt
+ foo.txt
+
+ # Match any text file
+ *.txt
+
+ # Match only directories named mydir
+ mydir/
+
+ # Match only text files in the top-level directory
+ /*.txt
+
+ # Match only the file foo.txt in the top-level directory
+ /foo.txt
+
+ # Match any file named ab.txt, ac.txt, or ad.txt
+ a[b-d].txt
+
+Notable differences from .gitignore:
+ - The '**' syntax is not supported.
+ - The globbing library is Go's 'filepath.Match', not fnmatch(3)
+ - Trailing spaces are always ignored (there is no supported escape sequence)
+ - The evaluation of escape sequences has not been tested for compatibility
+ - There is no support for '\!' as a special leading sequence.
+*/
+package ignore // import "helm.sh/helm/v4/pkg/ignore"
diff --git a/helm/pkg/ignore/rules.go b/helm/pkg/ignore/rules.go
new file mode 100644
index 000000000..a8160da2a
--- /dev/null
+++ b/helm/pkg/ignore/rules.go
@@ -0,0 +1,231 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ignore
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// HelmIgnore default name of an ignorefile.
+const HelmIgnore = ".helmignore"
+
+// Rules is a collection of path matching rules.
+//
+// Parse() and ParseFile() will construct and populate new Rules.
+// Empty() will create an immutable empty ruleset.
+type Rules struct {
+ patterns []*pattern
+}
+
+// Empty builds an empty ruleset.
+func Empty() *Rules {
+ return &Rules{patterns: []*pattern{}}
+}
+
+// AddDefaults adds default ignore patterns.
+//
+// Ignore all dotfiles in "templates/"
+func (r *Rules) AddDefaults() {
+ r.parseRule(`templates/.?*`)
+}
+
+// ParseFile parses a helmignore file and returns the *Rules.
+func ParseFile(file string) (*Rules, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return Parse(f)
+}
+
+// Parse parses a rules file
+func Parse(file io.Reader) (*Rules, error) {
+ r := &Rules{patterns: []*pattern{}}
+
+ s := bufio.NewScanner(file)
+ currentLine := 0
+ utf8bom := []byte{0xEF, 0xBB, 0xBF}
+ for s.Scan() {
+ scannedBytes := s.Bytes()
+ // We trim UTF8 BOM
+ if currentLine == 0 {
+ scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
+ }
+ line := string(scannedBytes)
+ currentLine++
+
+ if err := r.parseRule(line); err != nil {
+ return r, err
+ }
+ }
+ return r, s.Err()
+}
+
+// Ignore evaluates the file at the given path, and returns true if it should be ignored.
+//
+// Ignore evaluates path against the rules in order. Evaluation stops when a match
+// is found. Matching a negative rule will stop evaluation.
+func (r *Rules) Ignore(path string, fi os.FileInfo) bool {
+ // Don't match on empty dirs.
+ if path == "" {
+ return false
+ }
+
+ // Disallow ignoring the current working directory.
+ // See issue:
+ // 1776 (New York City) Hamilton: "Pardon me, are you Aaron Burr, sir?"
+ if path == "." || path == "./" {
+ return false
+ }
+ for _, p := range r.patterns {
+ if p.match == nil {
+ slog.Info("this will be ignored no matcher supplied", "patterns", p.raw)
+ return false
+ }
+
+ // For negative rules, we need to capture and return non-matches,
+ // and continue for matches.
+ if p.negate {
+ if p.mustDir && !fi.IsDir() {
+ return true
+ }
+ if !p.match(path, fi) {
+ return true
+ }
+ continue
+ }
+
+ // If the rule is looking for directories, and this is not a directory,
+ // skip it.
+ if p.mustDir && !fi.IsDir() {
+ continue
+ }
+ if p.match(path, fi) {
+ return true
+ }
+ }
+ return false
+}
+
+// parseRule parses a rule string and creates a pattern, which is then stored in the Rules object.
+func (r *Rules) parseRule(rule string) error {
+ rule = strings.TrimSpace(rule)
+
+ // Ignore blank lines
+ if rule == "" {
+ return nil
+ }
+ // Comment
+ if strings.HasPrefix(rule, "#") {
+ return nil
+ }
+
+ // Fail any rules that contain **
+ if strings.Contains(rule, "**") {
+ return errors.New("double-star (**) syntax is not supported")
+ }
+
+ // Fail any patterns that can't compile. A non-empty string must be
+ // given to Match() to avoid optimization that skips rule evaluation.
+ if _, err := filepath.Match(rule, "abc"); err != nil {
+ return err
+ }
+
+ p := &pattern{raw: rule}
+
+ // Negation is handled at a higher level, so strip the leading ! from the
+ // string.
+ if strings.HasPrefix(rule, "!") {
+ p.negate = true
+ rule = rule[1:]
+ }
+
+ // Directory verification is handled by a higher level, so the trailing /
+ // is removed from the rule. That way, a directory named "foo" matches,
+ // even if the supplied string does not contain a literal slash character.
+ if strings.HasSuffix(rule, "/") {
+ p.mustDir = true
+ rule = strings.TrimSuffix(rule, "/")
+ }
+
+ if after, ok := strings.CutPrefix(rule, "/"); ok {
+ // Require path matches the root path.
+ p.match = func(n string, _ os.FileInfo) bool {
+ rule = after
+ ok, err := filepath.Match(rule, n)
+ if err != nil {
+ slog.Error("failed to compile", slog.String("rule", rule), slog.Any("error", err))
+ return false
+ }
+ return ok
+ }
+ } else if strings.Contains(rule, "/") {
+ // require structural match.
+ p.match = func(n string, _ os.FileInfo) bool {
+ ok, err := filepath.Match(rule, n)
+ if err != nil {
+ slog.Error(
+ "failed to compile",
+ slog.String("rule", rule),
+ slog.Any("error", err),
+ )
+ return false
+ }
+ return ok
+ }
+ } else {
+ p.match = func(n string, _ os.FileInfo) bool {
+ // When there is no slash in the pattern, we evaluate ONLY the
+ // filename.
+ n = filepath.Base(n)
+ ok, err := filepath.Match(rule, n)
+ if err != nil {
+ slog.Error("failed to compile", slog.String("rule", rule), slog.Any("error", err))
+ return false
+ }
+ return ok
+ }
+ }
+
+ r.patterns = append(r.patterns, p)
+ return nil
+}
+
+// matcher is a function capable of computing a match.
+//
+// It returns true if the rule matches.
+type matcher func(name string, fi os.FileInfo) bool
+
+// pattern describes a pattern to be matched in a rule set.
+type pattern struct {
+ // raw is the unparsed string, with nothing stripped.
+ raw string
+ // match is the matcher function.
+ match matcher
+ // negate indicates that the rule's outcome should be negated.
+ negate bool
+ // mustDir indicates that the matched file must be a directory.
+ mustDir bool
+}
diff --git a/helm/pkg/ignore/rules_test.go b/helm/pkg/ignore/rules_test.go
new file mode 100644
index 000000000..9581cf09f
--- /dev/null
+++ b/helm/pkg/ignore/rules_test.go
@@ -0,0 +1,155 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ignore
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+var testdata = "./testdata"
+
+func TestParse(t *testing.T) {
+ rules := `#ignore
+
+ #ignore
+foo
+bar/*
+baz/bar/foo.txt
+
+one/more
+`
+ r, err := parseString(rules)
+ if err != nil {
+ t.Fatalf("Error parsing rules: %s", err)
+ }
+
+ if len(r.patterns) != 4 {
+ t.Errorf("Expected 4 rules, got %d", len(r.patterns))
+ }
+
+ expects := []string{"foo", "bar/*", "baz/bar/foo.txt", "one/more"}
+ for i, p := range r.patterns {
+ if p.raw != expects[i] {
+ t.Errorf("Expected %q, got %q", expects[i], p.raw)
+ }
+ if p.match == nil {
+ t.Errorf("Expected %s to have a matcher function.", p.raw)
+ }
+ }
+}
+
+func TestParseFail(t *testing.T) {
+ shouldFail := []string{"foo/**/bar", "[z-"}
+ for _, fail := range shouldFail {
+ _, err := parseString(fail)
+ if err == nil {
+ t.Errorf("Rule %q should have failed", fail)
+ }
+ }
+}
+
+func TestParseFile(t *testing.T) {
+ f := filepath.Join(testdata, HelmIgnore)
+ if _, err := os.Stat(f); err != nil {
+ t.Fatalf("Fixture %s missing: %s", f, err)
+ }
+
+ r, err := ParseFile(f)
+ if err != nil {
+ t.Fatalf("Failed to parse rules file: %s", err)
+ }
+
+ if len(r.patterns) != 3 {
+ t.Errorf("Expected 3 patterns, got %d", len(r.patterns))
+ }
+}
+
+func TestIgnore(t *testing.T) {
+ // Test table: Given pattern and name, Ignore should return expect.
+ tests := []struct {
+ pattern string
+ name string
+ expect bool
+ }{
+ // Glob tests
+ {`helm.txt`, "helm.txt", true},
+ {`helm.*`, "helm.txt", true},
+ {`helm.*`, "rudder.txt", false},
+ {`*.txt`, "tiller.txt", true},
+ {`*.txt`, "cargo/a.txt", true},
+ {`cargo/*.txt`, "cargo/a.txt", true},
+ {`cargo/*.*`, "cargo/a.txt", true},
+ {`cargo/*.txt`, "mast/a.txt", false},
+ {`ru[c-e]?er.txt`, "rudder.txt", true},
+ {`templates/.?*`, "templates/.dotfile", true},
+ // "." should never get ignored. https://github.com/helm/helm/issues/1776
+ {`.*`, ".", false},
+ {`.*`, "./", false},
+ {`.*`, ".joonix", true},
+ {`.*`, "helm.txt", false},
+ {`.*`, "", false},
+
+ // Directory tests
+ {`cargo/`, "cargo", true},
+ {`cargo/`, "cargo/", true},
+ {`cargo/`, "mast/", false},
+ {`helm.txt/`, "helm.txt", false},
+
+ // Negation tests
+ {`!helm.txt`, "helm.txt", false},
+ {`!helm.txt`, "tiller.txt", true},
+ {`!*.txt`, "cargo", true},
+ {`!cargo/`, "mast/", true},
+
+ // Absolute path tests
+ {`/a.txt`, "a.txt", true},
+ {`/a.txt`, "cargo/a.txt", false},
+ {`/cargo/a.txt`, "cargo/a.txt", true},
+ }
+
+ for _, test := range tests {
+ r, err := parseString(test.pattern)
+ if err != nil {
+ t.Fatalf("Failed to parse: %s", err)
+ }
+ fi, err := os.Stat(filepath.Join(testdata, test.name))
+ if err != nil {
+ t.Fatalf("Fixture missing: %s", err)
+ }
+
+ if r.Ignore(test.name, fi) != test.expect {
+ t.Errorf("Expected %q to be %v for pattern %q", test.name, test.expect, test.pattern)
+ }
+ }
+}
+
+func TestAddDefaults(t *testing.T) {
+ r := Rules{}
+ r.AddDefaults()
+
+ if len(r.patterns) != 1 {
+ t.Errorf("Expected 1 default patterns, got %d", len(r.patterns))
+ }
+}
+
+func parseString(str string) (*Rules, error) {
+ b := bytes.NewBuffer([]byte(str))
+ return Parse(b)
+}
diff --git a/helm/pkg/ignore/testdata/.helmignore b/helm/pkg/ignore/testdata/.helmignore
new file mode 100644
index 000000000..b2693bae7
--- /dev/null
+++ b/helm/pkg/ignore/testdata/.helmignore
@@ -0,0 +1,3 @@
+mast/a.txt
+.DS_Store
+.git
diff --git a/helm/pkg/ignore/testdata/.joonix b/helm/pkg/ignore/testdata/.joonix
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/a.txt b/helm/pkg/ignore/testdata/a.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/cargo/a.txt b/helm/pkg/ignore/testdata/cargo/a.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/cargo/b.txt b/helm/pkg/ignore/testdata/cargo/b.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/cargo/c.txt b/helm/pkg/ignore/testdata/cargo/c.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/helm.txt b/helm/pkg/ignore/testdata/helm.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/mast/a.txt b/helm/pkg/ignore/testdata/mast/a.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/mast/b.txt b/helm/pkg/ignore/testdata/mast/b.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/mast/c.txt b/helm/pkg/ignore/testdata/mast/c.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/rudder.txt b/helm/pkg/ignore/testdata/rudder.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/templates/.dotfile b/helm/pkg/ignore/testdata/templates/.dotfile
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/ignore/testdata/tiller.txt b/helm/pkg/ignore/testdata/tiller.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/helm/pkg/kube/client.go b/helm/pkg/kube/client.go
new file mode 100644
index 000000000..9af4bbcb3
--- /dev/null
+++ b/helm/pkg/kube/client.go
@@ -0,0 +1,1318 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ jsonpatch "github.com/evanphx/json-patch/v5"
+ v1 "k8s.io/api/core/v1"
+ apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+
+ "helm.sh/helm/v4/internal/logging"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/jsonmergepatch"
+ "k8s.io/apimachinery/pkg/util/mergepatch"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/util/csaupgrade"
+ "k8s.io/client-go/util/retry"
+ cmdutil "k8s.io/kubectl/pkg/cmd/util"
+)
+
+// ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found.
+var ErrNoObjectsVisited = errors.New("no objects visited")
+
+var metadataAccessor = meta.NewAccessor()
+
+// ManagedFieldsManager is the name of the manager of Kubernetes managedFields
+// first introduced in Kubernetes 1.18
+var ManagedFieldsManager string
+
+// Client represents a client capable of communicating with the Kubernetes API.
+type Client struct {
+ // Factory provides a minimal version of the kubectl Factory interface. If
+ // you need the full Factory you can type switch to the full interface.
+ // Since Kubernetes Go API does not provide backwards compatibility across
+ // minor versions, this API does not follow Helm backwards compatibility.
+ // Helm is exposing Kubernetes in this property and cannot guarantee this
+ // will not change. The minimal interface only has the functions that Helm
+ // needs. The smaller surface area of the interface means there is a lower
+ // chance of it changing.
+ Factory Factory
+ // Namespace allows to bypass the kubeconfig file for the choice of the namespace
+ Namespace string
+
+ // WaitContext is an optional context to use for wait operations.
+ // If not set, a context will be created internally using the
+ // timeout provided to the wait functions.
+ //
+ // Deprecated: Use WithWaitContext wait option when getting a Waiter instead.
+ WaitContext context.Context
+
+ Waiter
+ kubeClient kubernetes.Interface
+
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+var _ Interface = (*Client)(nil)
+
+// WaitStrategy represents the algorithm used to wait for Kubernetes
+// resources to reach their desired state.
+type WaitStrategy string
+
+const (
+ // StatusWatcherStrategy: event-driven waits using kstatus (watches + aggregated readers).
+ // Default for --wait. More accurate and responsive; waits CRs and full reconciliation.
+ // Requires: reachable API server, list+watch RBAC on deployed resources, and a non-zero timeout.
+ StatusWatcherStrategy WaitStrategy = "watcher"
+
+ // LegacyStrategy: Helm 3-style periodic polling until ready or timeout.
+ // Use when watches aren’t available/reliable, or for compatibility/simple CI.
+ // Requires only list RBAC for polled resources.
+ LegacyStrategy WaitStrategy = "legacy"
+
+ // HookOnlyStrategy: wait only for hook Pods/Jobs to complete; does not wait for general chart resources.
+ HookOnlyStrategy WaitStrategy = "hookOnly"
+)
+
+type FieldValidationDirective string
+
+const (
+ FieldValidationDirectiveIgnore FieldValidationDirective = "Ignore"
+ FieldValidationDirectiveWarn FieldValidationDirective = "Warn"
+ FieldValidationDirectiveStrict FieldValidationDirective = "Strict"
+)
+
+type CreateApplyFunc func(target *resource.Info) error
+type UpdateApplyFunc func(original, target *resource.Info) error
+
+func init() {
+ // Add CRDs to the scheme. They are missing by default.
+ if err := apiextv1.AddToScheme(scheme.Scheme); err != nil {
+ // This should never happen.
+ panic(err)
+ }
+ if err := apiextv1beta1.AddToScheme(scheme.Scheme); err != nil {
+ panic(err)
+ }
+}
+
+func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) {
+ var o waitOptions
+ for _, opt := range opts {
+ opt(&o)
+ }
+ cfg, err := c.Factory.ToRESTConfig()
+ if err != nil {
+ return nil, err
+ }
+ dynamicClient, err := c.Factory.DynamicClient()
+ if err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(cfg)
+ if err != nil {
+ return nil, err
+ }
+ restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ waitContext := o.ctx
+ if waitContext == nil {
+ waitContext = c.WaitContext
+ }
+ return &statusWaiter{
+ restMapper: restMapper,
+ client: dynamicClient,
+ ctx: waitContext,
+ readers: o.statusReaders,
+ }, nil
+}
+
+func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) {
+ return c.GetWaiterWithOptions(ws)
+}
+
+func (c *Client) GetWaiterWithOptions(strategy WaitStrategy, opts ...WaitOption) (Waiter, error) {
+ switch strategy {
+ case LegacyStrategy:
+ kc, err := c.Factory.KubernetesClientSet()
+ if err != nil {
+ return nil, err
+ }
+ return &legacyWaiter{kubeClient: kc, ctx: c.WaitContext}, nil
+ case StatusWatcherStrategy:
+ return c.newStatusWatcher(opts...)
+ case HookOnlyStrategy:
+ sw, err := c.newStatusWatcher(opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &hookOnlyWaiter{sw: sw}, nil
+ case "":
+ return nil, errors.New("wait strategy not set. Choose one of: " + string(StatusWatcherStrategy) + ", " + string(HookOnlyStrategy) + ", " + string(LegacyStrategy))
+ default:
+ return nil, errors.New("unknown wait strategy (s" + string(strategy) + "). Valid values are: " + string(StatusWatcherStrategy) + ", " + string(HookOnlyStrategy) + ", " + string(LegacyStrategy))
+ }
+}
+
+func (c *Client) SetWaiter(ws WaitStrategy) error {
+ return c.SetWaiterWithOptions(ws)
+}
+
+func (c *Client) SetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) error {
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(ws, opts...)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// New creates a new Client.
+func New(getter genericclioptions.RESTClientGetter) *Client {
+ if getter == nil {
+ getter = genericclioptions.NewConfigFlags(true)
+ }
+ factory := cmdutil.NewFactory(getter)
+ c := &Client{
+ Factory: factory,
+ }
+ c.SetLogger(slog.Default().Handler())
+ return c
+}
+
+// getKubeClient get or create a new KubernetesClientSet
+func (c *Client) getKubeClient() (kubernetes.Interface, error) {
+ var err error
+ if c.kubeClient == nil {
+ c.kubeClient, err = c.Factory.KubernetesClientSet()
+ }
+
+ return c.kubeClient, err
+}
+
+// IsReachable tests connectivity to the cluster.
+func (c *Client) IsReachable() error {
+ client, err := c.getKubeClient()
+ if err == genericclioptions.ErrEmptyConfig {
+ // re-replace kubernetes ErrEmptyConfig error with a friendly error
+ // moar workarounds for Kubernetes API breaking.
+ return errors.New("kubernetes cluster unreachable")
+ }
+ if err != nil {
+ return fmt.Errorf("kubernetes cluster unreachable: %w", err)
+ }
+ if _, err := client.Discovery().ServerVersion(); err != nil {
+ return fmt.Errorf("kubernetes cluster unreachable: %w", err)
+ }
+ return nil
+}
+
+type clientCreateOptions struct {
+ serverSideApply bool
+ forceConflicts bool
+ dryRun bool
+ fieldValidationDirective FieldValidationDirective
+}
+
+type ClientCreateOption func(*clientCreateOptions) error
+
+// ClientCreateOptionServerSideApply enables performing object apply server-side
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+//
+// `forceConflicts` forces conflicts to be resolved (may be when serverSideApply enabled only)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+func ClientCreateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientCreateOption {
+ return func(o *clientCreateOptions) error {
+ if !serverSideApply && forceConflicts {
+ return fmt.Errorf("forceConflicts enabled when serverSideApply disabled")
+ }
+
+ o.serverSideApply = serverSideApply
+ o.forceConflicts = forceConflicts
+
+ return nil
+ }
+}
+
+// ClientCreateOptionDryRun requests the server to perform non-mutating operations only
+func ClientCreateOptionDryRun(dryRun bool) ClientCreateOption {
+ return func(o *clientCreateOptions) error {
+ o.dryRun = dryRun
+
+ return nil
+ }
+}
+
+// ClientCreateOptionFieldValidationDirective specifies how API operations validate object's schema
+// - For client-side apply: this is ignored
+// - For server-side apply: the directive is sent to the server to perform the validation
+//
+// Defaults to `FieldValidationDirectiveStrict`
+func ClientCreateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientCreateOption {
+ return func(o *clientCreateOptions) error {
+ o.fieldValidationDirective = fieldValidationDirective
+
+ return nil
+ }
+}
+
+func (c *Client) makeCreateApplyFunc(serverSideApply, forceConflicts, dryRun bool, fieldValidationDirective FieldValidationDirective) CreateApplyFunc {
+ if serverSideApply {
+ c.Logger().Debug(
+ "using server-side apply for resource creation",
+ slog.Bool("forceConflicts", forceConflicts),
+ slog.Bool("dryRun", dryRun),
+ slog.String("fieldValidationDirective", string(fieldValidationDirective)))
+
+ return func(target *resource.Info) error {
+ err := patchResourceServerSide(target, dryRun, forceConflicts, fieldValidationDirective)
+
+ logger := c.Logger().With(
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("gvk", target.Mapping.GroupVersionKind.String()))
+ if err != nil {
+ logger.Debug("Error creating resource via patch", slog.Any("error", err))
+ return err
+ }
+
+ logger.Debug("Created resource via patch")
+
+ return nil
+ }
+ }
+
+ c.Logger().Debug("using client-side apply for resource creation")
+ return createResource
+}
+
+// Create creates Kubernetes resources specified in the resource list.
+func (c *Client) Create(resources ResourceList, options ...ClientCreateOption) (*Result, error) {
+ c.Logger().Debug("creating resource(s)", "resources", len(resources))
+
+ createOptions := clientCreateOptions{
+ serverSideApply: true, // Default to server-side apply
+ fieldValidationDirective: FieldValidationDirectiveStrict,
+ }
+
+ errs := make([]error, 0, len(options))
+ for _, o := range options {
+ errs = append(errs, o(&createOptions))
+ }
+ if err := errors.Join(errs...); err != nil {
+ return nil, fmt.Errorf("invalid client create option(s): %w", err)
+ }
+
+ createApplyFunc := c.makeCreateApplyFunc(
+ createOptions.serverSideApply,
+ createOptions.forceConflicts,
+ createOptions.dryRun,
+ createOptions.fieldValidationDirective)
+ if err := perform(resources, createApplyFunc); err != nil {
+ return nil, err
+ }
+ return &Result{Created: resources}, nil
+}
+
+func transformRequests(req *rest.Request) {
+ tableParam := strings.Join([]string{
+ fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName),
+ fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName),
+ "application/json",
+ }, ",")
+ req.SetHeader("Accept", tableParam)
+
+ // if sorting, ensure we receive the full object in order to introspect its fields via jsonpath
+ req.Param("includeObject", "Object")
+}
+
+// Get retrieves the resource objects supplied. If related is set to true the
+// related pods are fetched as well. If the passed in resources are a table kind
+// the related resources will also be fetched as kind=table.
+func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime.Object, error) {
+ buf := new(bytes.Buffer)
+ objs := make(map[string][]runtime.Object)
+
+ podSelectors := []map[string]string{}
+ err := resources.Visit(func(info *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ gvk := info.ResourceMapping().GroupVersionKind
+ vk := gvk.Version + "/" + gvk.Kind
+ obj, err := getResource(info)
+ if err != nil {
+ fmt.Fprintf(buf, "Get resource %s failed, err:%v\n", info.Name, err)
+ } else {
+ objs[vk] = append(objs[vk], obj)
+
+ // Only fetch related pods if they are requested
+ if related {
+ // Discover if the existing object is a table. If it is, request
+ // the pods as Tables. Otherwise request them normally.
+ objGVK := obj.GetObjectKind().GroupVersionKind()
+ var isTable bool
+ if objGVK.Kind == "Table" {
+ isTable = true
+ }
+
+ objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
+ if err != nil {
+ c.Logger().Warn("get the relation pod is failed", slog.Any("error", err))
+ }
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return objs, nil
+}
+
+func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]runtime.Object, table bool, podSelectors *[]map[string]string) (map[string][]runtime.Object, error) {
+ if info == nil {
+ return objs, nil
+ }
+ c.Logger().Debug("get relation pod of object", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
+ selector, ok, _ := getSelectorFromObject(info.Object)
+ if !ok {
+ return objs, nil
+ }
+
+ for index := range *podSelectors {
+ if reflect.DeepEqual((*podSelectors)[index], selector) {
+ // check if pods for selectors are already added. This avoids duplicate printing of pods
+ return objs, nil
+ }
+ }
+
+ *podSelectors = append(*podSelectors, selector)
+
+ var infos []*resource.Info
+ var err error
+ if table {
+ infos, err = c.Factory.NewBuilder().
+ Unstructured().
+ ContinueOnError().
+ NamespaceParam(info.Namespace).
+ DefaultNamespace().
+ ResourceTypes("pods").
+ LabelSelector(labels.Set(selector).AsSelector().String()).
+ TransformRequests(transformRequests).
+ Do().Infos()
+ if err != nil {
+ return objs, err
+ }
+ } else {
+ infos, err = c.Factory.NewBuilder().
+ Unstructured().
+ ContinueOnError().
+ NamespaceParam(info.Namespace).
+ DefaultNamespace().
+ ResourceTypes("pods").
+ LabelSelector(labels.Set(selector).AsSelector().String()).
+ Do().Infos()
+ if err != nil {
+ return objs, err
+ }
+ }
+ vk := "v1/Pod(related)"
+
+ for _, info := range infos {
+ objs[vk] = append(objs[vk], info.Object)
+ }
+ return objs, nil
+}
+
+func getSelectorFromObject(obj runtime.Object) (map[string]string, bool, error) {
+ typed := obj.(*unstructured.Unstructured)
+ kind := typed.Object["kind"]
+ switch kind {
+ case "ReplicaSet", "Deployment", "StatefulSet", "DaemonSet", "Job":
+ return unstructured.NestedStringMap(typed.Object, "spec", "selector", "matchLabels")
+ case "ReplicationController":
+ return unstructured.NestedStringMap(typed.Object, "spec", "selector")
+ default:
+ return nil, false, nil
+ }
+}
+
+func getResource(info *resource.Info) (runtime.Object, error) {
+ obj, err := resource.NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name)
+ if err != nil {
+ return nil, err
+ }
+ return obj, nil
+}
+
+func (c *Client) namespace() string {
+ if c.Namespace != "" {
+ return c.Namespace
+ }
+ if ns, _, err := c.Factory.ToRawKubeConfigLoader().Namespace(); err == nil {
+ return ns
+ }
+ return v1.NamespaceDefault
+}
+
+func determineFieldValidationDirective(validate bool) FieldValidationDirective {
+ if validate {
+ return FieldValidationDirectiveStrict
+ }
+
+ return FieldValidationDirectiveIgnore
+}
+
+func buildResourceList(f Factory, namespace string, validationDirective FieldValidationDirective, reader io.Reader, transformRequest resource.RequestTransform) (ResourceList, error) {
+
+ schema, err := f.Validator(string(validationDirective))
+ if err != nil {
+ return nil, err
+ }
+
+ builder := f.NewBuilder().
+ ContinueOnError().
+ NamespaceParam(namespace).
+ DefaultNamespace().
+ Flatten().
+ Unstructured().
+ Schema(schema).
+ Stream(reader, "")
+ if transformRequest != nil {
+ builder.TransformRequests(transformRequest)
+ }
+ result, err := builder.Do().Infos()
+ return result, scrubValidationError(err)
+}
+
+// Build validates for Kubernetes objects and returns unstructured infos.
+func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) {
+ return buildResourceList(
+ c.Factory,
+ c.namespace(),
+ determineFieldValidationDirective(validate),
+ reader,
+ nil)
+}
+
+// BuildTable validates for Kubernetes objects and returns unstructured infos.
+// The returned kind is a Table.
+func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, error) {
+ return buildResourceList(
+ c.Factory,
+ c.namespace(),
+ determineFieldValidationDirective(validate),
+ reader,
+ transformRequests)
+}
+
+func (c *Client) update(originals, targets ResourceList, createApplyFunc CreateApplyFunc, updateApplyFunc UpdateApplyFunc) (*Result, error) {
+ updateErrors := []error{}
+ res := &Result{}
+
+ c.Logger().Debug("checking resources for changes", "resources", len(targets))
+ err := targets.Visit(func(target *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+
+ helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ if _, err := helper.Get(target.Namespace, target.Name); err != nil {
+ if !apierrors.IsNotFound(err) {
+ return fmt.Errorf("could not get information about the resource: %w", err)
+ }
+
+ // Append the created resource to the results, even if something fails
+ res.Created = append(res.Created, target)
+
+ // Since the resource does not exist, create it.
+ if err := createApplyFunc(target); err != nil {
+ return fmt.Errorf("failed to create resource: %w", err)
+ }
+
+ kind := target.Mapping.GroupVersionKind.Kind
+ c.Logger().Debug(
+ "created a new resource",
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("kind", kind),
+ )
+ return nil
+ }
+
+ original := originals.Get(target)
+ if original == nil {
+ kind := target.Mapping.GroupVersionKind.Kind
+
+ slog.Warn("resource exists on cluster but not in original release, using cluster state as baseline",
+ "namespace", target.Namespace, "name", target.Name, "kind", kind)
+
+ currentObj, err := helper.Get(target.Namespace, target.Name)
+ if err != nil {
+ return fmt.Errorf("original object %s with the name %q not found", kind, target.Name)
+ }
+
+ // Create a temporary Info with the current cluster state to use as "original"
+ currentInfo := &resource.Info{
+ Client: target.Client,
+ Mapping: target.Mapping,
+ Namespace: target.Namespace,
+ Name: target.Name,
+ Object: currentObj,
+ }
+
+ if err := updateApplyFunc(currentInfo, target); err != nil {
+ updateErrors = append(updateErrors, err)
+ }
+
+ // Because we check for errors later, append the info regardless
+ res.Updated = append(res.Updated, target)
+
+ return nil
+ }
+
+ if err := updateApplyFunc(original, target); err != nil {
+ updateErrors = append(updateErrors, err)
+ }
+
+ // Because we check for errors later, append the info regardless
+ res.Updated = append(res.Updated, target)
+
+ return nil
+ })
+
+ switch {
+ case err != nil:
+ return res, err
+ case len(updateErrors) != 0:
+ return res, joinErrors(updateErrors, " && ")
+ }
+
+ for _, info := range originals.Difference(targets) {
+ c.Logger().Debug("deleting resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind)
+
+ if err := info.Get(); err != nil {
+ c.Logger().Debug(
+ "unable to get object",
+ slog.String("namespace", info.Namespace),
+ slog.String("name", info.Name),
+ slog.String("kind", info.Mapping.GroupVersionKind.Kind),
+ slog.Any("error", err),
+ )
+ continue
+ }
+ annotations, err := metadataAccessor.Annotations(info.Object)
+ if err != nil {
+ c.Logger().Debug(
+ "unable to get annotations",
+ slog.String("namespace", info.Namespace),
+ slog.String("name", info.Name),
+ slog.String("kind", info.Mapping.GroupVersionKind.Kind),
+ slog.Any("error", err),
+ )
+ }
+ if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
+ c.Logger().Debug("skipping delete due to annotation", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, "annotation", ResourcePolicyAnno, "value", KeepPolicy)
+ continue
+ }
+ if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
+ c.Logger().Debug(
+ "failed to delete resource",
+ slog.String("namespace", info.Namespace),
+ slog.String("name", info.Name),
+ slog.String("kind", info.Mapping.GroupVersionKind.Kind),
+ slog.Any("error", err),
+ )
+ if !apierrors.IsNotFound(err) {
+ updateErrors = append(updateErrors, fmt.Errorf(
+ "failed to delete resource namespace=%s, name=%s, kind=%s: %w",
+ info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err))
+ }
+ continue
+ }
+ res.Deleted = append(res.Deleted, info)
+ }
+
+ if len(updateErrors) != 0 {
+ return res, joinErrors(updateErrors, " && ")
+ }
+ return res, nil
+}
+
+type clientUpdateOptions struct {
+ threeWayMergeForUnstructured bool
+ serverSideApply bool
+ forceReplace bool
+ forceConflicts bool
+ dryRun bool
+ fieldValidationDirective FieldValidationDirective
+ upgradeClientSideFieldManager bool
+}
+
+type ClientUpdateOption func(*clientUpdateOptions) error
+
+// ClientUpdateOptionThreeWayMergeForUnstructured enables performing three-way merge for unstructured objects
+// Must not be enabled when ClientUpdateOptionServerSideApply is enabled
+func ClientUpdateOptionThreeWayMergeForUnstructured(threeWayMergeForUnstructured bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.threeWayMergeForUnstructured = threeWayMergeForUnstructured
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionServerSideApply enables performing object apply server-side (default)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/
+// Must not be enabled when ClientUpdateOptionThreeWayMerge is enabled
+//
+// `forceConflicts` forces conflicts to be resolved (may be enabled when serverSideApply enabled only)
+// see: https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts
+func ClientUpdateOptionServerSideApply(serverSideApply, forceConflicts bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ if !serverSideApply && forceConflicts {
+ return fmt.Errorf("forceConflicts enabled when serverSideApply disabled")
+ }
+
+ o.serverSideApply = serverSideApply
+ o.forceConflicts = forceConflicts
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionForceReplace forces objects to be replaced rather than updated via patch
+// Must not be enabled when ClientUpdateOptionForceConflicts is enabled
+func ClientUpdateOptionForceReplace(forceReplace bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.forceReplace = forceReplace
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionDryRun requests the server to perform non-mutating operations only
+func ClientUpdateOptionDryRun(dryRun bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.dryRun = dryRun
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionFieldValidationDirective specifies how API operations validate object's schema
+// - For client-side apply: this is ignored
+// - For server-side apply: the directive is sent to the server to perform the validation
+//
+// Defaults to `FieldValidationDirectiveStrict`
+func ClientUpdateOptionFieldValidationDirective(fieldValidationDirective FieldValidationDirective) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.fieldValidationDirective = fieldValidationDirective
+
+ return nil
+ }
+}
+
+// ClientUpdateOptionUpgradeClientSideFieldManager specifies that resources client-side field manager should be upgraded to server-side apply
+// (before applying the object server-side)
+// This is required when upgrading a chart from client-side to server-side apply, otherwise the client-side field management remains. Conflicting with server-side applied updates.
+//
+// Note:
+// if this option is specified, but the object is not managed by client-side field manager, it will be a no-op. However, the cost of fetching the objects will be incurred.
+//
+// see:
+// - https://github.com/kubernetes/kubernetes/pull/112905
+// - `UpgradeManagedFields` / https://github.com/kubernetes/kubernetes/blob/f47e9696d7237f1011d23c9b55f6947e60526179/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go#L81
+func ClientUpdateOptionUpgradeClientSideFieldManager(upgradeClientSideFieldManager bool) ClientUpdateOption {
+ return func(o *clientUpdateOptions) error {
+ o.upgradeClientSideFieldManager = upgradeClientSideFieldManager
+
+ return nil
+ }
+}
+
+// Update takes the current list of objects and target list of objects and
+// creates resources that don't already exist, updates resources that have been
+// modified in the target configuration, and deletes resources from the current
+// configuration that are not present in the target configuration. If an error
+// occurs, a Result will still be returned with the error, containing all
+// resource updates, creations, and deletions that were attempted. These can be
+// used for cleanup or other logging purposes.
+//
+// The default is to use server-side apply, equivalent to: `ClientUpdateOptionServerSideApply(true)`
+func (c *Client) Update(originals, targets ResourceList, options ...ClientUpdateOption) (*Result, error) {
+ updateOptions := clientUpdateOptions{
+ serverSideApply: true, // Default to server-side apply
+ fieldValidationDirective: FieldValidationDirectiveStrict,
+ }
+
+ errs := make([]error, 0, len(options))
+ for _, o := range options {
+ errs = append(errs, o(&updateOptions))
+ }
+ if err := errors.Join(errs...); err != nil {
+ return &Result{}, fmt.Errorf("invalid client update option(s): %w", err)
+ }
+
+ if updateOptions.threeWayMergeForUnstructured && updateOptions.serverSideApply {
+ return &Result{}, fmt.Errorf("invalid operation: cannot use three-way merge for unstructured and server-side apply together")
+ }
+
+ if updateOptions.forceConflicts && updateOptions.forceReplace {
+ return &Result{}, fmt.Errorf("invalid operation: cannot use force conflicts and force replace together")
+ }
+
+ if updateOptions.serverSideApply && updateOptions.forceReplace {
+ return &Result{}, fmt.Errorf("invalid operation: cannot use server-side apply and force replace together")
+ }
+
+ createApplyFunc := c.makeCreateApplyFunc(
+ updateOptions.serverSideApply,
+ updateOptions.forceConflicts,
+ updateOptions.dryRun,
+ updateOptions.fieldValidationDirective)
+
+ makeUpdateApplyFunc := func() UpdateApplyFunc {
+ if updateOptions.forceReplace {
+ c.Logger().Debug(
+ "using resource replace update strategy",
+ slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective)))
+ return func(original, target *resource.Info) error {
+ if err := replaceResource(target, updateOptions.fieldValidationDirective); err != nil {
+ c.Logger().With(
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("gvk", target.Mapping.GroupVersionKind.String()),
+ ).Debug(
+ "error replacing the resource", slog.Any("error", err),
+ )
+ return err
+ }
+
+ originalObject := original.Object
+ kind := target.Mapping.GroupVersionKind.Kind
+ c.Logger().Debug("replace succeeded", "name", original.Name, "initialKind", originalObject.GetObjectKind().GroupVersionKind().Kind, "kind", kind)
+
+ return nil
+ }
+ } else if updateOptions.serverSideApply {
+ c.Logger().Debug(
+ "using server-side apply for resource update",
+ slog.Bool("forceConflicts", updateOptions.forceConflicts),
+ slog.Bool("dryRun", updateOptions.dryRun),
+ slog.String("fieldValidationDirective", string(updateOptions.fieldValidationDirective)),
+ slog.Bool("upgradeClientSideFieldManager", updateOptions.upgradeClientSideFieldManager))
+ return func(original, target *resource.Info) error {
+
+ logger := c.Logger().With(
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("gvk", target.Mapping.GroupVersionKind.String()))
+
+ if updateOptions.upgradeClientSideFieldManager {
+ patched, err := upgradeClientSideFieldManager(original, updateOptions.dryRun, updateOptions.fieldValidationDirective)
+ if err != nil {
+ c.Logger().Debug("Error patching resource to replace CSA field management", slog.Any("error", err))
+ return err
+ }
+
+ if patched {
+ logger.Debug("Upgraded object client-side field management with server-side apply field management")
+ }
+ }
+
+ if err := patchResourceServerSide(target, updateOptions.dryRun, updateOptions.forceConflicts, updateOptions.fieldValidationDirective); err != nil {
+ logger.Debug("Error patching resource", slog.Any("error", err))
+ return err
+ }
+
+ logger.Debug("Patched resource")
+
+ return nil
+ }
+ }
+
+ c.Logger().Debug("using client-side apply for resource update", slog.Bool("threeWayMergeForUnstructured", updateOptions.threeWayMergeForUnstructured))
+ return func(original, target *resource.Info) error {
+ return patchResourceClientSide(original.Object, target, updateOptions.threeWayMergeForUnstructured)
+ }
+ }
+
+ return c.update(originals, targets, createApplyFunc, makeUpdateApplyFunc())
+}
+
+// Delete deletes Kubernetes resources specified in the resources list with
+// given deletion propagation policy. It will attempt to delete all resources even
+// if one or more fail and collect any errors. All successfully deleted items
+// will be returned in the `Deleted` ResourceList that is part of the result.
+func (c *Client) Delete(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) {
+ var errs []error
+ res := &Result{}
+ mtx := sync.Mutex{}
+ err := perform(resources, func(target *resource.Info) error {
+ c.Logger().Debug("starting delete resource", "namespace", target.Namespace, "name", target.Name, "kind", target.Mapping.GroupVersionKind.Kind)
+ err := deleteResource(target, policy)
+ if err == nil || apierrors.IsNotFound(err) {
+ if err != nil {
+ c.Logger().Debug(
+ "ignoring delete failure",
+ slog.String("namespace", target.Namespace),
+ slog.String("name", target.Name),
+ slog.String("kind", target.Mapping.GroupVersionKind.Kind),
+ slog.Any("error", err))
+ }
+ mtx.Lock()
+ defer mtx.Unlock()
+ res.Deleted = append(res.Deleted, target)
+ return nil
+ }
+ mtx.Lock()
+ defer mtx.Unlock()
+ // Collect the error and continue on
+ errs = append(errs, err)
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, ErrNoObjectsVisited) {
+ err = fmt.Errorf("object not found, skipping delete: %w", err)
+ }
+ errs = append(errs, err)
+ }
+ if errs != nil {
+ return nil, errs
+ }
+ return res, nil
+}
+
+// https://github.com/kubernetes/kubectl/blob/197123726db24c61aa0f78d1f0ba6e91a2ec2f35/pkg/cmd/apply/apply.go#L439
+func isIncompatibleServerError(err error) bool {
+ // 415: Unsupported media type means we're talking to a server which doesn't
+ // support server-side apply.
+ if _, ok := err.(*apierrors.StatusError); !ok {
+ // Non-StatusError means the error isn't because the server is incompatible.
+ return false
+ }
+ return err.(*apierrors.StatusError).Status().Code == http.StatusUnsupportedMediaType
+}
+
+// getManagedFieldsManager returns the manager string. If one was set it will be returned.
+// Otherwise, one is calculated based on the name of the binary.
+func getManagedFieldsManager() string {
+
+ // When a manager is explicitly set use it
+ if ManagedFieldsManager != "" {
+ return ManagedFieldsManager
+ }
+
+ // When no manager is set and no calling application can be found it is unknown
+ if len(os.Args[0]) == 0 {
+ return "unknown"
+ }
+
+ // When there is an application that can be determined and no set manager
+ // use the base name. This is one of the ways Kubernetes libs handle figuring
+ // names out.
+ return filepath.Base(os.Args[0])
+}
+
+func perform(infos ResourceList, fn func(*resource.Info) error) error {
+ var result error
+
+ if len(infos) == 0 {
+ return ErrNoObjectsVisited
+ }
+
+ errs := make(chan error)
+ go batchPerform(infos, fn, errs)
+
+ for range infos {
+ err := <-errs
+ if err != nil {
+ result = errors.Join(result, err)
+ }
+ }
+
+ return result
+}
+
+func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) {
+ var kind string
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ for _, info := range infos {
+ currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind
+ if kind != currentKind {
+ wg.Wait()
+ kind = currentKind
+ }
+
+ wg.Add(1)
+ go func(info *resource.Info) {
+ errs <- fn(info)
+ wg.Done()
+ }(info)
+ }
+}
+
+var createMutex sync.Mutex
+
+func createResource(info *resource.Info) error {
+ return retry.RetryOnConflict(
+ retry.DefaultRetry,
+ func() error {
+ createMutex.Lock()
+ defer createMutex.Unlock()
+ obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object)
+ if err != nil {
+ return err
+ }
+
+ return info.Refresh(obj, true)
+ })
+}
+
+func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) error {
+ return retry.RetryOnConflict(
+ retry.DefaultRetry,
+ func() error {
+ opts := &metav1.DeleteOptions{PropagationPolicy: &policy}
+ _, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts)
+ return err
+ })
+}
+
+func createPatch(original runtime.Object, target *resource.Info, threeWayMergeForUnstructured bool) ([]byte, types.PatchType, error) {
+ oldData, err := json.Marshal(original)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, fmt.Errorf("serializing current configuration: %w", err)
+ }
+ newData, err := json.Marshal(target.Object)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, fmt.Errorf("serializing target configuration: %w", err)
+ }
+
+ // Fetch the current object for the three way merge
+ helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ currentObj, err := helper.Get(target.Namespace, target.Name)
+ if err != nil && !apierrors.IsNotFound(err) {
+ return nil, types.StrategicMergePatchType, fmt.Errorf("unable to get data for current object %s/%s: %w", target.Namespace, target.Name, err)
+ }
+
+ // Even if currentObj is nil (because it was not found), it will marshal just fine
+ currentData, err := json.Marshal(currentObj)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, fmt.Errorf("serializing live configuration: %w", err)
+ }
+
+ // Get a versioned object
+ versionedObject := AsVersioned(target)
+
+ // Unstructured objects, such as CRDs, may not have a not registered error
+ // returned from ConvertToVersion. Anything that's unstructured should
+ // use generic JSON merge patch. Strategic Merge Patch is not supported
+ // on objects like CRDs.
+ _, isUnstructured := versionedObject.(runtime.Unstructured)
+
+ // On newer K8s versions, CRDs aren't unstructured but has this dedicated type
+ _, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition)
+
+ if isUnstructured || isCRD {
+ if threeWayMergeForUnstructured {
+ // from https://github.com/kubernetes/kubectl/blob/b83b2ec7d15f286720bccf7872b5c72372cb8e80/pkg/cmd/apply/patcher.go#L129
+ preconditions := []mergepatch.PreconditionFunc{
+ mergepatch.RequireKeyUnchanged("apiVersion"),
+ mergepatch.RequireKeyUnchanged("kind"),
+ mergepatch.RequireMetadataKeyUnchanged("name"),
+ }
+ patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(oldData, newData, currentData, preconditions...)
+ if err != nil && mergepatch.IsPreconditionFailed(err) {
+ err = fmt.Errorf("%w: at least one field was changed: apiVersion, kind or name", err)
+ }
+ return patch, types.MergePatchType, err
+ }
+ // fall back to generic JSON merge patch
+ patch, err := jsonpatch.CreateMergePatch(oldData, newData)
+ return patch, types.MergePatchType, err
+ }
+
+ patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
+ if err != nil {
+ return nil, types.StrategicMergePatchType, fmt.Errorf("unable to create patch metadata from object: %w", err)
+ }
+
+ patch, err := strategicpatch.CreateThreeWayMergePatch(oldData, newData, currentData, patchMeta, true)
+ return patch, types.StrategicMergePatchType, err
+}
+
+func replaceResource(target *resource.Info, fieldValidationDirective FieldValidationDirective) error {
+
+ helper := resource.NewHelper(target.Client, target.Mapping).
+ WithFieldValidation(string(fieldValidationDirective)).
+ WithFieldManager(getManagedFieldsManager())
+
+ obj, err := helper.Replace(target.Namespace, target.Name, true, target.Object)
+ if err != nil {
+ return fmt.Errorf("failed to replace object: %w", err)
+ }
+
+ if err := target.Refresh(obj, true); err != nil {
+ return fmt.Errorf("failed to refresh object after replace: %w", err)
+ }
+
+ return nil
+
+}
+
+func patchResourceClientSide(original runtime.Object, target *resource.Info, threeWayMergeForUnstructured bool) error {
+
+ patch, patchType, err := createPatch(original, target, threeWayMergeForUnstructured)
+ if err != nil {
+ return fmt.Errorf("failed to create patch: %w", err)
+ }
+
+ kind := target.Mapping.GroupVersionKind.Kind
+ if patch == nil || string(patch) == "{}" {
+ slog.Debug("no changes detected", "kind", kind, "name", target.Name)
+ // This needs to happen to make sure that Helm has the latest info from the API
+ // Otherwise there will be no labels and other functions that use labels will panic
+ if err := target.Get(); err != nil {
+ return fmt.Errorf("failed to refresh resource information: %w", err)
+ }
+ return nil
+ }
+
+ // send patch to server
+ slog.Debug("patching resource", "kind", kind, "name", target.Name, "namespace", target.Namespace)
+ helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+ obj, err := helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
+ if err != nil {
+ return fmt.Errorf("cannot patch %q with kind %s: %w", target.Name, kind, err)
+ }
+
+ target.Refresh(obj, true)
+
+ return nil
+}
+
+// upgradeClientSideFieldManager is simply a wrapper around csaupgrade.UpgradeManagedFields
+// that upgrade CSA managed fields to SSA apply
+// see: https://github.com/kubernetes/kubernetes/pull/112905
+func upgradeClientSideFieldManager(info *resource.Info, dryRun bool, fieldValidationDirective FieldValidationDirective) (bool, error) {
+
+ fieldManagerName := getManagedFieldsManager()
+
+ patched := false
+ err := retry.RetryOnConflict(
+ retry.DefaultRetry,
+ func() error {
+
+ if err := info.Get(); err != nil {
+ return fmt.Errorf("failed to get object %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err)
+ }
+
+ helper := resource.NewHelper(
+ info.Client,
+ info.Mapping).
+ DryRun(dryRun).
+ WithFieldManager(fieldManagerName).
+ WithFieldValidation(string(fieldValidationDirective))
+
+ patchData, err := csaupgrade.UpgradeManagedFieldsPatch(
+ info.Object,
+ sets.New(fieldManagerName),
+ fieldManagerName)
+ if err != nil {
+ return fmt.Errorf("failed to upgrade managed fields for object %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err)
+ }
+
+ if len(patchData) == 0 {
+ return nil
+ }
+
+ obj, err := helper.Patch(
+ info.Namespace,
+ info.Name,
+ types.JSONPatchType,
+ patchData,
+ nil)
+
+ if err == nil {
+ patched = true
+ return info.Refresh(obj, true)
+ }
+
+ if !apierrors.IsConflict(err) {
+ return fmt.Errorf("failed to patch object to upgrade CSA field manager %s/%s %s: %w", info.Namespace, info.Name, info.Mapping.GroupVersionKind.String(), err)
+ }
+
+ return err
+ })
+
+ return patched, err
+}
+
+// Patch reource using server-side apply
+func patchResourceServerSide(target *resource.Info, dryRun bool, forceConflicts bool, fieldValidationDirective FieldValidationDirective) error {
+ helper := resource.NewHelper(
+ target.Client,
+ target.Mapping).
+ DryRun(dryRun).
+ WithFieldManager(getManagedFieldsManager()).
+ WithFieldValidation(string(fieldValidationDirective))
+
+ // Send the full object to be applied on the server side.
+ data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, target.Object)
+ if err != nil {
+ return fmt.Errorf("failed to encode object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err)
+ }
+ options := metav1.PatchOptions{
+ Force: &forceConflicts,
+ }
+ obj, err := helper.Patch(
+ target.Namespace,
+ target.Name,
+ types.ApplyPatchType,
+ data,
+ &options,
+ )
+ if err != nil {
+ if isIncompatibleServerError(err) {
+ return fmt.Errorf("server-side apply not available on the server: %v", err)
+ }
+
+ if apierrors.IsConflict(err) {
+ return fmt.Errorf("conflict occurred while applying object %s/%s %s: %w", target.Namespace, target.Name, target.Mapping.GroupVersionKind.String(), err)
+ }
+
+ return err
+ }
+
+ return target.Refresh(obj, true)
+}
+
+// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions
+func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) {
+ podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get pod list with options: %+v with error: %v", listOptions, err)
+ }
+ return podList, nil
+}
+
+// OutputContainerLogsForPodList is a helper that outputs logs for a list of pods
+func (c *Client) OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error {
+ for _, pod := range podList.Items {
+ for _, container := range pod.Spec.Containers {
+ options := &v1.PodLogOptions{
+ Container: container.Name,
+ }
+ request := c.kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, options)
+ err2 := copyRequestStreamToWriter(request, pod.Name, container.Name, writerFunc(namespace, pod.Name, container.Name))
+ if err2 != nil {
+ return err2
+ }
+ }
+ }
+ return nil
+}
+
+func copyRequestStreamToWriter(request *rest.Request, podName, containerName string, writer io.Writer) error {
+ readCloser, err := request.Stream(context.Background())
+ if err != nil {
+ return fmt.Errorf("failed to stream pod logs for pod: %s, container: %s", podName, containerName)
+ }
+ defer readCloser.Close()
+ _, err = io.Copy(writer, readCloser)
+ if err != nil {
+ return fmt.Errorf("failed to copy IO from logs for pod: %s, container: %s", podName, containerName)
+ }
+ return nil
+}
+
+// scrubValidationError removes kubectl info from the message.
+func scrubValidationError(err error) error {
+ if err == nil {
+ return nil
+ }
+ const stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false"
+
+ if strings.Contains(err.Error(), stopValidateMessage) {
+ return errors.New(strings.ReplaceAll(err.Error(), "; "+stopValidateMessage, ""))
+ }
+ return err
+}
+
+type joinedErrors struct {
+ errs []error
+ sep string
+}
+
+func joinErrors(errs []error, sep string) error {
+ return &joinedErrors{
+ errs: errs,
+ sep: sep,
+ }
+}
+
+func (e *joinedErrors) Error() string {
+ errs := make([]string, 0, len(e.errs))
+ for _, err := range e.errs {
+ errs = append(errs, err.Error())
+ }
+ return strings.Join(errs, e.sep)
+}
+
+func (e *joinedErrors) Unwrap() []error {
+ return e.errs
+}
diff --git a/helm/pkg/kube/client_test.go b/helm/pkg/kube/client_test.go
new file mode 100644
index 000000000..008a68649
--- /dev/null
+++ b/helm/pkg/kube/client_test.go
@@ -0,0 +1,2361 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/object"
+ "github.com/fluxcd/cli-utils/pkg/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ batchv1 "k8s.io/api/batch/v1"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/cli-runtime/pkg/resource"
+ dynamicfake "k8s.io/client-go/dynamic/fake"
+ "k8s.io/client-go/kubernetes"
+ k8sfake "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest/fake"
+ cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
+)
+
+var (
+ unstructuredSerializer = resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer
+ codec = scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
+)
+
+func objBody(obj runtime.Object) io.ReadCloser {
+ return io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
+}
+
+func newPod(name string) v1.Pod {
+ return newPodWithStatus(name, v1.PodStatus{}, "")
+}
+
+func newPodWithStatus(name string, status v1.PodStatus, namespace string) v1.Pod {
+ ns := v1.NamespaceDefault
+ if namespace != "" {
+ ns = namespace
+ }
+ return v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ SelfLink: "/api/v1/namespaces/default/pods/" + name,
+ },
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
+ Name: "app:v4",
+ Image: "abc/app:v4",
+ Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 80}},
+ }},
+ },
+ Status: status,
+ }
+}
+
+func newPodList(names ...string) v1.PodList {
+ var list v1.PodList
+ for _, name := range names {
+ list.Items = append(list.Items, newPod(name))
+ }
+ return list
+}
+
+func notFoundBody() *metav1.Status {
+ return &metav1.Status{
+ Code: http.StatusNotFound,
+ Status: metav1.StatusFailure,
+ Reason: metav1.StatusReasonNotFound,
+ Message: " \"\" not found",
+ Details: &metav1.StatusDetails{},
+ }
+}
+
+func newResponse(code int, obj runtime.Object) (*http.Response, error) {
+ header := http.Header{}
+ header.Set("Content-Type", runtime.ContentTypeJSON)
+ body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))
+ return &http.Response{StatusCode: code, Header: header, Body: body}, nil
+}
+
+func newResponseJSON(code int, json []byte) (*http.Response, error) {
+ header := http.Header{}
+ header.Set("Content-Type", runtime.ContentTypeJSON)
+ body := io.NopCloser(bytes.NewReader(json))
+ return &http.Response{StatusCode: code, Header: header, Body: body}, nil
+}
+
+func newTestClient(t *testing.T) *Client {
+ t.Helper()
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ return &Client{
+ Factory: testFactory.WithNamespace(v1.NamespaceDefault),
+ }
+}
+
+type RequestResponseAction struct {
+ Request http.Request
+ Response http.Response
+ Error error
+}
+
+type RoundTripperTestFunc func(previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+
+func NewRequestResponseLogClient(t *testing.T, cb RoundTripperTestFunc) RequestResponseLogClient {
+ t.Helper()
+ return RequestResponseLogClient{
+ t: t,
+ cb: cb,
+ }
+}
+
+// RequestResponseLogClient is a test client that logs requests and responses
+// Satisfying http.RoundTripper interface, it can be used to mock HTTP requests in tests.
+// Forwarding requests to a callback function (cb) that can be used to simulate server responses.
+type RequestResponseLogClient struct {
+ t *testing.T
+ cb RoundTripperTestFunc
+ actionsLock sync.Mutex
+ Actions []RequestResponseAction
+}
+
+func (r *RequestResponseLogClient) Do(req *http.Request) (*http.Response, error) {
+ t := r.t
+ t.Helper()
+
+ readBodyBytes := func(body io.ReadCloser) []byte {
+ if body == nil {
+ return []byte{}
+ }
+
+ defer body.Close()
+ bodyBytes, err := io.ReadAll(body)
+ require.NoError(t, err)
+
+ return bodyBytes
+ }
+
+ reqBytes := readBodyBytes(req.Body)
+
+ t.Logf("Request: %s %s %s", req.Method, req.URL.String(), reqBytes)
+ if req.Body != nil {
+ req.Body = io.NopCloser(bytes.NewReader(reqBytes))
+ }
+
+ resp, err := r.cb(r.Actions, req)
+
+ respBytes := readBodyBytes(resp.Body)
+ t.Logf("Response: %d %s", resp.StatusCode, string(respBytes))
+ if resp.Body != nil {
+ resp.Body = io.NopCloser(bytes.NewReader(respBytes))
+ }
+
+ r.actionsLock.Lock()
+ defer r.actionsLock.Unlock()
+ r.Actions = append(r.Actions, RequestResponseAction{
+ Request: *req,
+ Response: *resp,
+ Error: err,
+ })
+
+ return resp, err
+}
+
+func TestCreate(t *testing.T) {
+ // Note: c.Create with the fake client can currently only test creation of a single pod/object in the same list. When testing
+ // with more than one pod, c.Create will run into a data race as it calls perform->batchPerform which performs creation
+ // in batches. The race is something in the fake client itself in `func (c *RESTClient) do(...)`
+ // when it stores the req: c.Req = req and cannot (?) be fixed easily.
+
+ type testCase struct {
+ Name string
+ Pods v1.PodList
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ServerSideApply bool
+ ExpectedActions []string
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "Create success (client-side apply)": {
+ Pods: newPodList("starfish"),
+ ServerSideApply: false,
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, _ *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ if len(previous) < 2 { // simulate a conflict
+ return newResponseJSON(http.StatusConflict, resourceQuotaConflict)
+ }
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ ExpectedActions: []string{
+ "/namespaces/default/pods:POST",
+ "/namespaces/default/pods:POST",
+ "/namespaces/default/pods:POST",
+ },
+ },
+ "Create success (server-side apply)": {
+ Pods: newPodList("whale"),
+ ServerSideApply: true,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ ExpectedActions: []string{
+ "/namespaces/default/pods/whale:PATCH",
+ },
+ },
+ "Create fail: incompatible server (server-side apply)": {
+ Pods: newPodList("lobster"),
+ ServerSideApply: true,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusUnsupportedMediaType,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "server-side apply not available on the server:",
+ ExpectedActions: []string{
+ "/namespaces/default/pods/lobster:PATCH",
+ },
+ },
+ "Create fail: quota (server-side apply)": {
+ Pods: newPodList("dolphin"),
+ ServerSideApply: true,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, _ *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return newResponseJSON(http.StatusConflict, resourceQuotaConflict)
+ },
+ ExpectedErrorContains: "Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; " +
+ "please apply your changes to the latest version and try again",
+ ExpectedActions: []string{
+ "/namespaces/default/pods/dolphin:PATCH",
+ },
+ },
+ }
+
+ c := newTestClient(t)
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ list, err := c.Build(objBody(&tc.Pods), false)
+ require.NoError(t, err)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ result, err := c.Create(
+ list,
+ ClientCreateOptionServerSideApply(tc.ServerSideApply, false))
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+
+ // See note above about limitations in supporting more than a single object
+ assert.Len(t, result.Created, 1, "expected 1 object created, got %d", len(result.Created))
+ }
+
+ actions := []string{}
+ for _, action := range client.Actions {
+ path, method := action.Request.URL.Path, action.Request.Method
+ actions = append(actions, path+":"+method)
+ }
+
+ assert.Equal(t, tc.ExpectedActions, actions)
+
+ })
+ }
+}
+
+func TestUpdate(t *testing.T) {
+ type testCase struct {
+ OriginalPods v1.PodList
+ TargetPods v1.PodList
+ ThreeWayMergeForUnstructured bool
+ ServerSideApply bool
+ ExpectedActions []string
+ ExpectedError string
+ }
+
+ expectedActionsClientSideApply := []string{
+ "/namespaces/default/pods/starfish:GET",
+ "/namespaces/default/pods/starfish:GET",
+ "/namespaces/default/pods/starfish:PATCH",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/dolphin:GET",
+ "/namespaces/default/pods:POST", // create dolphin
+ "/namespaces/default/pods:POST", // retry due to 409
+ "/namespaces/default/pods:POST", // retry due to 409
+ "/namespaces/default/pods/squid:GET",
+ "/namespaces/default/pods/squid:DELETE",
+ "/namespaces/default/pods/notfound:GET",
+ "/namespaces/default/pods/notfound:DELETE",
+ }
+
+ expectedActionsServerSideApply := []string{
+ "/namespaces/default/pods/starfish:GET",
+ "/namespaces/default/pods/starfish:GET",
+ "/namespaces/default/pods/starfish:PATCH",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/otter:GET",
+ "/namespaces/default/pods/otter:PATCH",
+ "/namespaces/default/pods/dolphin:GET",
+ "/namespaces/default/pods/dolphin:PATCH", // create dolphin
+ "/namespaces/default/pods/squid:GET",
+ "/namespaces/default/pods/squid:DELETE",
+ "/namespaces/default/pods/notfound:GET",
+ "/namespaces/default/pods/notfound:DELETE",
+ }
+
+ testCases := map[string]testCase{
+ "client-side apply": {
+ OriginalPods: newPodList("starfish", "otter", "squid", "notfound"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ ServerSideApply: false,
+ ExpectedActions: expectedActionsClientSideApply,
+ ExpectedError: "",
+ },
+ "client-side apply (three-way merge for unstructured)": {
+ OriginalPods: newPodList("starfish", "otter", "squid", "notfound"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: true,
+ ServerSideApply: false,
+ ExpectedActions: expectedActionsClientSideApply,
+ ExpectedError: "",
+ },
+ "serverSideApply": {
+ OriginalPods: newPodList("starfish", "otter", "squid", "notfound"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ ServerSideApply: true,
+ ExpectedActions: expectedActionsServerSideApply,
+ ExpectedError: "",
+ },
+ "serverSideApply with forbidden deletion": {
+ OriginalPods: newPodList("starfish", "otter", "squid", "notfound", "forbidden"),
+ TargetPods: func() v1.PodList {
+ listTarget := newPodList("starfish", "otter", "dolphin")
+ listTarget.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return listTarget
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ ServerSideApply: true,
+ ExpectedActions: append(expectedActionsServerSideApply,
+ "/namespaces/default/pods/forbidden:GET",
+ "/namespaces/default/pods/forbidden:DELETE",
+ ),
+ ExpectedError: "failed to delete resource namespace=default, name=forbidden, kind=Pod:",
+ },
+ "rollback after failed upgrade with removed resource": {
+ // Simulates rollback scenario:
+ // - Revision 1 had "newpod"
+ // - Revision 2 removed "newpod" but upgrade failed (OriginalPods is empty)
+ // - Cluster still has "newpod" from Revision 1
+ // - Rolling back to Revision 1 (TargetPods with "newpod") should succeed
+ OriginalPods: v1.PodList{}, // Revision 2 (failed) - resource was removed
+ TargetPods: newPodList("newpod"), // Revision 1 - rolling back to this
+ ThreeWayMergeForUnstructured: false,
+ ServerSideApply: true,
+ ExpectedActions: []string{
+ "/namespaces/default/pods/newpod:GET", // Check if resource exists
+ "/namespaces/default/pods/newpod:GET", // Get current state (first call in update path)
+ "/namespaces/default/pods/newpod:GET", // Get current cluster state to use as baseline
+ "/namespaces/default/pods/newpod:PATCH", // Update using cluster state as baseline
+ },
+ ExpectedError: "",
+ },
+ }
+
+ c := newTestClient(t)
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ listOriginal := tc.OriginalPods
+ listTarget := tc.TargetPods
+
+ iterationCounter := 0
+ cb := func(_ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+
+ switch {
+ case p == "/namespaces/default/pods/newpod" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listTarget.Items[0])
+ case p == "/namespaces/default/pods/newpod" && m == http.MethodPatch:
+ return newResponse(http.StatusOK, &listTarget.Items[0])
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listOriginal.Items[0])
+ case p == "/namespaces/default/pods/otter" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listOriginal.Items[1])
+ case p == "/namespaces/default/pods/otter" && m == http.MethodPatch:
+ if !tc.ServerSideApply {
+ defer req.Body.Close()
+ data, err := io.ReadAll(req.Body)
+ require.NoError(t, err)
+
+ assert.Equal(t, `{}`, string(data))
+ }
+
+ return newResponse(http.StatusOK, &listTarget.Items[0])
+ case p == "/namespaces/default/pods/dolphin" && m == http.MethodGet:
+ return newResponse(http.StatusNotFound, notFoundBody())
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodPatch:
+ if !tc.ServerSideApply {
+ // Ensure client-side apply specifies correct patch
+ defer req.Body.Close()
+ data, err := io.ReadAll(req.Body)
+ require.NoError(t, err)
+
+ expected := `{"spec":{"$setElementOrder/containers":[{"name":"app:v4"}],"containers":[{"$setElementOrder/ports":[{"containerPort":443}],"name":"app:v4","ports":[{"containerPort":443,"name":"https"},{"$patch":"delete","containerPort":80}]}]}}`
+ assert.Equal(t, expected, string(data))
+ }
+
+ return newResponse(http.StatusOK, &listTarget.Items[0])
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ if iterationCounter < 2 {
+ iterationCounter++
+ return newResponseJSON(http.StatusConflict, resourceQuotaConflict)
+ }
+
+ return newResponse(http.StatusOK, &listTarget.Items[1])
+ case p == "/namespaces/default/pods/dolphin" && m == http.MethodPatch:
+ return newResponse(http.StatusOK, &listTarget.Items[1])
+ case p == "/namespaces/default/pods/squid" && m == http.MethodDelete:
+ return newResponse(http.StatusOK, &listTarget.Items[1])
+ case p == "/namespaces/default/pods/squid" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listTarget.Items[2])
+ case p == "/namespaces/default/pods/notfound" && m == http.MethodGet:
+ // Resource exists in original but will simulate not found on delete
+ return newResponse(http.StatusOK, &listOriginal.Items[3])
+ case p == "/namespaces/default/pods/notfound" && m == http.MethodDelete:
+ // Simulate a not found during deletion; should not cause update to fail
+ return newResponse(http.StatusNotFound, notFoundBody())
+ case p == "/namespaces/default/pods/forbidden" && m == http.MethodGet:
+ return newResponse(http.StatusOK, &listOriginal.Items[4])
+ case p == "/namespaces/default/pods/forbidden" && m == http.MethodDelete:
+ // Simulate RBAC forbidden that should cause update to fail
+ return newResponse(http.StatusForbidden, &metav1.Status{
+ Status: metav1.StatusFailure,
+ Message: "pods \"forbidden\" is forbidden: User \"test-user\" cannot delete resource \"pods\" in API group \"\" in the namespace \"default\"",
+ Reason: metav1.StatusReasonForbidden,
+ Code: http.StatusForbidden,
+ })
+ }
+
+ t.FailNow()
+ return nil, nil
+ }
+
+ client := NewRequestResponseLogClient(t, cb)
+
+ c.Factory.(*cmdtesting.TestFactory).UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ first, err := c.Build(objBody(&listOriginal), false)
+ require.NoError(t, err)
+
+ second, err := c.Build(objBody(&listTarget), false)
+ require.NoError(t, err)
+
+ result, err := c.Update(
+ first,
+ second,
+ ClientUpdateOptionThreeWayMergeForUnstructured(tc.ThreeWayMergeForUnstructured),
+ ClientUpdateOptionForceReplace(false),
+ ClientUpdateOptionServerSideApply(tc.ServerSideApply, false),
+ ClientUpdateOptionUpgradeClientSideFieldManager(true))
+
+ if tc.ExpectedError != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.ExpectedError)
+ } else {
+ require.NoError(t, err)
+ }
+
+ // Special handling for the rollback test case
+ if name == "rollback after failed upgrade with removed resource" {
+ assert.Len(t, result.Created, 0, "expected 0 resource created, got %d", len(result.Created))
+ assert.Len(t, result.Updated, 1, "expected 1 resource updated, got %d", len(result.Updated))
+ assert.Len(t, result.Deleted, 0, "expected 0 resource deleted, got %d", len(result.Deleted))
+ } else {
+ assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created))
+ assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated))
+ assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted))
+ }
+
+ if tc.ExpectedError != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.ExpectedError)
+ } else {
+ require.NoError(t, err)
+ }
+
+ actions := []string{}
+ for _, action := range client.Actions {
+ path, method := action.Request.URL.Path, action.Request.Method
+ actions = append(actions, path+":"+method)
+ }
+
+ assert.Equal(t, tc.ExpectedActions, actions)
+ })
+ }
+}
+
+func TestBuild(t *testing.T) {
+ tests := []struct {
+ name string
+ namespace string
+ reader io.Reader
+ count int
+ err bool
+ }{
+ {
+ name: "Valid input",
+ namespace: "test",
+ reader: strings.NewReader(guestbookManifest),
+ count: 6,
+ }, {
+ name: "Valid input, deploying resources into different namespaces",
+ namespace: "test",
+ reader: strings.NewReader(namespacedGuestbookManifest),
+ count: 1,
+ },
+ }
+
+ c := newTestClient(t)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Test for an invalid manifest
+ infos, err := c.Build(tt.reader, false)
+ if err != nil && !tt.err {
+ t.Errorf("Got error message when no error should have occurred: %v", err)
+ } else if err != nil && strings.Contains(err.Error(), "--validate=false") {
+ t.Error("error message was not scrubbed")
+ }
+
+ if len(infos) != tt.count {
+ t.Errorf("expected %d result objects, got %d", tt.count, len(infos))
+ }
+ })
+ }
+}
+
+func TestBuildTable(t *testing.T) {
+ tests := []struct {
+ name string
+ namespace string
+ reader io.Reader
+ count int
+ err bool
+ }{
+ {
+ name: "Valid input",
+ namespace: "test",
+ reader: strings.NewReader(guestbookManifest),
+ count: 6,
+ }, {
+ name: "Valid input, deploying resources into different namespaces",
+ namespace: "test",
+ reader: strings.NewReader(namespacedGuestbookManifest),
+ count: 1,
+ },
+ }
+
+ c := newTestClient(t)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Test for an invalid manifest
+ infos, err := c.BuildTable(tt.reader, false)
+ if err != nil && !tt.err {
+ t.Errorf("Got error message when no error should have occurred: %v", err)
+ } else if err != nil && strings.Contains(err.Error(), "--validate=false") {
+ t.Error("error message was not scrubbed")
+ }
+
+ if len(infos) != tt.count {
+ t.Errorf("expected %d result objects, got %d", tt.count, len(infos))
+ }
+ })
+ }
+}
+
+func TestPerform(t *testing.T) {
+ tests := []struct {
+ name string
+ reader io.Reader
+ count int
+ err bool
+ errMessage string
+ }{
+ {
+ name: "Valid input",
+ reader: strings.NewReader(guestbookManifest),
+ count: 6,
+ }, {
+ name: "Empty manifests",
+ reader: strings.NewReader(""),
+ err: true,
+ errMessage: "no objects visited",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ results := []*resource.Info{}
+
+ fn := func(info *resource.Info) error {
+ results = append(results, info)
+ return nil
+ }
+
+ c := newTestClient(t)
+ infos, err := c.Build(tt.reader, false)
+ if err != nil && err.Error() != tt.errMessage {
+ t.Errorf("Error while building manifests: %v", err)
+ }
+
+ err = perform(infos, fn)
+ if (err != nil) != tt.err {
+ t.Errorf("expected error: %v, got %v", tt.err, err)
+ }
+ if err != nil && err.Error() != tt.errMessage {
+ t.Errorf("expected error message: %v, got %v", tt.errMessage, err)
+ }
+
+ if len(results) != tt.count {
+ t.Errorf("expected %d result objects, got %d", tt.count, len(results))
+ }
+ })
+ }
+}
+
+func TestWait(t *testing.T) {
+ podList := newPodList("starfish", "otter", "squid")
+
+ var created *time.Time
+
+ c := newTestClient(t)
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+ switch {
+ case p == "/api/v1/namespaces/default/pods/starfish" && m == http.MethodGet:
+ pod := &podList.Items[0]
+ if created != nil && time.Since(*created) >= time.Second*5 {
+ pod.Status.Conditions = []v1.PodCondition{
+ {
+ Type: v1.PodReady,
+ Status: v1.ConditionTrue,
+ },
+ }
+ }
+ return newResponse(http.StatusOK, pod)
+ case p == "/api/v1/namespaces/default/pods/otter" && m == http.MethodGet:
+ pod := &podList.Items[1]
+ if created != nil && time.Since(*created) >= time.Second*5 {
+ pod.Status.Conditions = []v1.PodCondition{
+ {
+ Type: v1.PodReady,
+ Status: v1.ConditionTrue,
+ },
+ }
+ }
+ return newResponse(http.StatusOK, pod)
+ case p == "/api/v1/namespaces/default/pods/squid" && m == http.MethodGet:
+ pod := &podList.Items[2]
+ if created != nil && time.Since(*created) >= time.Second*5 {
+ pod.Status.Conditions = []v1.PodCondition{
+ {
+ Type: v1.PodReady,
+ Status: v1.ConditionTrue,
+ },
+ }
+ }
+ return newResponse(http.StatusOK, pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ now := time.Now()
+ created = &now
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return nil, nil
+ }
+ }),
+ }
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resources, err := c.Build(objBody(&podList), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(result.Created) != 3 {
+ t.Errorf("expected 3 resource created, got %d", len(result.Created))
+ }
+
+ if err := c.Wait(resources, time.Second*30); err != nil {
+ t.Errorf("expected wait without error, got %s", err)
+ }
+
+ if time.Since(*created) < time.Second*5 {
+ t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created))
+ }
+}
+
+func TestWaitJob(t *testing.T) {
+ job := newJob("starfish", 0, intToInt32(1), 0, 0)
+
+ var created *time.Time
+
+ c := newTestClient(t)
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+ switch {
+ case p == "/apis/batch/v1/namespaces/default/jobs/starfish" && m == http.MethodGet:
+ if created != nil && time.Since(*created) >= time.Second*5 {
+ job.Status.Succeeded = 1
+ }
+ return newResponse(http.StatusOK, job)
+ case p == "/namespaces/default/jobs" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ now := time.Now()
+ created = &now
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return nil, nil
+ }
+ }),
+ }
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resources, err := c.Build(objBody(job), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(result.Created) != 1 {
+ t.Errorf("expected 1 resource created, got %d", len(result.Created))
+ }
+
+ if err := c.WaitWithJobs(resources, time.Second*30); err != nil {
+ t.Errorf("expected wait without error, got %s", err)
+ }
+
+ if time.Since(*created) < time.Second*5 {
+ t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*created))
+ }
+}
+
+func TestWaitDelete(t *testing.T) {
+ pod := newPod("starfish")
+
+ var deleted *time.Time
+
+ c := newTestClient(t)
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+ switch {
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodGet:
+ if deleted != nil && time.Since(*deleted) >= time.Second*5 {
+ return newResponse(http.StatusNotFound, notFoundBody())
+ }
+ return newResponse(http.StatusOK, &pod)
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodDelete:
+ now := time.Now()
+ deleted = &now
+ return newResponse(http.StatusOK, &pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return nil, nil
+ }
+ }),
+ }
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resources, err := c.Build(objBody(&pod), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(result.Created) != 1 {
+ t.Errorf("expected 1 resource created, got %d", len(result.Created))
+ }
+ if _, err := c.Delete(resources, metav1.DeletePropagationBackground); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := c.WaitForDelete(resources, time.Second*30); err != nil {
+ t.Errorf("expected wait without error, got %s", err)
+ }
+
+ if time.Since(*deleted) < time.Second*5 {
+ t.Errorf("expected to wait at least 5 seconds before ready status was detected, but got %s", time.Since(*deleted))
+ }
+}
+
+func TestReal(t *testing.T) {
+ t.Skip("This is a live test, comment this line to run")
+ c := New(nil)
+ resources, err := c.Build(strings.NewReader(guestbookManifest), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := c.Create(resources); err != nil {
+ t.Fatal(err)
+ }
+
+ testSvcEndpointManifest := testServiceManifest + "\n---\n" + testEndpointManifest
+ c = New(nil)
+ resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := c.Create(resources); err != nil {
+ t.Fatal(err)
+ }
+
+ resources, err = c.Build(strings.NewReader(testEndpointManifest), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, errs := c.Delete(resources, metav1.DeletePropagationBackground); errs != nil {
+ t.Fatal(errs)
+ }
+
+ resources, err = c.Build(strings.NewReader(testSvcEndpointManifest), false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // ensures that delete does not fail if a resource is not found
+ if _, errs := c.Delete(resources, metav1.DeletePropagationBackground); errs != nil {
+ t.Fatal(errs)
+ }
+}
+
+func TestGetPodList(t *testing.T) {
+ namespace := "some-namespace"
+ names := []string{"dave", "jimmy"}
+ var responsePodList v1.PodList
+ for _, name := range names {
+ responsePodList.Items = append(responsePodList.Items, newPodWithStatus(name, v1.PodStatus{}, namespace))
+ }
+
+ kubeClient := k8sfake.NewClientset(&responsePodList)
+ c := Client{Namespace: namespace, kubeClient: kubeClient}
+
+ podList, err := c.GetPodList(namespace, metav1.ListOptions{})
+ clientAssertions := assert.New(t)
+ clientAssertions.NoError(err)
+ clientAssertions.Equal(&responsePodList, podList)
+}
+
+func TestOutputContainerLogsForPodList(t *testing.T) {
+ namespace := "some-namespace"
+ somePodList := newPodList("jimmy", "three", "structs")
+
+ kubeClient := k8sfake.NewClientset(&somePodList)
+ c := Client{Namespace: namespace, kubeClient: kubeClient}
+ outBuffer := &bytes.Buffer{}
+ outBufferFunc := func(_, _, _ string) io.Writer { return outBuffer }
+ err := c.OutputContainerLogsForPodList(&somePodList, namespace, outBufferFunc)
+ clientAssertions := assert.New(t)
+ clientAssertions.NoError(err)
+ clientAssertions.Equal("fake logsfake logsfake logs", outBuffer.String())
+}
+
+const testServiceManifest = `
+kind: Service
+apiVersion: v1
+metadata:
+ name: my-service
+spec:
+ selector:
+ app: myapp
+ ports:
+ - port: 80
+ protocol: TCP
+ targetPort: 9376
+`
+
+const testEndpointManifest = `
+kind: Endpoints
+apiVersion: v1
+metadata:
+ name: my-service
+subsets:
+ - addresses:
+ - ip: "1.2.3.4"
+ ports:
+ - port: 9376
+`
+
+const guestbookManifest = `
+apiVersion: v1
+kind: Service
+metadata:
+ name: redis-master
+ labels:
+ app: redis
+ tier: backend
+ role: master
+spec:
+ ports:
+ - port: 6379
+ targetPort: 6379
+ selector:
+ app: redis
+ tier: backend
+ role: master
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: redis-master
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: redis
+ role: master
+ tier: backend
+ spec:
+ containers:
+ - name: master
+ image: registry.k8s.io/redis:e2e # or just image: redis
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ ports:
+ - containerPort: 6379
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: redis-replica
+ labels:
+ app: redis
+ tier: backend
+ role: replica
+spec:
+ ports:
+ # the port that this service should serve on
+ - port: 6379
+ selector:
+ app: redis
+ tier: backend
+ role: replica
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: redis-replica
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: redis
+ role: replica
+ tier: backend
+ spec:
+ containers:
+ - name: replica
+ image: gcr.io/google_samples/gb-redisreplica:v1
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ env:
+ - name: GET_HOSTS_FROM
+ value: dns
+ ports:
+ - containerPort: 6379
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: frontend
+ labels:
+ app: guestbook
+ tier: frontend
+spec:
+ ports:
+ - port: 80
+ selector:
+ app: guestbook
+ tier: frontend
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: frontend
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: guestbook
+ tier: frontend
+ spec:
+ containers:
+ - name: php-redis
+ image: gcr.io/google-samples/gb-frontend:v4
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ env:
+ - name: GET_HOSTS_FROM
+ value: dns
+ ports:
+ - containerPort: 80
+`
+
+const namespacedGuestbookManifest = `
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: frontend
+ namespace: guestbook
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: guestbook
+ tier: frontend
+ spec:
+ containers:
+ - name: php-redis
+ image: gcr.io/google-samples/gb-frontend:v4
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ env:
+ - name: GET_HOSTS_FROM
+ value: dns
+ ports:
+ - containerPort: 80
+`
+
+var resourceQuotaConflict = []byte(`
+{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Operation cannot be fulfilled on resourcequotas \"quota\": the object has been modified; please apply your changes to the latest version and try again","reason":"Conflict","details":{"name":"quota","kind":"resourcequotas"},"code":409}`)
+
+type createPatchTestCase struct {
+ name string
+
+ // The target state.
+ target *unstructured.Unstructured
+ // The state as it exists in the release.
+ original *unstructured.Unstructured
+ // The actual state as it exists in the cluster.
+ actual *unstructured.Unstructured
+
+ threeWayMergeForUnstructured bool
+ // The patch is supposed to transfer the current state to the target state,
+ // thereby preserving the actual state, wherever possible.
+ expectedPatch string
+ expectedPatchType types.PatchType
+}
+
+func (c createPatchTestCase) run(t *testing.T) {
+ scheme := runtime.NewScheme()
+ v1.AddToScheme(scheme)
+ encoder := jsonserializer.NewSerializerWithOptions(
+ jsonserializer.DefaultMetaFactory, scheme, scheme, jsonserializer.SerializerOptions{
+ Yaml: false, Pretty: false, Strict: true,
+ },
+ )
+ objBody := func(obj runtime.Object) io.ReadCloser {
+ return io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, obj))))
+ }
+ header := make(http.Header)
+ header.Set("Content-Type", runtime.ContentTypeJSON)
+ restClient := &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Resp: &http.Response{
+ StatusCode: http.StatusOK,
+ Body: objBody(c.actual),
+ Header: header,
+ },
+ }
+
+ targetInfo := &resource.Info{
+ Client: restClient,
+ Namespace: "default",
+ Name: "test-obj",
+ Object: c.target,
+ Mapping: &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{
+ Group: "crd.com",
+ Version: "v1",
+ Resource: "datas",
+ },
+ Scope: meta.RESTScopeNamespace,
+ },
+ }
+
+ patch, patchType, err := createPatch(c.original, targetInfo, c.threeWayMergeForUnstructured)
+ if err != nil {
+ t.Fatalf("Failed to create patch: %v", err)
+ }
+
+ if c.expectedPatch != string(patch) {
+ t.Errorf("Unexpected patch.\nTarget:\n%s\nOriginal:\n%s\nActual:\n%s\n\nExpected:\n%s\nGot:\n%s",
+ c.target,
+ c.original,
+ c.actual,
+ c.expectedPatch,
+ string(patch),
+ )
+ }
+
+ if patchType != types.MergePatchType {
+ t.Errorf("Expected patch type %s, got %s", types.MergePatchType, patchType)
+ }
+}
+
+func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured {
+ if metadata == nil {
+ metadata = make(map[string]string)
+ }
+ if _, ok := metadata["name"]; !ok {
+ metadata["name"] = "test-obj"
+ }
+ if _, ok := metadata["namespace"]; !ok {
+ metadata["namespace"] = "default"
+ }
+ o := map[string]interface{}{
+ "apiVersion": "crd.com/v1",
+ "kind": "Data",
+ "metadata": metadata,
+ }
+ if len(spec) > 0 {
+ o["spec"] = spec
+ }
+ return &unstructured.Unstructured{
+ Object: o,
+ }
+}
+
+func TestCreatePatchCustomResourceMetadata(t *testing.T) {
+ target := newTestCustomResourceData(map[string]string{
+ "meta.helm.sh/release-name": "foo-simple",
+ "meta.helm.sh/release-namespace": "default",
+ "objectset.rio.cattle.io/id": "default-foo-simple",
+ }, nil)
+ testCase := createPatchTestCase{
+ name: "take ownership of resource",
+ target: target,
+ original: target,
+ actual: newTestCustomResourceData(nil, map[string]interface{}{
+ "color": "red",
+ }),
+ threeWayMergeForUnstructured: true,
+ expectedPatch: `{"metadata":{"meta.helm.sh/release-name":"foo-simple","meta.helm.sh/release-namespace":"default","objectset.rio.cattle.io/id":"default-foo-simple"}}`,
+ expectedPatchType: types.MergePatchType,
+ }
+ t.Run(testCase.name, testCase.run)
+
+ // Previous behavior.
+ testCase.threeWayMergeForUnstructured = false
+ testCase.expectedPatch = `{}`
+ t.Run(testCase.name, testCase.run)
+}
+
+func TestCreatePatchCustomResourceSpec(t *testing.T) {
+ target := newTestCustomResourceData(nil, map[string]interface{}{
+ "color": "red",
+ "size": "large",
+ })
+ testCase := createPatchTestCase{
+ name: "merge with spec of existing custom resource",
+ target: target,
+ original: target,
+ actual: newTestCustomResourceData(nil, map[string]interface{}{
+ "color": "red",
+ "weight": "heavy",
+ }),
+ threeWayMergeForUnstructured: true,
+ expectedPatch: `{"spec":{"size":"large"}}`,
+ expectedPatchType: types.MergePatchType,
+ }
+ t.Run(testCase.name, testCase.run)
+
+ // Previous behavior.
+ testCase.threeWayMergeForUnstructured = false
+ testCase.expectedPatch = `{}`
+ t.Run(testCase.name, testCase.run)
+}
+
+type errorFactory struct {
+ *cmdtesting.TestFactory
+ err error
+}
+
+func (f *errorFactory) KubernetesClientSet() (*kubernetes.Clientset, error) {
+ return nil, f.err
+}
+
+func newTestClientWithDiscoveryError(t *testing.T, err error) *Client {
+ t.Helper()
+ c := newTestClient(t)
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ if req.URL.Path == "/version" {
+ return nil, err
+ }
+ resp, respErr := newResponse(http.StatusOK, &v1.Pod{})
+ return resp, respErr
+ }),
+ }
+ return c
+}
+
+func TestIsReachable(t *testing.T) {
+ const (
+ expectedUnreachableMsg = "kubernetes cluster unreachable"
+ )
+ tests := []struct {
+ name string
+ setupClient func(*testing.T) *Client
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "successful reachability test",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ client := newTestClient(t)
+ client.kubeClient = k8sfake.NewClientset()
+ return client
+ },
+ expectError: false,
+ },
+ {
+ name: "client creation error with ErrEmptyConfig",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ client := newTestClient(t)
+ client.Factory = &errorFactory{err: genericclioptions.ErrEmptyConfig}
+ return client
+ },
+ expectError: true,
+ errorContains: expectedUnreachableMsg,
+ },
+ {
+ name: "client creation error with general error",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ client := newTestClient(t)
+ client.Factory = &errorFactory{err: errors.New("connection refused")}
+ return client
+ },
+ expectError: true,
+ errorContains: "kubernetes cluster unreachable: connection refused",
+ },
+ {
+ name: "discovery error with cluster unreachable",
+ setupClient: func(t *testing.T) *Client {
+ t.Helper()
+ return newTestClientWithDiscoveryError(t, http.ErrServerClosed)
+ },
+ expectError: true,
+ errorContains: expectedUnreachableMsg,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := tt.setupClient(t)
+ err := client.IsReachable()
+
+ if tt.expectError {
+ if err == nil {
+ t.Error("expected error but got nil")
+ return
+ }
+
+ if !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("expected error message to contain '%s', got: %v", tt.errorContains, err)
+ }
+
+ } else {
+ if err != nil {
+ t.Errorf("expected no error but got: %v", err)
+ }
+ }
+ })
+ }
+}
+
+func TestIsIncompatibleServerError(t *testing.T) {
+ testCases := map[string]struct {
+ Err error
+ Want bool
+ }{
+ "Unsupported media type": {
+ Err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusUnsupportedMediaType}},
+ Want: true,
+ },
+ "Not found error": {
+ Err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusNotFound}},
+ Want: false,
+ },
+ "Generic error": {
+ Err: fmt.Errorf("some generic error"),
+ Want: false,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ if got := isIncompatibleServerError(tc.Err); got != tc.Want {
+ t.Errorf("isIncompatibleServerError() = %v, want %v", got, tc.Want)
+ }
+ })
+ }
+}
+
+func TestReplaceResource(t *testing.T) {
+ type testCase struct {
+ Pods v1.PodList
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "normal": {
+ Pods: newPodList("whale"),
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ case 1:
+ assert.Equal(t, "PUT", req.Method)
+ }
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "conflict": {
+ Pods: newPodList("whale"),
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusConflict,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "failed to replace object: the server reported a conflict",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ testFactory.UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ resourceList, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.Pods), nil)
+ require.NoError(t, err)
+
+ require.Len(t, resourceList, 1)
+ info := resourceList[0]
+
+ err = replaceResource(info, FieldValidationDirectiveStrict)
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, info.Object)
+ }
+ })
+ }
+}
+
+func TestPatchResourceClientSide(t *testing.T) {
+ type testCase struct {
+ OriginalPods v1.PodList
+ TargetPods v1.PodList
+ ThreeWayMergeForUnstructured bool
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "normal": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: func() v1.PodList {
+ pods := newPodList("whale")
+ pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return pods
+ }(),
+ ThreeWayMergeForUnstructured: false,
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/strategic-merge-patch+json", req.Header.Get("Content-Type"))
+ return newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+ }
+
+ t.Fail()
+ return nil, nil
+ },
+ },
+ "three way merge for unstructured": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: func() v1.PodList {
+ pods := newPodList("whale")
+ pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return pods
+ }(),
+ ThreeWayMergeForUnstructured: true,
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ t.Logf("patcher: %+v", req.Header)
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/strategic-merge-patch+json", req.Header.Get("Content-Type"))
+ return newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+ }
+
+ t.Fail()
+ return nil, nil
+ },
+ },
+ "conflict": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: func() v1.PodList {
+ pods := newPodList("whale")
+ pods.Items[0].Spec.Containers[0].Ports = []v1.ContainerPort{{Name: "https", ContainerPort: 443}}
+
+ return pods
+ }(),
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ assert.Equal(t, "PATCH", req.Method)
+ return &http.Response{
+ StatusCode: http.StatusConflict,
+ Request: req,
+ }, nil
+ }
+
+ t.Fail()
+ return nil, nil
+
+ },
+ ExpectedErrorContains: "cannot patch \"whale\" with kind Pod: the server reported a conflict",
+ },
+ "no patch": {
+ OriginalPods: newPodList("whale"),
+ TargetPods: newPodList("whale"),
+ Callback: func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ switch len(previous) {
+ case 0:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.OriginalPods.Items[0])
+ case 1:
+ assert.Equal(t, "GET", req.Method)
+ return newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+ }
+
+ t.Fail()
+ return nil, nil // newResponse(http.StatusOK, &tc.TargetPods.Items[0])
+
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ testFactory.UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ resourceListOriginal, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.OriginalPods), nil)
+ require.NoError(t, err)
+ require.Len(t, resourceListOriginal, 1)
+
+ resourceListTarget, err := buildResourceList(testFactory, v1.NamespaceDefault, FieldValidationDirectiveStrict, objBody(&tc.TargetPods), nil)
+ require.NoError(t, err)
+ require.Len(t, resourceListTarget, 1)
+
+ original := resourceListOriginal[0]
+ target := resourceListTarget[0]
+
+ err = patchResourceClientSide(original.Object, target, tc.ThreeWayMergeForUnstructured)
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, target.Object)
+ }
+ })
+ }
+}
+
+func TestPatchResourceServerSide(t *testing.T) {
+ type testCase struct {
+ Pods v1.PodList
+ DryRun bool
+ ForceConflicts bool
+ FieldValidationDirective FieldValidationDirective
+ Callback func(t *testing.T, tc testCase, previous []RequestResponseAction, req *http.Request) (*http.Response, error)
+ ExpectedErrorContains string
+ }
+
+ testCases := map[string]testCase{
+ "normal": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "false", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "dry run": {
+ Pods: newPodList("whale"),
+ DryRun: true,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "All", req.URL.Query().Get("dryRun"))
+ assert.Equal(t, "false", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "force conflicts": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: true,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "true", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "dry run + force conflicts": {
+ Pods: newPodList("whale"),
+ DryRun: true,
+ ForceConflicts: true,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "All", req.URL.Query().Get("dryRun"))
+ assert.Equal(t, "true", req.URL.Query().Get("force"))
+ assert.Equal(t, "Strict", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "field validation ignore": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveIgnore,
+ Callback: func(t *testing.T, tc testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ assert.Equal(t, "PATCH", req.Method)
+ assert.Equal(t, "application/apply-patch+yaml", req.Header.Get("Content-Type"))
+ assert.Equal(t, "/namespaces/default/pods/whale", req.URL.Path)
+ assert.Equal(t, "false", req.URL.Query().Get("force"))
+ assert.Equal(t, "Ignore", req.URL.Query().Get("fieldValidation"))
+
+ return newResponse(http.StatusOK, &tc.Pods.Items[0])
+ },
+ },
+ "incompatible server": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusUnsupportedMediaType,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "server-side apply not available on the server:",
+ },
+ "conflict": {
+ Pods: newPodList("whale"),
+ DryRun: false,
+ ForceConflicts: false,
+ FieldValidationDirective: FieldValidationDirectiveStrict,
+ Callback: func(t *testing.T, _ testCase, _ []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ t.Helper()
+
+ return &http.Response{
+ StatusCode: http.StatusConflict,
+ Request: req,
+ }, nil
+ },
+ ExpectedErrorContains: "the server reported a conflict",
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+
+ testFactory := cmdtesting.NewTestFactory()
+ t.Cleanup(testFactory.Cleanup)
+
+ client := NewRequestResponseLogClient(t, func(previous []RequestResponseAction, req *http.Request) (*http.Response, error) {
+ return tc.Callback(t, tc, previous, req)
+ })
+
+ testFactory.UnstructuredClient = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(client.Do),
+ }
+
+ resourceList, err := buildResourceList(testFactory, v1.NamespaceDefault, tc.FieldValidationDirective, objBody(&tc.Pods), nil)
+ require.NoError(t, err)
+
+ require.Len(t, resourceList, 1)
+ info := resourceList[0]
+
+ err = patchResourceServerSide(info, tc.DryRun, tc.ForceConflicts, tc.FieldValidationDirective)
+ if tc.ExpectedErrorContains != "" {
+ require.ErrorContains(t, err, tc.ExpectedErrorContains)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, info.Object)
+ }
+ })
+ }
+}
+
+func TestDetermineFieldValidationDirective(t *testing.T) {
+
+ assert.Equal(t, FieldValidationDirectiveIgnore, determineFieldValidationDirective(false))
+ assert.Equal(t, FieldValidationDirectiveStrict, determineFieldValidationDirective(true))
+}
+
+func TestClientWaitContextCancellationLegacy(t *testing.T) {
+ podList := newPodList("starfish", "otter")
+
+ ctx, cancel := context.WithCancel(t.Context())
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ requestCount := 0
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ requestCount++
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+
+ if requestCount == 2 {
+ cancel()
+ }
+
+ switch {
+ case p == "/api/v1/namespaces/default/pods/starfish" && m == http.MethodGet:
+ pod := &podList.Items[0]
+ pod.Status.Conditions = []v1.PodCondition{
+ {
+ Type: v1.PodReady,
+ Status: v1.ConditionFalse,
+ },
+ }
+ return newResponse(http.StatusOK, pod)
+ case p == "/api/v1/namespaces/default/pods/otter" && m == http.MethodGet:
+ pod := &podList.Items[1]
+ pod.Status.Conditions = []v1.PodCondition{
+ {
+ Type: v1.PodReady,
+ Status: v1.ConditionFalse,
+ },
+ }
+ return newResponse(http.StatusOK, pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Logf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return newResponse(http.StatusNotFound, notFoundBody())
+ }
+ }),
+ }
+
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(objBody(&podList), false)
+ require.NoError(t, err)
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+ require.NoError(t, err)
+ assert.Len(t, result.Created, 2, "expected 2 resources created, got %d", len(result.Created))
+
+ err = c.Wait(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+func TestClientWaitWithJobsContextCancellationLegacy(t *testing.T) {
+ job := newJob("starfish", 0, intToInt32(1), 0, 0)
+
+ ctx, cancel := context.WithCancel(t.Context())
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ requestCount := 0
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ requestCount++
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+
+ if requestCount == 2 {
+ cancel()
+ }
+
+ switch {
+ case p == "/apis/batch/v1/namespaces/default/jobs/starfish" && m == http.MethodGet:
+ job.Status.Succeeded = 0
+ return newResponse(http.StatusOK, job)
+ case p == "/namespaces/default/jobs" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Logf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return newResponse(http.StatusNotFound, notFoundBody())
+ }
+ }),
+ }
+
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(objBody(job), false)
+ require.NoError(t, err)
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+ require.NoError(t, err)
+ assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created))
+
+ err = c.WaitWithJobs(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+func TestClientWaitForDeleteContextCancellationLegacy(t *testing.T) {
+ pod := newPod("starfish")
+
+ ctx, cancel := context.WithCancel(t.Context())
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ deleted := false
+ requestCount := 0
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ requestCount++
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+
+ if requestCount == 3 {
+ cancel()
+ }
+
+ switch {
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodGet:
+ if deleted {
+ return newResponse(http.StatusOK, &pod)
+ }
+ return newResponse(http.StatusOK, &pod)
+ case p == "/namespaces/default/pods/starfish" && m == http.MethodDelete:
+ deleted = true
+ return newResponse(http.StatusOK, &pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Logf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return newResponse(http.StatusNotFound, notFoundBody())
+ }
+ }),
+ }
+
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(objBody(&pod), false)
+ require.NoError(t, err)
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+ require.NoError(t, err)
+ assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created))
+
+ if _, err := c.Delete(resources, metav1.DeletePropagationBackground); err != nil {
+ t.Fatal(err)
+ }
+
+ err = c.WaitForDelete(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+func TestClientWaitContextNilDoesNotPanic(t *testing.T) {
+ podList := newPodList("starfish")
+
+ var created *time.Time
+
+ c := newTestClient(t)
+ c.WaitContext = nil
+
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+ switch {
+ case p == "/api/v1/namespaces/default/pods/starfish" && m == http.MethodGet:
+ pod := &podList.Items[0]
+ if created != nil && time.Since(*created) >= time.Second*2 {
+ pod.Status.Conditions = []v1.PodCondition{
+ {
+ Type: v1.PodReady,
+ Status: v1.ConditionTrue,
+ },
+ }
+ }
+ return newResponse(http.StatusOK, pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ now := time.Now()
+ created = &now
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return nil, nil
+ }
+ }),
+ }
+
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(objBody(&podList), false)
+ require.NoError(t, err)
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+ require.NoError(t, err)
+ assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created))
+
+ err = c.Wait(resources, time.Second*30)
+ require.NoError(t, err)
+
+ assert.GreaterOrEqual(t, time.Since(*created), time.Second*2, "expected to wait at least 2 seconds")
+}
+
+func TestClientWaitContextPreCancelledLegacy(t *testing.T) {
+ podList := newPodList("starfish")
+
+ ctx, cancel := context.WithCancel(t.Context())
+ cancel()
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ c.Factory.(*cmdtesting.TestFactory).Client = &fake.RESTClient{
+ NegotiatedSerializer: unstructuredSerializer,
+ Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+ p, m := req.URL.Path, req.Method
+ t.Logf("got request %s %s", p, m)
+ switch {
+ case p == "/api/v1/namespaces/default/pods/starfish" && m == http.MethodGet:
+ pod := &podList.Items[0]
+ return newResponse(http.StatusOK, pod)
+ case p == "/namespaces/default/pods" && m == http.MethodPost:
+ resources, err := c.Build(req.Body, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return newResponse(http.StatusOK, resources[0].Object)
+ default:
+ t.Fatalf("unexpected request: %s %s", req.Method, req.URL.Path)
+ return nil, nil
+ }
+ }),
+ }
+
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(objBody(&podList), false)
+ require.NoError(t, err)
+
+ result, err := c.Create(
+ resources,
+ ClientCreateOptionServerSideApply(false, false))
+ require.NoError(t, err)
+ assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created))
+
+ err = c.Wait(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+func TestClientWaitContextCancellationStatusWatcher(t *testing.T) {
+ ctx, cancel := context.WithCancel(t.Context())
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ podManifest := `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod
+ namespace: default
+`
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(strings.NewReader(podManifest), false)
+ require.NoError(t, err)
+
+ cancel()
+
+ err = c.Wait(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+func TestClientWaitWithJobsContextCancellationStatusWatcher(t *testing.T) {
+ ctx, cancel := context.WithCancel(t.Context())
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ jobManifest := `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test-job
+ namespace: default
+`
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(strings.NewReader(jobManifest), false)
+ require.NoError(t, err)
+
+ cancel()
+
+ err = c.WaitWithJobs(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+func TestClientWaitForDeleteContextCancellationStatusWatcher(t *testing.T) {
+ ctx, cancel := context.WithCancel(t.Context())
+
+ c := newTestClient(t)
+ c.WaitContext = ctx
+
+ podManifest := `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod
+ namespace: default
+status:
+ conditions:
+ - type: Ready
+ status: "True"
+ phase: Running
+`
+ var err error
+ c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy)
+ require.NoError(t, err)
+
+ resources, err := c.Build(strings.NewReader(podManifest), false)
+ require.NoError(t, err)
+
+ cancel()
+
+ err = c.WaitForDelete(resources, time.Second*30)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err)
+}
+
+// testStatusReader is a custom status reader for testing that returns a configurable status.
+type testStatusReader struct {
+ supportedGK schema.GroupKind
+ status status.Status
+}
+
+func (r *testStatusReader) Supports(gk schema.GroupKind) bool {
+ return gk == r.supportedGK
+}
+
+func (r *testStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) {
+ return &event.ResourceStatus{
+ Identifier: id,
+ Status: r.status,
+ Message: "test status reader",
+ }, nil
+}
+
+func (r *testStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) {
+ id := object.ObjMetadata{
+ Namespace: u.GetNamespace(),
+ Name: u.GetName(),
+ GroupKind: u.GroupVersionKind().GroupKind(),
+ }
+ return &event.ResourceStatus{
+ Identifier: id,
+ Status: r.status,
+ Message: "test status reader",
+ }, nil
+}
+
+func TestClientStatusReadersPassedToStatusWaiter(t *testing.T) {
+ // This test verifies that Client.StatusReaders is correctly passed through
+ // to the statusWaiter when using the StatusWatcherStrategy.
+ // We use a custom status reader that immediately returns CurrentStatus for pods,
+ // which allows a pod without Ready condition to pass the wait.
+ podManifest := `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: test-pod
+ namespace: default
+`
+
+ c := newTestClient(t)
+ statusReaders := []engine.StatusReader{
+ &testStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ }
+
+ // Create a fake dynamic client with the pod resource
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod"))
+
+ // Create the pod in the fake client
+ m := make(map[string]interface{})
+ err := yaml.Unmarshal([]byte(podManifest), &m)
+ require.NoError(t, err)
+ podObj := &unstructured.Unstructured{Object: m}
+ gvk := podObj.GroupVersionKind()
+ mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ require.NoError(t, err)
+ err = fakeClient.Tracker().Create(mapping.Resource, podObj, podObj.GetNamespace())
+ require.NoError(t, err)
+
+ // Set up the waiter with the fake client and custom status readers
+ c.Waiter = &statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ readers: statusReaders,
+ }
+
+ resources, err := c.Build(strings.NewReader(podManifest), false)
+ require.NoError(t, err)
+
+ // The pod has no Ready condition, but our custom reader returns CurrentStatus,
+ // so the wait should succeed immediately without timeout.
+ err = c.Wait(resources, time.Second*3)
+ require.NoError(t, err)
+}
+
+func TestClientStatusReadersWithWaitWithJobs(t *testing.T) {
+ // This test verifies that Client.StatusReaders is correctly passed through
+ // to the statusWaiter when using WaitWithJobs.
+ jobManifest := `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test-job
+ namespace: default
+`
+
+ c := newTestClient(t)
+ statusReaders := []engine.StatusReader{
+ &testStatusReader{
+ supportedGK: schema.GroupKind{Group: "batch", Kind: "Job"},
+ status: status.CurrentStatus,
+ },
+ }
+
+ // Create a fake dynamic client with the job resource
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(batchv1.SchemeGroupVersion.WithKind("Job"))
+
+ // Create the job in the fake client
+ m := make(map[string]interface{})
+ err := yaml.Unmarshal([]byte(jobManifest), &m)
+ require.NoError(t, err)
+ jobObj := &unstructured.Unstructured{Object: m}
+ gvk := jobObj.GroupVersionKind()
+ mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ require.NoError(t, err)
+ err = fakeClient.Tracker().Create(mapping.Resource, jobObj, jobObj.GetNamespace())
+ require.NoError(t, err)
+
+ // Set up the waiter with the fake client and custom status readers
+ c.Waiter = &statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ readers: statusReaders,
+ }
+
+ resources, err := c.Build(strings.NewReader(jobManifest), false)
+ require.NoError(t, err)
+
+ // The job has no Complete condition, but our custom reader returns CurrentStatus,
+ // so the wait should succeed immediately without timeout.
+ err = c.WaitWithJobs(resources, time.Second*3)
+ require.NoError(t, err)
+}
diff --git a/helm/pkg/kube/converter.go b/helm/pkg/kube/converter.go
new file mode 100644
index 000000000..ac6d95fb4
--- /dev/null
+++ b/helm/pkg/kube/converter.go
@@ -0,0 +1,69 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "sync"
+
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+var k8sNativeScheme *runtime.Scheme
+var k8sNativeSchemeOnce sync.Once
+
+// AsVersioned converts the given info into a runtime.Object with the correct
+// group and version set
+func AsVersioned(info *resource.Info) runtime.Object {
+ return convertWithMapper(info.Object, info.Mapping)
+}
+
+// convertWithMapper converts the given object with the optional provided
+// RESTMapping. If no mapping is provided, the default schema versioner is used
+func convertWithMapper(obj runtime.Object, mapping *meta.RESTMapping) runtime.Object {
+ s := kubernetesNativeScheme()
+ var gv = runtime.GroupVersioner(schema.GroupVersions(s.PrioritizedVersionsAllGroups()))
+ if mapping != nil {
+ gv = mapping.GroupVersionKind.GroupVersion()
+ }
+ if obj, err := runtime.ObjectConvertor(s).ConvertToVersion(obj, gv); err == nil {
+ return obj
+ }
+ return obj
+}
+
+// kubernetesNativeScheme returns a clean *runtime.Scheme with _only_ Kubernetes
+// native resources added to it. This is required to break free of custom resources
+// that may have been added to scheme.Scheme due to Helm being used as a package in
+// combination with e.g. a versioned kube client. If we would not do this, the client
+// may attempt to perform e.g. a 3-way-merge strategy patch for custom resources.
+func kubernetesNativeScheme() *runtime.Scheme {
+ k8sNativeSchemeOnce.Do(func() {
+ k8sNativeScheme = runtime.NewScheme()
+ scheme.AddToScheme(k8sNativeScheme)
+ // API extensions are not in the above scheme set,
+ // and must thus be added separately.
+ apiextensionsv1beta1.AddToScheme(k8sNativeScheme)
+ apiextensionsv1.AddToScheme(k8sNativeScheme)
+ })
+ return k8sNativeScheme
+}
diff --git a/helm/pkg/kube/factory.go b/helm/pkg/kube/factory.go
new file mode 100644
index 000000000..1d237c307
--- /dev/null
+++ b/helm/pkg/kube/factory.go
@@ -0,0 +1,55 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/kubectl/pkg/validation"
+)
+
+// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
+// of resources and different API sets.
+// This interface is a minimal copy of the kubectl Factory interface containing only the functions
+// needed by Helm. Since Kubernetes Go APIs, including interfaces, can change in any minor release
+// this interface is not covered by the Helm backwards compatibility guarantee. The reasons for the
+// minimal copy is that it does not include the full interface. Changes or additions to functions
+// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes
+// being exposed.
+type Factory interface {
+ // ToRESTConfig returns restconfig
+ ToRESTConfig() (*rest.Config, error)
+
+ // ToRawKubeConfigLoader return kubeconfig loader as-is
+ ToRawKubeConfigLoader() clientcmd.ClientConfig
+
+ // DynamicClient returns a dynamic client ready for use
+ DynamicClient() (dynamic.Interface, error)
+
+ // KubernetesClientSet gives you back an external clientset
+ KubernetesClientSet() (*kubernetes.Clientset, error)
+
+ // NewBuilder returns an object that assists in loading objects from both disk and the server
+ // and which implements the common patterns for CLI interactions with generic resources.
+ NewBuilder() *resource.Builder
+
+ // Returns a schema that can validate objects stored on disk.
+ Validator(validationDirective string) (validation.Schema, error)
+}
diff --git a/helm/pkg/kube/fake/failing_kube_client.go b/helm/pkg/kube/fake/failing_kube_client.go
new file mode 100644
index 000000000..0f7787f79
--- /dev/null
+++ b/helm/pkg/kube/fake/failing_kube_client.go
@@ -0,0 +1,189 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fake implements various fake KubeClients for use in testing
+package fake
+
+import (
+ "io"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v4/pkg/kube"
+)
+
+// FailingKubeClient implements KubeClient for testing purposes. It also has
+// additional errors you can set to fail different functions, otherwise it
+// delegates all its calls to `PrintingKubeClient`
+type FailingKubeClient struct {
+ PrintingKubeClient
+ CreateError error
+ GetError error
+ DeleteError error
+ UpdateError error
+ BuildError error
+ BuildTableError error
+ ConnectionError error
+ BuildDummy bool
+ DummyResources kube.ResourceList
+ BuildUnstructuredError error
+ WaitError error
+ WaitForDeleteError error
+ WatchUntilReadyError error
+ WaitDuration time.Duration
+ // RecordedWaitOptions stores the WaitOptions passed to GetWaiter for testing
+ RecordedWaitOptions []kube.WaitOption
+}
+
+var _ kube.Interface = &FailingKubeClient{}
+
+// FailingKubeWaiter implements kube.Waiter for testing purposes.
+// It also has additional errors you can set to fail different functions, otherwise it delegates all its calls to `PrintingKubeWaiter`
+type FailingKubeWaiter struct {
+ *PrintingKubeWaiter
+ waitError error
+ waitForDeleteError error
+ watchUntilReadyError error
+ waitDuration time.Duration
+}
+
+// Create returns the configured error if set or prints
+func (f *FailingKubeClient) Create(resources kube.ResourceList, options ...kube.ClientCreateOption) (*kube.Result, error) {
+ if f.CreateError != nil {
+ return nil, f.CreateError
+ }
+ return f.PrintingKubeClient.Create(resources, options...)
+}
+
+// Get returns the configured error if set or prints
+func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[string][]runtime.Object, error) {
+ if f.GetError != nil {
+ return nil, f.GetError
+ }
+ return f.PrintingKubeClient.Get(resources, related)
+}
+
+// Waits the amount of time defined on f.WaitDuration, then returns the configured error if set or prints.
+func (f *FailingKubeWaiter) Wait(resources kube.ResourceList, d time.Duration) error {
+ time.Sleep(f.waitDuration)
+ if f.waitError != nil {
+ return f.waitError
+ }
+ return f.PrintingKubeWaiter.Wait(resources, d)
+}
+
+// WaitWithJobs returns the configured error if set or prints
+func (f *FailingKubeWaiter) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
+ if f.waitError != nil {
+ return f.waitError
+ }
+ return f.PrintingKubeWaiter.WaitWithJobs(resources, d)
+}
+
+// WaitForDelete returns the configured error if set or prints
+func (f *FailingKubeWaiter) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
+ if f.waitForDeleteError != nil {
+ return f.waitForDeleteError
+ }
+ return f.PrintingKubeWaiter.WaitForDelete(resources, d)
+}
+
+// Delete returns the configured error if set or prints
+func (f *FailingKubeClient) Delete(resources kube.ResourceList, deletionPropagation metav1.DeletionPropagation) (*kube.Result, []error) {
+ if f.DeleteError != nil {
+ return nil, []error{f.DeleteError}
+ }
+
+ return f.PrintingKubeClient.Delete(resources, deletionPropagation)
+}
+
+// WatchUntilReady returns the configured error if set or prints
+func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
+ if f.watchUntilReadyError != nil {
+ return f.watchUntilReadyError
+ }
+ return f.PrintingKubeWaiter.WatchUntilReady(resources, d)
+}
+
+// Update returns the configured error if set or prints
+func (f *FailingKubeClient) Update(r, modified kube.ResourceList, options ...kube.ClientUpdateOption) (*kube.Result, error) {
+ if f.UpdateError != nil {
+ return &kube.Result{}, f.UpdateError
+ }
+ return f.PrintingKubeClient.Update(r, modified, options...)
+}
+
+// Build returns the configured error if set or prints
+func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) {
+ if f.BuildError != nil {
+ return []*resource.Info{}, f.BuildError
+ }
+ if f.DummyResources != nil {
+ return f.DummyResources, nil
+ }
+ if f.BuildDummy {
+ return createDummyResourceList(), nil
+ }
+ return f.PrintingKubeClient.Build(r, false)
+}
+
+// BuildTable returns the configured error if set or prints
+func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, error) {
+ if f.BuildTableError != nil {
+ return []*resource.Info{}, f.BuildTableError
+ }
+ if f.BuildDummy {
+ return createDummyResourceList(), nil
+ }
+ return f.PrintingKubeClient.BuildTable(r, false)
+}
+
+func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) {
+ return f.GetWaiterWithOptions(ws)
+}
+
+func (f *FailingKubeClient) GetWaiterWithOptions(ws kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) {
+ // Record the WaitOptions for testing
+ f.RecordedWaitOptions = append(f.RecordedWaitOptions, opts...)
+ waiter, _ := f.PrintingKubeClient.GetWaiterWithOptions(ws, opts...)
+ printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter)
+ return &FailingKubeWaiter{
+ PrintingKubeWaiter: printingKubeWaiter,
+ waitError: f.WaitError,
+ waitForDeleteError: f.WaitForDeleteError,
+ watchUntilReadyError: f.WatchUntilReadyError,
+ waitDuration: f.WaitDuration,
+ }, nil
+}
+
+func (f *FailingKubeClient) IsReachable() error {
+ if f.ConnectionError != nil {
+ return f.ConnectionError
+ }
+ return f.PrintingKubeClient.IsReachable()
+}
+
+func createDummyResourceList() kube.ResourceList {
+ var resInfo resource.Info
+ resInfo.Name = "dummyName"
+ resInfo.Namespace = "dummyNamespace"
+ var resourceList kube.ResourceList
+ resourceList.Append(&resInfo)
+ return resourceList
+}
diff --git a/helm/pkg/kube/fake/printer.go b/helm/pkg/kube/fake/printer.go
new file mode 100644
index 000000000..e3fa11576
--- /dev/null
+++ b/helm/pkg/kube/fake/printer.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/resource"
+
+ "helm.sh/helm/v4/pkg/kube"
+)
+
+// PrintingKubeClient implements KubeClient, but simply prints the reader to
+// the given output.
+type PrintingKubeClient struct {
+ Out io.Writer
+ LogOutput io.Writer
+}
+
+// PrintingKubeWaiter implements kube.Waiter, but simply prints the reader to the given output
+type PrintingKubeWaiter struct {
+ Out io.Writer
+ LogOutput io.Writer
+}
+
+var _ kube.Interface = &PrintingKubeClient{}
+
+// IsReachable checks if the cluster is reachable
+func (p *PrintingKubeClient) IsReachable() error {
+ return nil
+}
+
+// Create prints the values of what would be created with a real KubeClient.
+func (p *PrintingKubeClient) Create(resources kube.ResourceList, _ ...kube.ClientCreateOption) (*kube.Result, error) {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ if err != nil {
+ return nil, err
+ }
+ return &kube.Result{Created: resources}, nil
+}
+
+func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[string][]runtime.Object, error) {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ if err != nil {
+ return nil, err
+ }
+ return make(map[string][]runtime.Object), nil
+}
+
+func (p *PrintingKubeWaiter) Wait(resources kube.ResourceList, _ time.Duration) error {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ return err
+}
+
+func (p *PrintingKubeWaiter) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ return err
+}
+
+func (p *PrintingKubeWaiter) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ return err
+}
+
+// WatchUntilReady implements KubeClient WatchUntilReady.
+func (p *PrintingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ return err
+}
+
+// Delete implements KubeClient delete.
+//
+// It only prints out the content to be deleted.
+func (p *PrintingKubeClient) Delete(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ if err != nil {
+ return nil, []error{err}
+ }
+ return &kube.Result{Deleted: resources}, nil
+}
+
+// Update implements KubeClient Update.
+func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ ...kube.ClientUpdateOption) (*kube.Result, error) {
+ _, err := io.Copy(p.Out, bufferize(modified))
+ if err != nil {
+ return nil, err
+ }
+ // TODO: This doesn't completely mock out have some that get created,
+ // updated, and deleted. I don't think these are used in any unit tests, but
+ // we may want to refactor a way to handle future tests
+ return &kube.Result{Updated: modified}, nil
+}
+
+// Build implements KubeClient Build.
+func (p *PrintingKubeClient) Build(_ io.Reader, _ bool) (kube.ResourceList, error) {
+ return []*resource.Info{}, nil
+}
+
+// BuildTable implements KubeClient BuildTable.
+func (p *PrintingKubeClient) BuildTable(_ io.Reader, _ bool) (kube.ResourceList, error) {
+ return []*resource.Info{}, nil
+}
+
+// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase.
+func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) {
+ return v1.PodSucceeded, nil
+}
+
+// GetPodList implements KubeClient GetPodList.
+func (p *PrintingKubeClient) GetPodList(_ string, _ metav1.ListOptions) (*v1.PodList, error) {
+ return &v1.PodList{}, nil
+}
+
+// OutputContainerLogsForPodList implements KubeClient OutputContainerLogsForPodList.
+func (p *PrintingKubeClient) OutputContainerLogsForPodList(_ *v1.PodList, someNamespace string, _ func(namespace, pod, container string) io.Writer) error {
+ _, err := io.Copy(p.LogOutput, strings.NewReader(fmt.Sprintf("attempted to output logs for namespace: %s", someNamespace)))
+ return err
+}
+
+// DeleteWithPropagationPolicy implements KubeClient delete.
+//
+// It only prints out the content to be deleted.
+func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) {
+ _, err := io.Copy(p.Out, bufferize(resources))
+ if err != nil {
+ return nil, []error{err}
+ }
+ return &kube.Result{Deleted: resources}, nil
+}
+
+func (p *PrintingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) {
+ return p.GetWaiterWithOptions(ws)
+}
+
+func (p *PrintingKubeClient) GetWaiterWithOptions(_ kube.WaitStrategy, _ ...kube.WaitOption) (kube.Waiter, error) {
+ return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil
+}
+
+func bufferize(resources kube.ResourceList) io.Reader {
+ var builder strings.Builder
+ for _, info := range resources {
+ builder.WriteString(info.String() + "\n")
+ }
+ return strings.NewReader(builder.String())
+}
diff --git a/helm/pkg/kube/interface.go b/helm/pkg/kube/interface.go
new file mode 100644
index 000000000..63c784751
--- /dev/null
+++ b/helm/pkg/kube/interface.go
@@ -0,0 +1,112 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "io"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Interface represents a client capable of communicating with the Kubernetes API.
+//
+// A KubernetesClient must be concurrency safe.
+type Interface interface {
+ // Get details of deployed resources.
+ // The first argument is a list of resources to get. The second argument
+ // specifies if related pods should be fetched. For example, the pods being
+ // managed by a deployment.
+ Get(resources ResourceList, related bool) (map[string][]runtime.Object, error)
+
+ // Create creates one or more resources.
+ Create(resources ResourceList, options ...ClientCreateOption) (*Result, error)
+
+ // Delete destroys one or more resources using the specified deletion propagation policy.
+ // The 'policy' parameter determines how child resources are handled during deletion.
+ Delete(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error)
+
+ // Update updates one or more resources or creates the resource
+ // if it doesn't exist.
+ Update(original, target ResourceList, options ...ClientUpdateOption) (*Result, error)
+
+ // Build creates a resource list from a Reader.
+ //
+ // Reader must contain a YAML stream (one or more YAML documents separated
+ // by "\n---\n")
+ //
+ // Validates against OpenAPI schema if validate is true.
+ Build(reader io.Reader, validate bool) (ResourceList, error)
+ // IsReachable checks whether the client is able to connect to the cluster.
+ IsReachable() error
+
+ // GetWaiter gets the Kube.Waiter.
+ GetWaiter(ws WaitStrategy) (Waiter, error)
+
+ // GetPodList lists all pods that match the specified listOptions
+ GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error)
+
+ // OutputContainerLogsForPodList outputs the logs for a pod list
+ OutputContainerLogsForPodList(podList *v1.PodList, namespace string, writerFunc func(namespace, pod, container string) io.Writer) error
+
+ // BuildTable creates a resource list from a Reader. This differs from
+ // Interface.Build() in that a table kind is returned. A table is useful
+ // if you want to use a printer to display the information.
+ //
+ // Reader must contain a YAML stream (one or more YAML documents separated
+ // by "\n---\n")
+ //
+ // Validates against OpenAPI schema if validate is true.
+ // TODO Helm 4: Integrate into Build with an argument
+ BuildTable(reader io.Reader, validate bool) (ResourceList, error)
+}
+
+// Waiter defines methods related to waiting for resource states.
+type Waiter interface {
+ // Wait waits up to the given timeout for the specified resources to be ready.
+ Wait(resources ResourceList, timeout time.Duration) error
+
+ // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
+ WaitWithJobs(resources ResourceList, timeout time.Duration) error
+
+ // WaitForDelete wait up to the given timeout for the specified resources to be deleted.
+ WaitForDelete(resources ResourceList, timeout time.Duration) error
+
+ // WatchUntilReady watches the resources given and waits until it is ready.
+ //
+ // This method is mainly for hook implementations. It watches for a resource to
+ // hit a particular milestone. The milestone depends on the Kind.
+ //
+ // For Jobs, "ready" means the Job ran to completion (exited without error).
+ // For Pods, "ready" means the Pod phase is marked "succeeded".
+ // For all other kinds, it means the kind was created or modified without
+ // error.
+ WatchUntilReady(resources ResourceList, timeout time.Duration) error
+}
+
+// InterfaceWaitOptions defines an interface that extends Interface with
+// methods that accept wait options.
+//
+// TODO Helm 5: Remove InterfaceWaitOptions and integrate its method(s) into the Interface.
+type InterfaceWaitOptions interface {
+ // GetWaiter gets the Kube.Waiter with options.
+ GetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) (Waiter, error)
+}
+
+var _ InterfaceWaitOptions = (*Client)(nil)
diff --git a/helm/pkg/kube/options.go b/helm/pkg/kube/options.go
new file mode 100644
index 000000000..49c6229ba
--- /dev/null
+++ b/helm/pkg/kube/options.go
@@ -0,0 +1,45 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "context"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+)
+
+// WaitOption is a function that configures an option for waiting on resources.
+type WaitOption func(*waitOptions)
+
+// WithWaitContext sets the context for waiting on resources.
+func WithWaitContext(ctx context.Context) WaitOption {
+ return func(wo *waitOptions) {
+ wo.ctx = ctx
+ }
+}
+
+// WithKStatusReaders sets the status readers to be used while waiting on resources.
+func WithKStatusReaders(readers ...engine.StatusReader) WaitOption {
+ return func(wo *waitOptions) {
+ wo.statusReaders = readers
+ }
+}
+
+type waitOptions struct {
+ ctx context.Context
+ statusReaders []engine.StatusReader
+}
diff --git a/helm/pkg/kube/ready.go b/helm/pkg/kube/ready.go
new file mode 100644
index 000000000..bfa98504c
--- /dev/null
+++ b/helm/pkg/kube/ready.go
@@ -0,0 +1,466 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ appsv1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+
+ deploymentutil "helm.sh/helm/v4/internal/third_party/k8s.io/kubernetes/deployment/util"
+)
+
+// ReadyCheckerOption is a function that configures a ReadyChecker.
+type ReadyCheckerOption func(*ReadyChecker)
+
+// PausedAsReady returns a ReadyCheckerOption that configures a ReadyChecker
+// to consider paused resources to be ready. For example a Deployment
+// with spec.paused equal to true would be considered ready.
+func PausedAsReady(pausedAsReady bool) ReadyCheckerOption {
+ return func(c *ReadyChecker) {
+ c.pausedAsReady = pausedAsReady
+ }
+}
+
+// CheckJobs returns a ReadyCheckerOption that configures a ReadyChecker
+// to consider readiness of Job resources.
+func CheckJobs(checkJobs bool) ReadyCheckerOption {
+ return func(c *ReadyChecker) {
+ c.checkJobs = checkJobs
+ }
+}
+
+// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can
+// be used to override defaults.
+func NewReadyChecker(cl kubernetes.Interface, opts ...ReadyCheckerOption) ReadyChecker {
+ c := ReadyChecker{
+ client: cl,
+ }
+ for _, opt := range opts {
+ opt(&c)
+ }
+ return c
+}
+
+// ReadyChecker is a type that can check core Kubernetes types for readiness.
+type ReadyChecker struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+}
+
+// IsReady checks if v is ready. It supports checking readiness for pods,
+// deployments, persistent volume claims, services, daemon sets, custom
+// resource definitions, stateful sets, replication controllers, jobs (optional),
+// and replica sets. All other resource kinds are always considered ready.
+//
+// IsReady will fetch the latest state of the object from the server prior to
+// performing readiness checks, and it will return any error encountered.
+func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) {
+ switch value := AsVersioned(v).(type) {
+ case *corev1.Pod:
+ pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil || !c.isPodReady(pod) {
+ return false, err
+ }
+ case *batchv1.Job:
+ if c.checkJobs {
+ job, err := c.client.BatchV1().Jobs(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ ready, err := c.jobReady(job)
+ return ready, err
+ }
+ case *appsv1.Deployment:
+ currentDeployment, err := c.client.AppsV1().Deployments(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ // If paused deployment will never be ready
+ if currentDeployment.Spec.Paused {
+ return c.pausedAsReady, nil
+ }
+ // Find RS associated with deployment
+ newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, c.client.AppsV1())
+ if err != nil || newReplicaSet == nil {
+ return false, err
+ }
+ if !c.deploymentReady(newReplicaSet, currentDeployment) {
+ return false, nil
+ }
+ case *corev1.PersistentVolumeClaim:
+ claim, err := c.client.CoreV1().PersistentVolumeClaims(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if !c.volumeReady(claim) {
+ return false, nil
+ }
+ case *corev1.Service:
+ svc, err := c.client.CoreV1().Services(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if !c.serviceReady(svc) {
+ return false, nil
+ }
+ case *appsv1.DaemonSet:
+ ds, err := c.client.AppsV1().DaemonSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if !c.daemonSetReady(ds) {
+ return false, nil
+ }
+ case *apiextv1beta1.CustomResourceDefinition:
+ if err := v.Get(); err != nil {
+ return false, err
+ }
+ crd := &apiextv1beta1.CustomResourceDefinition{}
+ if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil {
+ return false, err
+ }
+ if !c.crdBetaReady(*crd) {
+ return false, nil
+ }
+ case *apiextv1.CustomResourceDefinition:
+ if err := v.Get(); err != nil {
+ return false, err
+ }
+ crd := &apiextv1.CustomResourceDefinition{}
+ if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil {
+ return false, err
+ }
+ if !c.crdReady(*crd) {
+ return false, nil
+ }
+ case *appsv1.StatefulSet:
+ sts, err := c.client.AppsV1().StatefulSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if !c.statefulSetReady(sts) {
+ return false, nil
+ }
+ case *corev1.ReplicationController:
+ rc, err := c.client.CoreV1().ReplicationControllers(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if !c.replicationControllerReady(rc) {
+ return false, nil
+ }
+ ready, err := c.podsReadyForObject(ctx, v.Namespace, value)
+ if !ready || err != nil {
+ return false, err
+ }
+ case *appsv1.ReplicaSet:
+ rs, err := c.client.AppsV1().ReplicaSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if !c.replicaSetReady(rs) {
+ return false, nil
+ }
+ ready, err := c.podsReadyForObject(ctx, v.Namespace, value)
+ if !ready || err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+}
+
+func (c *ReadyChecker) podsReadyForObject(ctx context.Context, namespace string, obj runtime.Object) (bool, error) {
+ pods, err := c.podsforObject(ctx, namespace, obj)
+ if err != nil {
+ return false, err
+ }
+ for _, pod := range pods {
+ if !c.isPodReady(&pod) {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+func (c *ReadyChecker) podsforObject(ctx context.Context, namespace string, obj runtime.Object) ([]corev1.Pod, error) {
+ selector, err := SelectorsForObject(obj)
+ if err != nil {
+ return nil, err
+ }
+ list, err := getPods(ctx, c.client, namespace, selector.String())
+ return list, err
+}
+
+// isPodReady returns true if a pod is ready; false otherwise.
+func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
+ for _, c := range pod.Status.Conditions {
+ if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+ return true
+ }
+ }
+ slog.Debug("Pod is not ready", "namespace", pod.GetNamespace(), "name", pod.GetName())
+ return false
+}
+
+func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
+ if job.Status.Failed > *job.Spec.BackoffLimit {
+ slog.Debug("Job is failed", "namespace", job.GetNamespace(), "name", job.GetName())
+ // If a job is failed, it can't recover, so throw an error
+ return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
+ }
+ if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
+ slog.Debug("Job is not completed", "namespace", job.GetNamespace(), "name", job.GetName())
+ return false, nil
+ }
+ slog.Debug("Job is completed", "namespace", job.GetNamespace(), "name", job.GetName())
+ return true, nil
+}
+
+func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
+ // ExternalName Services are external to cluster so helm shouldn't be checking to see if they're 'ready' (i.e. have an IP Set)
+ if s.Spec.Type == corev1.ServiceTypeExternalName {
+ return true
+ }
+
+ // Ensure that the service cluster IP is not empty
+ if s.Spec.ClusterIP == "" {
+ slog.Debug("Service does not have cluster IP address", "namespace", s.GetNamespace(), "name", s.GetName())
+ return false
+ }
+
+ // This checks if the service has a LoadBalancer and that balancer has an Ingress defined
+ if s.Spec.Type == corev1.ServiceTypeLoadBalancer {
+ // do not wait when at least 1 external IP is set
+ if len(s.Spec.ExternalIPs) > 0 {
+ slog.Debug("Service has external IP addresses", "namespace", s.GetNamespace(), "name", s.GetName(), "externalIPs", s.Spec.ExternalIPs)
+ return true
+ }
+
+ if s.Status.LoadBalancer.Ingress == nil {
+ slog.Debug("Service does not have load balancer ingress IP address", "namespace", s.GetNamespace(), "name", s.GetName())
+ return false
+ }
+ }
+ slog.Debug("Service is ready", "namespace", s.GetNamespace(), "name", s.GetName(), "clusterIP", s.Spec.ClusterIP, "externalIPs", s.Spec.ExternalIPs)
+ return true
+}
+
+func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
+ if v.Status.Phase != corev1.ClaimBound {
+ slog.Debug("PersistentVolumeClaim is not bound", "namespace", v.GetNamespace(), "name", v.GetName())
+ return false
+ }
+ slog.Debug("PersistentVolumeClaim is bound", "namespace", v.GetNamespace(), "name", v.GetName(), "phase", v.Status.Phase)
+ return true
+}
+
+func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool {
+ // Verify the replicaset readiness
+ if !c.replicaSetReady(rs) {
+ return false
+ }
+ // Verify the generation observed by the deployment controller matches the spec generation
+ if dep.Status.ObservedGeneration != dep.Generation {
+ slog.Debug("Deployment is not ready, observedGeneration does not match spec generation", "namespace", dep.GetNamespace(), "name", dep.GetName(), "actualGeneration", dep.Status.ObservedGeneration, "expectedGeneration", dep.Generation)
+ return false
+ }
+
+ expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
+ if rs.Status.ReadyReplicas < expectedReady {
+ slog.Debug("Deployment does not have enough pods ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
+ return false
+ }
+ slog.Debug("Deployment is ready", "namespace", dep.GetNamespace(), "name", dep.GetName(), "readyPods", rs.Status.ReadyReplicas, "totalPods", expectedReady)
+ return true
+}
+
+func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
+ // Verify the generation observed by the daemonSet controller matches the spec generation
+ if ds.Status.ObservedGeneration != ds.Generation {
+ slog.Debug("DaemonSet is not ready, observedGeneration does not match spec generation", "namespace", ds.GetNamespace(), "name", ds.GetName(), "observedGeneration", ds.Status.ObservedGeneration, "expectedGeneration", ds.Generation)
+ return false
+ }
+
+ // If the update strategy is not a rolling update, there will be nothing to wait for
+ if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
+ return true
+ }
+
+ // Make sure all the updated pods have been scheduled
+ if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
+ slog.Debug("DaemonSet does not have enough Pods scheduled", "namespace", ds.GetNamespace(), "name", ds.GetName(), "scheduledPods", ds.Status.UpdatedNumberScheduled, "totalPods", ds.Status.DesiredNumberScheduled)
+ return false
+ }
+ maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true)
+ if err != nil {
+ // If for some reason the value is invalid, set max unavailable to the
+ // number of desired replicas. This is the same behavior as the
+ // `MaxUnavailable` function in deploymentutil
+ maxUnavailable = int(ds.Status.DesiredNumberScheduled)
+ }
+
+ expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable
+ if int(ds.Status.NumberReady) < expectedReady {
+ slog.Debug("DaemonSet does not have enough Pods ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
+ return false
+ }
+ slog.Debug("DaemonSet is ready", "namespace", ds.GetNamespace(), "name", ds.GetName(), "readyPods", ds.Status.NumberReady, "totalPods", expectedReady)
+ return true
+}
+
+// Because the v1 extensions API is not available on all supported k8s versions
+// yet and because Go doesn't support generics, we need to have a duplicate
+// function to support the v1beta1 types
+func (c *ReadyChecker) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) bool {
+ for _, cond := range crd.Status.Conditions {
+ switch cond.Type {
+ case apiextv1beta1.Established:
+ if cond.Status == apiextv1beta1.ConditionTrue {
+ return true
+ }
+ case apiextv1beta1.NamesAccepted:
+ if cond.Status == apiextv1beta1.ConditionFalse {
+ // This indicates a naming conflict, but it's probably not the
+ // job of this function to fail because of that. Instead,
+ // we treat it as a success, since the process should be able to
+ // continue.
+ return true
+ }
+ default:
+ // intentionally left empty
+ }
+ }
+ return false
+}
+
+func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
+ for _, cond := range crd.Status.Conditions {
+ switch cond.Type {
+ case apiextv1.Established:
+ if cond.Status == apiextv1.ConditionTrue {
+ return true
+ }
+ case apiextv1.NamesAccepted:
+ if cond.Status == apiextv1.ConditionFalse {
+ // This indicates a naming conflict, but it's probably not the
+ // job of this function to fail because of that. Instead,
+ // we treat it as a success, since the process should be able to
+ // continue.
+ return true
+ }
+ default:
+ // intentionally left empty
+ }
+ }
+ return false
+}
+
+func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
+ // Verify the generation observed by the statefulSet controller matches the spec generation
+ if sts.Status.ObservedGeneration != sts.Generation {
+ slog.Debug("StatefulSet is not ready, observedGeneration doest not match spec generation", "namespace", sts.GetNamespace(), "name", sts.GetName(), "actualGeneration", sts.Status.ObservedGeneration, "expectedGeneration", sts.Generation)
+ return false
+ }
+
+ // If the update strategy is not a rolling update, there will be nothing to wait for
+ if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
+ slog.Debug("StatefulSet skipped ready check", "namespace", sts.GetNamespace(), "name", sts.GetName(), "updateStrategy", sts.Spec.UpdateStrategy.Type)
+ return true
+ }
+
+ // Dereference all the pointers because StatefulSets like them
+ var partition int
+ // 1 is the default for replicas if not set
+ replicas := 1
+ // For some reason, even if the update strategy is a rolling update, the
+ // actual rollingUpdate field can be nil. If it is, we can safely assume
+ // there is no partition value
+ if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
+ partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition)
+ }
+ if sts.Spec.Replicas != nil {
+ replicas = int(*sts.Spec.Replicas)
+ }
+
+ // Because an update strategy can use partitioning, we need to calculate the
+ // number of updated replicas we should have. For example, if the replicas
+ // is set to 3 and the partition is 2, we'd expect only one pod to be
+ // updated
+ expectedReplicas := replicas - partition
+
+ // Make sure all the updated pods have been scheduled
+ if int(sts.Status.UpdatedReplicas) < expectedReplicas {
+ slog.Debug("StatefulSet does not have enough Pods scheduled", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.UpdatedReplicas, "totalPods", expectedReplicas)
+ return false
+ }
+
+ if int(sts.Status.ReadyReplicas) != replicas {
+ slog.Debug("StatefulSet does not have enough Pods ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
+ return false
+ }
+ // This check only makes sense when all partitions are being upgraded otherwise during a
+ // partitioned rolling upgrade, this condition will never evaluate to true, leading to
+ // error.
+ if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
+ slog.Debug("StatefulSet is not ready, currentRevision does not match updateRevision", "namespace", sts.GetNamespace(), "name", sts.GetName(), "currentRevision", sts.Status.CurrentRevision, "updateRevision", sts.Status.UpdateRevision)
+ return false
+ }
+ slog.Debug("StatefulSet is ready", "namespace", sts.GetNamespace(), "name", sts.GetName(), "readyPods", sts.Status.ReadyReplicas, "totalPods", replicas)
+ return true
+}
+
+func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
+ // Verify the generation observed by the replicationController controller matches the spec generation
+ if rc.Status.ObservedGeneration != rc.Generation {
+ slog.Debug("ReplicationController is not ready, observedGeneration doest not match spec generation", "namespace", rc.GetNamespace(), "name", rc.GetName(), "actualGeneration", rc.Status.ObservedGeneration, "expectedGeneration", rc.Generation)
+ return false
+ }
+ return true
+}
+
+func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
+ // Verify the generation observed by the replicaSet controller matches the spec generation
+ if rs.Status.ObservedGeneration != rs.Generation {
+ slog.Debug("ReplicaSet is not ready, observedGeneration doest not match spec generation", "namespace", rs.GetNamespace(), "name", rs.GetName(), "actualGeneration", rs.Status.ObservedGeneration, "expectedGeneration", rs.Generation)
+ return false
+ }
+ return true
+}
+
+func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) {
+ list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to list pods: %w", err)
+ }
+ return list.Items, nil
+}
diff --git a/helm/pkg/kube/ready_test.go b/helm/pkg/kube/ready_test.go
new file mode 100644
index 000000000..db0d02cbe
--- /dev/null
+++ b/helm/pkg/kube/ready_test.go
@@ -0,0 +1,1601 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "context"
+ "testing"
+
+ appsv1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/fake"
+)
+
+const defaultNamespace = metav1.NamespaceDefault
+
+func Test_ReadyChecker_IsReady_Pod(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ pod *corev1.Pod
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady Pod",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.Pod{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ pod: newPodWithCondition("foo", corev1.ConditionTrue),
+ want: true,
+ wantErr: false,
+ },
+ {
+ name: "IsReady Pod returns error",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.Pod{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ pod: newPodWithCondition("bar", corev1.ConditionTrue),
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(t.Context(), tt.pod, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create Pod error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_Job(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ job *batchv1.Job
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady Job error while getting job",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &batchv1.Job{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ job: newJob("bar", 1, intToInt32(1), 1, 0),
+ want: false,
+ wantErr: true,
+ },
+ {
+ name: "IsReady Job",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &batchv1.Job{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ job: newJob("foo", 1, intToInt32(1), 1, 0),
+ want: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.BatchV1().Jobs(defaultNamespace).Create(t.Context(), tt.job, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create Job error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_Deployment(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ replicaSet *appsv1.ReplicaSet
+ deployment *appsv1.Deployment
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady Deployments error while getting current Deployment",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.Deployment{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ replicaSet: newReplicaSet("foo", 0, 0, true),
+ deployment: newDeployment("bar", 1, 1, 0, true),
+ want: false,
+ wantErr: true,
+ },
+ {
+ name: "IsReady Deployments", //TODO fix this one
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.Deployment{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ replicaSet: newReplicaSet("foo", 0, 0, true),
+ deployment: newDeployment("foo", 1, 1, 0, true),
+ want: false,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.AppsV1().Deployments(defaultNamespace).Create(t.Context(), tt.deployment, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create Deployment error: %v", err)
+ return
+ }
+ if _, err := c.client.AppsV1().ReplicaSets(defaultNamespace).Create(t.Context(), tt.replicaSet, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create ReplicaSet error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_PersistentVolumeClaim(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ pvc *corev1.PersistentVolumeClaim
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady PersistentVolumeClaim",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.PersistentVolumeClaim{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ pvc: newPersistentVolumeClaim("foo", corev1.ClaimPending),
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "IsReady PersistentVolumeClaim with error",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.PersistentVolumeClaim{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ pvc: newPersistentVolumeClaim("bar", corev1.ClaimPending),
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.CoreV1().PersistentVolumeClaims(defaultNamespace).Create(t.Context(), tt.pvc, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create PersistentVolumeClaim error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_Service(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ svc *corev1.Service
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady Service",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.Service{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ svc: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeLoadBalancer, ClusterIP: ""}),
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "IsReady Service with error",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.Service{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ svc: newService("bar", corev1.ServiceSpec{Type: corev1.ServiceTypeExternalName, ClusterIP: ""}),
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.CoreV1().Services(defaultNamespace).Create(t.Context(), tt.svc, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create Service error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_DaemonSet(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ ds *appsv1.DaemonSet
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady DaemonSet",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.DaemonSet{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ ds: newDaemonSet("foo", 0, 0, 1, 0, true),
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "IsReady DaemonSet with error",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.DaemonSet{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ ds: newDaemonSet("bar", 0, 1, 1, 1, true),
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.AppsV1().DaemonSets(defaultNamespace).Create(t.Context(), tt.ds, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create DaemonSet error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_StatefulSet(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ ss *appsv1.StatefulSet
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady StatefulSet",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.StatefulSet{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ ss: newStatefulSet("foo", 1, 0, 0, 1, true),
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "IsReady StatefulSet with error",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.StatefulSet{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ ss: newStatefulSet("bar", 1, 0, 1, 1, true),
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.AppsV1().StatefulSets(defaultNamespace).Create(t.Context(), tt.ss, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create StatefulSet error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_ReplicationController(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ rc *corev1.ReplicationController
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady ReplicationController",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.ReplicationController{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ rc: newReplicationController("foo", false),
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "IsReady ReplicationController with error",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.ReplicationController{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ rc: newReplicationController("bar", false),
+ want: false,
+ wantErr: true,
+ },
+ {
+ name: "IsReady ReplicationController and pods not ready for object",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &corev1.ReplicationController{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ rc: newReplicationController("foo", true),
+ want: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ if _, err := c.client.CoreV1().ReplicationControllers(defaultNamespace).Create(t.Context(), tt.rc, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create ReplicationController error: %v", err)
+ return
+ }
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_IsReady_ReplicaSet(t *testing.T) {
+ type fields struct {
+ client kubernetes.Interface
+ checkJobs bool
+ pausedAsReady bool
+ }
+ type args struct {
+ ctx context.Context
+ resource *resource.Info
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ rs *appsv1.ReplicaSet
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "IsReady ReplicaSet",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.ReplicaSet{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ rs: newReplicaSet("foo", 1, 1, true),
+ want: false,
+ wantErr: true,
+ },
+ {
+ name: "IsReady ReplicaSet not ready",
+ fields: fields{
+ client: fake.NewClientset(),
+ checkJobs: true,
+ pausedAsReady: false,
+ },
+ args: args{
+ ctx: t.Context(),
+ resource: &resource.Info{Object: &appsv1.ReplicaSet{}, Name: "foo", Namespace: defaultNamespace},
+ },
+ rs: newReplicaSet("bar", 1, 1, false),
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &ReadyChecker{
+ client: tt.fields.client,
+ checkJobs: tt.fields.checkJobs,
+ pausedAsReady: tt.fields.pausedAsReady,
+ }
+ //
+ got, err := c.IsReady(tt.args.ctx, tt.args.resource)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("IsReady() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if got != tt.want {
+ t.Errorf("IsReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_deploymentReady(t *testing.T) {
+ type args struct {
+ rs *appsv1.ReplicaSet
+ dep *appsv1.Deployment
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "deployment is ready",
+ args: args{
+ rs: newReplicaSet("foo", 1, 1, true),
+ dep: newDeployment("foo", 1, 1, 0, true),
+ },
+ want: true,
+ },
+ {
+ name: "deployment is not ready",
+ args: args{
+ rs: newReplicaSet("foo", 0, 0, true),
+ dep: newDeployment("foo", 1, 1, 0, true),
+ },
+ want: false,
+ },
+ {
+ name: "deployment is ready when maxUnavailable is set",
+ args: args{
+ rs: newReplicaSet("foo", 2, 1, true),
+ dep: newDeployment("foo", 2, 1, 1, true),
+ },
+ want: true,
+ },
+ {
+ name: "deployment is not ready when replicaset generations are out of sync",
+ args: args{
+ rs: newReplicaSet("foo", 1, 1, false),
+ dep: newDeployment("foo", 1, 1, 0, true),
+ },
+ want: false,
+ },
+ {
+ name: "deployment is not ready when deployment generations are out of sync",
+ args: args{
+ rs: newReplicaSet("foo", 1, 1, true),
+ dep: newDeployment("foo", 1, 1, 0, false),
+ },
+ want: false,
+ },
+ {
+ name: "deployment is not ready when generations are out of sync",
+ args: args{
+ rs: newReplicaSet("foo", 1, 1, false),
+ dep: newDeployment("foo", 1, 1, 0, false),
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
+ t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_replicaSetReady(t *testing.T) {
+ type args struct {
+ rs *appsv1.ReplicaSet
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "replicaSet is ready",
+ args: args{
+ rs: newReplicaSet("foo", 1, 1, true),
+ },
+ want: true,
+ },
+ {
+ name: "replicaSet is not ready when generations are out of sync",
+ args: args{
+ rs: newReplicaSet("foo", 1, 1, false),
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ if got := c.replicaSetReady(tt.args.rs); got != tt.want {
+ t.Errorf("replicaSetReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_replicationControllerReady(t *testing.T) {
+ type args struct {
+ rc *corev1.ReplicationController
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "replicationController is ready",
+ args: args{
+ rc: newReplicationController("foo", true),
+ },
+ want: true,
+ },
+ {
+ name: "replicationController is not ready when generations are out of sync",
+ args: args{
+ rc: newReplicationController("foo", false),
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ if got := c.replicationControllerReady(tt.args.rc); got != tt.want {
+ t.Errorf("replicationControllerReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_daemonSetReady(t *testing.T) {
+ type args struct {
+ ds *appsv1.DaemonSet
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "daemonset is ready",
+ args: args{
+ ds: newDaemonSet("foo", 0, 1, 1, 1, true),
+ },
+ want: true,
+ },
+ {
+ name: "daemonset is not ready",
+ args: args{
+ ds: newDaemonSet("foo", 0, 0, 1, 1, true),
+ },
+ want: false,
+ },
+ {
+ name: "daemonset pods have not been scheduled successfully",
+ args: args{
+ ds: newDaemonSet("foo", 0, 0, 1, 0, true),
+ },
+ want: false,
+ },
+ {
+ name: "daemonset is ready when maxUnavailable is set",
+ args: args{
+ ds: newDaemonSet("foo", 1, 1, 2, 2, true),
+ },
+ want: true,
+ },
+ {
+ name: "daemonset is not ready when generations are out of sync",
+ args: args{
+ ds: newDaemonSet("foo", 0, 1, 1, 1, false),
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ if got := c.daemonSetReady(tt.args.ds); got != tt.want {
+ t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_statefulSetReady(t *testing.T) {
+ type args struct {
+ sts *appsv1.StatefulSet
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "statefulset is ready",
+ args: args{
+ sts: newStatefulSet("foo", 1, 0, 1, 1, true),
+ },
+ want: true,
+ },
+ {
+ name: "statefulset is not ready",
+ args: args{
+ sts: newStatefulSet("foo", 1, 0, 0, 1, true),
+ },
+ want: false,
+ },
+ {
+ name: "statefulset is ready when partition is specified",
+ args: args{
+ sts: newStatefulSet("foo", 2, 1, 2, 1, true),
+ },
+ want: true,
+ },
+ {
+ name: "statefulset is not ready when partition is set",
+ args: args{
+ sts: newStatefulSet("foo", 2, 1, 1, 0, true),
+ },
+ want: false,
+ },
+ {
+ name: "statefulset is ready when partition is set and no change in template",
+ args: args{
+ sts: newStatefulSet("foo", 2, 1, 2, 2, true),
+ },
+ want: true,
+ },
+ {
+ name: "statefulset is ready when partition is greater than replicas",
+ args: args{
+ sts: newStatefulSet("foo", 1, 2, 1, 1, true),
+ },
+ want: true,
+ },
+ {
+ name: "statefulset is not ready when generations are out of sync",
+ args: args{
+ sts: newStatefulSet("foo", 1, 0, 1, 1, false),
+ },
+ want: false,
+ },
+ {
+ name: "statefulset is ready when current revision for current replicas does not match update revision for updated replicas when using partition !=0",
+ args: args{
+ sts: newStatefulSetWithUpdateRevision("foo", 3, 2, 3, 3, "foo-bbbbbbb", true),
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ if got := c.statefulSetReady(tt.args.sts); got != tt.want {
+ t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_podsReadyForObject(t *testing.T) {
+ type args struct {
+ namespace string
+ obj runtime.Object
+ }
+ tests := []struct {
+ name string
+ args args
+ existPods []corev1.Pod
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "pods ready for a replicaset",
+ args: args{
+ namespace: defaultNamespace,
+ obj: newReplicaSet("foo", 1, 1, true),
+ },
+ existPods: []corev1.Pod{
+ *newPodWithCondition("foo", corev1.ConditionTrue),
+ },
+ want: true,
+ wantErr: false,
+ },
+ {
+ name: "pods not ready for a replicaset",
+ args: args{
+ namespace: defaultNamespace,
+ obj: newReplicaSet("foo", 1, 1, true),
+ },
+ existPods: []corev1.Pod{
+ *newPodWithCondition("foo", corev1.ConditionFalse),
+ },
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "ReplicaSet not set",
+ args: args{
+ namespace: defaultNamespace,
+ obj: nil,
+ },
+ existPods: []corev1.Pod{
+ *newPodWithCondition("foo", corev1.ConditionFalse),
+ },
+ want: false,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ for _, pod := range tt.existPods {
+ if _, err := c.client.CoreV1().Pods(defaultNamespace).Create(t.Context(), &pod, metav1.CreateOptions{}); err != nil {
+ t.Errorf("Failed to create Pod error: %v", err)
+ return
+ }
+ }
+ got, err := c.podsReadyForObject(t.Context(), tt.args.namespace, tt.args.obj)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("podsReadyForObject() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("podsReadyForObject() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_jobReady(t *testing.T) {
+ type args struct {
+ job *batchv1.Job
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "job is completed",
+ args: args{job: newJob("foo", 1, intToInt32(1), 1, 0)},
+ want: true,
+ wantErr: false,
+ },
+ {
+ name: "job is incomplete",
+ args: args{job: newJob("foo", 1, intToInt32(1), 0, 0)},
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "job is failed but within BackoffLimit",
+ args: args{job: newJob("foo", 1, intToInt32(1), 0, 1)},
+ want: false,
+ wantErr: false,
+ },
+ {
+ name: "job is completed with retry",
+ args: args{job: newJob("foo", 1, intToInt32(1), 1, 1)},
+ want: true,
+ wantErr: false,
+ },
+ {
+ name: "job is failed and beyond BackoffLimit",
+ args: args{job: newJob("foo", 1, intToInt32(1), 0, 2)},
+ want: false,
+ wantErr: true,
+ },
+ {
+ name: "job is completed single run",
+ args: args{job: newJob("foo", 0, intToInt32(1), 1, 0)},
+ want: true,
+ wantErr: false,
+ },
+ {
+ name: "job is failed single run",
+ args: args{job: newJob("foo", 0, intToInt32(1), 0, 1)},
+ want: false,
+ wantErr: true,
+ },
+ {
+ name: "job with null completions",
+ args: args{job: newJob("foo", 0, nil, 1, 0)},
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ got, err := c.jobReady(tt.args.job)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("jobReady() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("jobReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_volumeReady(t *testing.T) {
+ type args struct {
+ v *corev1.PersistentVolumeClaim
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "pvc is bound",
+ args: args{
+ v: newPersistentVolumeClaim("foo", corev1.ClaimBound),
+ },
+ want: true,
+ },
+ {
+ name: "pvc is not ready",
+ args: args{
+ v: newPersistentVolumeClaim("foo", corev1.ClaimPending),
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ if got := c.volumeReady(tt.args.v); got != tt.want {
+ t.Errorf("volumeReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_serviceReady(t *testing.T) {
+ type args struct {
+ service *corev1.Service
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "service type is of external name",
+ args: args{service: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeExternalName, ClusterIP: ""})},
+ want: true,
+ },
+ {
+ name: "service cluster ip is empty",
+ args: args{service: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeLoadBalancer, ClusterIP: ""})},
+ want: false,
+ },
+ {
+ name: "service has a cluster ip that is greater than 0",
+ args: args{service: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeLoadBalancer, ClusterIP: "bar", ExternalIPs: []string{"bar"}})},
+ want: true,
+ },
+ {
+ name: "service has a cluster ip that is less than 0 and ingress is nil",
+ args: args{service: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeLoadBalancer, ClusterIP: "bar"})},
+ want: false,
+ },
+ {
+ name: "service has a cluster ip that is less than 0 and ingress is nil",
+ args: args{service: newService("foo", corev1.ServiceSpec{Type: corev1.ServiceTypeClusterIP, ClusterIP: "bar"})},
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ got := c.serviceReady(tt.args.service)
+ if got != tt.want {
+ t.Errorf("serviceReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_crdBetaReady(t *testing.T) {
+ type args struct {
+ crdBeta apiextv1beta1.CustomResourceDefinition
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "crdBeta type is Establish and Conditional is true",
+ args: args{crdBeta: newcrdBetaReady("foo", apiextv1beta1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1beta1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1beta1.Established,
+ Status: apiextv1beta1.ConditionTrue,
+ },
+ },
+ })},
+ want: true,
+ },
+ {
+ name: "crdBeta type is Establish and Conditional is false",
+ args: args{crdBeta: newcrdBetaReady("foo", apiextv1beta1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1beta1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1beta1.Established,
+ Status: apiextv1beta1.ConditionFalse,
+ },
+ },
+ })},
+ want: false,
+ },
+ {
+ name: "crdBeta type is NamesAccepted and Conditional is true",
+ args: args{crdBeta: newcrdBetaReady("foo", apiextv1beta1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1beta1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1beta1.NamesAccepted,
+ Status: apiextv1beta1.ConditionTrue,
+ },
+ },
+ })},
+ want: false,
+ },
+ {
+ name: "crdBeta type is NamesAccepted and Conditional is false",
+ args: args{crdBeta: newcrdBetaReady("foo", apiextv1beta1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1beta1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1beta1.NamesAccepted,
+ Status: apiextv1beta1.ConditionFalse,
+ },
+ },
+ })},
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ got := c.crdBetaReady(tt.args.crdBeta)
+ if got != tt.want {
+ t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ReadyChecker_crdReady(t *testing.T) {
+ type args struct {
+ crdBeta apiextv1.CustomResourceDefinition
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "crdBeta type is Establish and Conditional is true",
+ args: args{crdBeta: newcrdReady("foo", apiextv1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1.Established,
+ Status: apiextv1.ConditionTrue,
+ },
+ },
+ })},
+ want: true,
+ },
+ {
+ name: "crdBeta type is Establish and Conditional is false",
+ args: args{crdBeta: newcrdReady("foo", apiextv1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1.Established,
+ Status: apiextv1.ConditionFalse,
+ },
+ },
+ })},
+ want: false,
+ },
+ {
+ name: "crdBeta type is NamesAccepted and Conditional is true",
+ args: args{crdBeta: newcrdReady("foo", apiextv1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1.NamesAccepted,
+ Status: apiextv1.ConditionTrue,
+ },
+ },
+ })},
+ want: false,
+ },
+ {
+ name: "crdBeta type is NamesAccepted and Conditional is false",
+ args: args{crdBeta: newcrdReady("foo", apiextv1.CustomResourceDefinitionStatus{
+ Conditions: []apiextv1.CustomResourceDefinitionCondition{
+ {
+ Type: apiextv1.NamesAccepted,
+ Status: apiextv1.ConditionFalse,
+ },
+ },
+ })},
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewReadyChecker(fake.NewClientset())
+ got := c.crdReady(tt.args.crdBeta)
+ if got != tt.want {
+ t.Errorf("crdBetaReady() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func newStatefulSetWithUpdateRevision(name string, replicas, partition, readyReplicas, updatedReplicas int, updateRevision string, generationInSync bool) *appsv1.StatefulSet {
+ ss := newStatefulSet(name, replicas, partition, readyReplicas, updatedReplicas, generationInSync)
+ ss.Status.UpdateRevision = updateRevision
+ return ss
+}
+
+func newDaemonSet(name string, maxUnavailable, numberReady, desiredNumberScheduled, updatedNumberScheduled int, generationInSync bool) *appsv1.DaemonSet {
+ var generation, observedGeneration int64 = 1, 1
+ if !generationInSync {
+ generation = 2
+ }
+ return &appsv1.DaemonSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ Generation: generation,
+ },
+ Spec: appsv1.DaemonSetSpec{
+ UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
+ Type: appsv1.RollingUpdateDaemonSetStrategyType,
+ RollingUpdate: &appsv1.RollingUpdateDaemonSet{
+ MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(maxUnavailable); return &i }(),
+ },
+ },
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Labels: map[string]string{"name": name},
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Image: "nginx",
+ },
+ },
+ },
+ },
+ },
+ Status: appsv1.DaemonSetStatus{
+ DesiredNumberScheduled: int32(desiredNumberScheduled),
+ NumberReady: int32(numberReady),
+ UpdatedNumberScheduled: int32(updatedNumberScheduled),
+ ObservedGeneration: observedGeneration,
+ },
+ }
+}
+
+func newStatefulSet(name string, replicas, partition, readyReplicas, updatedReplicas int, generationInSync bool) *appsv1.StatefulSet {
+ var generation, observedGeneration int64 = 1, 1
+ if !generationInSync {
+ generation = 2
+ }
+ return &appsv1.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ Generation: generation,
+ },
+ Spec: appsv1.StatefulSetSpec{
+ UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
+ Type: appsv1.RollingUpdateStatefulSetStrategyType,
+ RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
+ Partition: intToInt32(partition),
+ },
+ },
+ Replicas: intToInt32(replicas),
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Labels: map[string]string{"name": name},
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Image: "nginx",
+ },
+ },
+ },
+ },
+ },
+ Status: appsv1.StatefulSetStatus{
+ UpdatedReplicas: int32(updatedReplicas),
+ ReadyReplicas: int32(readyReplicas),
+ ObservedGeneration: observedGeneration,
+ },
+ }
+}
+
+func newDeployment(name string, replicas, maxSurge, maxUnavailable int, generationInSync bool) *appsv1.Deployment {
+ var generation, observedGeneration int64 = 1, 1
+ if !generationInSync {
+ generation = 2
+ }
+ return &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ Generation: generation,
+ },
+ Spec: appsv1.DeploymentSpec{
+ Strategy: appsv1.DeploymentStrategy{
+ Type: appsv1.RollingUpdateDeploymentStrategyType,
+ RollingUpdate: &appsv1.RollingUpdateDeployment{
+ MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(maxUnavailable); return &i }(),
+ MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(maxSurge); return &i }(),
+ },
+ },
+ Replicas: intToInt32(replicas),
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Labels: map[string]string{"name": name},
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Image: "nginx",
+ },
+ },
+ },
+ },
+ },
+ Status: appsv1.DeploymentStatus{
+ ObservedGeneration: observedGeneration,
+ },
+ }
+}
+
+func newReplicationController(name string, generationInSync bool) *corev1.ReplicationController {
+ var generation, observedGeneration int64 = 1, 1
+ if !generationInSync {
+ generation = 2
+ }
+ return &corev1.ReplicationController{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Generation: generation,
+ },
+ Status: corev1.ReplicationControllerStatus{
+ ObservedGeneration: observedGeneration,
+ },
+ }
+}
+
+func newReplicaSet(name string, replicas int, readyReplicas int, generationInSync bool) *appsv1.ReplicaSet {
+ d := newDeployment(name, replicas, 0, 0, generationInSync)
+ return &appsv1.ReplicaSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ Labels: d.Spec.Selector.MatchLabels,
+ OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, d.GroupVersionKind())},
+ Generation: d.Generation,
+ },
+ Spec: appsv1.ReplicaSetSpec{
+ Selector: d.Spec.Selector,
+ Replicas: intToInt32(replicas),
+ Template: d.Spec.Template,
+ },
+ Status: appsv1.ReplicaSetStatus{
+ ReadyReplicas: int32(readyReplicas),
+ ObservedGeneration: d.Status.ObservedGeneration,
+ },
+ }
+}
+
+func newPodWithCondition(name string, podReadyCondition corev1.ConditionStatus) *corev1.Pod {
+ return &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ Labels: map[string]string{"name": name},
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Image: "nginx",
+ },
+ },
+ },
+ Status: corev1.PodStatus{
+ Conditions: []corev1.PodCondition{
+ {
+ Type: corev1.PodReady,
+ Status: podReadyCondition,
+ },
+ },
+ },
+ }
+}
+
+func newPersistentVolumeClaim(name string, phase corev1.PersistentVolumeClaimPhase) *corev1.PersistentVolumeClaim {
+ return &corev1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ },
+ Status: corev1.PersistentVolumeClaimStatus{
+ Phase: phase,
+ },
+ }
+}
+
+func newJob(name string, backoffLimit int, completions *int32, succeeded int, failed int) *batchv1.Job {
+ return &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ },
+ Spec: batchv1.JobSpec{
+ BackoffLimit: intToInt32(backoffLimit),
+ Completions: completions,
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Labels: map[string]string{"name": name},
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Image: "nginx",
+ },
+ },
+ },
+ },
+ },
+ Status: batchv1.JobStatus{
+ Succeeded: int32(succeeded),
+ Failed: int32(failed),
+ },
+ }
+}
+
+func newService(name string, serviceSpec corev1.ServiceSpec) *corev1.Service {
+ return &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ },
+ Spec: serviceSpec,
+ Status: corev1.ServiceStatus{
+ LoadBalancer: corev1.LoadBalancerStatus{
+ Ingress: nil,
+ },
+ },
+ }
+}
+
+func newcrdBetaReady(name string, crdBetaStatus apiextv1beta1.CustomResourceDefinitionStatus) apiextv1beta1.CustomResourceDefinition {
+ return apiextv1beta1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ },
+ Spec: apiextv1beta1.CustomResourceDefinitionSpec{},
+ Status: crdBetaStatus,
+ }
+}
+
+func newcrdReady(name string, crdBetaStatus apiextv1.CustomResourceDefinitionStatus) apiextv1.CustomResourceDefinition {
+ return apiextv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: defaultNamespace,
+ },
+ Spec: apiextv1.CustomResourceDefinitionSpec{},
+ Status: crdBetaStatus,
+ }
+}
+
+func intToInt32(i int) *int32 {
+ i32 := int32(i)
+ return &i32
+}
diff --git a/helm/pkg/kube/resource.go b/helm/pkg/kube/resource.go
new file mode 100644
index 000000000..d88b171f0
--- /dev/null
+++ b/helm/pkg/kube/resource.go
@@ -0,0 +1,85 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import "k8s.io/cli-runtime/pkg/resource"
+
+// ResourceList provides convenience methods for comparing collections of Infos.
+type ResourceList []*resource.Info
+
+// Append adds an Info to the Result.
+func (r *ResourceList) Append(val *resource.Info) {
+ *r = append(*r, val)
+}
+
+// Visit implements resource.Visitor. The visitor stops if fn returns an error.
+func (r ResourceList) Visit(fn resource.VisitorFunc) error {
+ for _, i := range r {
+ if err := fn(i, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Filter returns a new Result with Infos that satisfy the predicate fn.
+func (r ResourceList) Filter(fn func(*resource.Info) bool) ResourceList {
+ var result ResourceList
+ for _, i := range r {
+ if fn(i) {
+ result.Append(i)
+ }
+ }
+ return result
+}
+
+// Get returns the Info from the result that matches the name and kind.
+func (r ResourceList) Get(info *resource.Info) *resource.Info {
+ for _, i := range r {
+ if isMatchingInfo(i, info) {
+ return i
+ }
+ }
+ return nil
+}
+
+// Contains checks to see if an object exists.
+func (r ResourceList) Contains(info *resource.Info) bool {
+ for _, i := range r {
+ if isMatchingInfo(i, info) {
+ return true
+ }
+ }
+ return false
+}
+
+// Difference will return a new Result with objects not contained in rs.
+func (r ResourceList) Difference(rs ResourceList) ResourceList {
+ return r.Filter(func(info *resource.Info) bool {
+ return !rs.Contains(info)
+ })
+}
+
+// Intersect will return a new Result with objects contained in both Results.
+func (r ResourceList) Intersect(rs ResourceList) ResourceList {
+ return r.Filter(rs.Contains)
+}
+
+// isMatchingInfo returns true if infos match on Name and GroupVersionKind.
+func isMatchingInfo(a, b *resource.Info) bool {
+ return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind == b.Mapping.GroupVersionKind
+}
diff --git a/helm/pkg/kube/resource_policy.go b/helm/pkg/kube/resource_policy.go
new file mode 100644
index 000000000..fb1089785
--- /dev/null
+++ b/helm/pkg/kube/resource_policy.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+// ResourcePolicyAnno is the annotation name for a resource policy
+const ResourcePolicyAnno = "helm.sh/resource-policy"
+
+// KeepPolicy is the resource policy type for keep
+//
+// This resource policy type allows resources to skip being deleted
+//
+// during an uninstallRelease action.
+const KeepPolicy = "keep"
diff --git a/helm/pkg/kube/resource_test.go b/helm/pkg/kube/resource_test.go
new file mode 100644
index 000000000..ccc613c1b
--- /dev/null
+++ b/helm/pkg/kube/resource_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "testing"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/cli-runtime/pkg/resource"
+)
+
+func TestResourceList(t *testing.T) {
+ mapping := &meta.RESTMapping{
+ Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "pod"},
+ }
+
+ info := func(name string) *resource.Info {
+ return &resource.Info{Name: name, Mapping: mapping}
+ }
+
+ var r1, r2 ResourceList
+ r1 = []*resource.Info{info("foo"), info("bar")}
+ r2 = []*resource.Info{info("bar")}
+
+ if r1.Get(info("bar")).Mapping.Resource.Resource != "pod" {
+ t.Error("expected get pod")
+ }
+
+ diff := r1.Difference(r2)
+ if len(diff) != 1 {
+ t.Error("expected 1 result")
+ }
+
+ if !diff.Contains(info("foo")) {
+ t.Error("expected diff to return foo")
+ }
+
+ inter := r1.Intersect(r2)
+ if len(inter) != 1 {
+ t.Error("expected 1 result")
+ }
+
+ if !inter.Contains(info("bar")) {
+ t.Error("expected intersect to return bar")
+ }
+}
+
+func TestIsMatchingInfo(t *testing.T) {
+ gvk := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "pod"}
+ resourceInfo := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvk}}
+
+ gvkDiffGroup := schema.GroupVersionKind{Group: "diff", Version: "version1", Kind: "pod"}
+ resourceInfoDiffGroup := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffGroup}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffGroup) {
+ t.Error("expected resources not equal")
+ }
+
+ gvkDiffVersion := schema.GroupVersionKind{Group: "group1", Version: "diff", Kind: "pod"}
+ resourceInfoDiffVersion := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffVersion}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffVersion) {
+ t.Error("expected resources not equal")
+ }
+
+ gvkDiffKind := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "deployment"}
+ resourceInfoDiffKind := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffKind}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffKind) {
+ t.Error("expected resources not equal")
+ }
+
+ resourceInfoDiffName := resource.Info{Name: "diff", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvk}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffName) {
+ t.Error("expected resources not equal")
+ }
+
+ resourceInfoDiffNamespace := resource.Info{Name: "name1", Namespace: "diff", Mapping: &meta.RESTMapping{GroupVersionKind: gvk}}
+ if isMatchingInfo(&resourceInfo, &resourceInfoDiffNamespace) {
+ t.Error("expected resources not equal")
+ }
+
+ gvkEqual := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "pod"}
+ resourceInfoEqual := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkEqual}}
+ if !isMatchingInfo(&resourceInfo, &resourceInfoEqual) {
+ t.Error("expected resources to be equal")
+ }
+}
diff --git a/helm/pkg/kube/result.go b/helm/pkg/kube/result.go
new file mode 100644
index 000000000..c3e171c2e
--- /dev/null
+++ b/helm/pkg/kube/result.go
@@ -0,0 +1,28 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+// Result contains the information of created, updated, and deleted resources
+// for various kube API calls along with helper methods for using those
+// resources
+type Result struct {
+ Created ResourceList
+ Updated ResourceList
+ Deleted ResourceList
+}
+
+// If needed, we can add methods to the Result type for things like diffing
diff --git a/helm/pkg/kube/roundtripper.go b/helm/pkg/kube/roundtripper.go
new file mode 100644
index 000000000..52cb5bad2
--- /dev/null
+++ b/helm/pkg/kube/roundtripper.go
@@ -0,0 +1,80 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "strings"
+)
+
+type RetryingRoundTripper struct {
+ Wrapped http.RoundTripper
+}
+
+func (rt *RetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ return rt.roundTrip(req, 1, nil)
+}
+
+func (rt *RetryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) {
+ if retry < 0 {
+ return prevResp, nil
+ }
+ resp, rtErr := rt.Wrapped.RoundTrip(req)
+ if rtErr != nil {
+ return resp, rtErr
+ }
+ if resp.StatusCode < 500 {
+ return resp, rtErr
+ }
+ if resp.Header.Get("content-type") != "application/json" {
+ return resp, rtErr
+ }
+ b, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return resp, err
+ }
+
+ var ke kubernetesError
+ r := bytes.NewReader(b)
+ err = json.NewDecoder(r).Decode(&ke)
+ r.Seek(0, io.SeekStart)
+ resp.Body = io.NopCloser(r)
+ if err != nil {
+ return resp, err
+ }
+ if ke.Code < 500 {
+ return resp, nil
+ }
+ // Matches messages like "etcdserver: leader changed"
+ if strings.HasSuffix(ke.Message, "etcdserver: leader changed") {
+ return rt.roundTrip(req, retry-1, resp)
+ }
+ // Matches messages like "rpc error: code = Unknown desc = raft proposal dropped"
+ if strings.HasSuffix(ke.Message, "raft proposal dropped") {
+ return rt.roundTrip(req, retry-1, resp)
+ }
+ return resp, nil
+}
+
+type kubernetesError struct {
+ Message string `json:"message"`
+ Code int `json:"code"`
+}
diff --git a/helm/pkg/kube/roundtripper_test.go b/helm/pkg/kube/roundtripper_test.go
new file mode 100644
index 000000000..96602c1f4
--- /dev/null
+++ b/helm/pkg/kube/roundtripper_test.go
@@ -0,0 +1,161 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type fakeRoundTripper struct {
+ resp *http.Response
+ err error
+ calls int
+}
+
+func (f *fakeRoundTripper) RoundTrip(_ *http.Request) (*http.Response, error) {
+ f.calls++
+ return f.resp, f.err
+}
+
+func newRespWithBody(statusCode int, contentType, body string) *http.Response {
+ return &http.Response{
+ StatusCode: statusCode,
+ Header: http.Header{"Content-Type": []string{contentType}},
+ Body: io.NopCloser(strings.NewReader(body)),
+ }
+}
+
+func TestRetryingRoundTripper_RoundTrip(t *testing.T) {
+ marshalErr := func(code int, msg string) string {
+ b, _ := json.Marshal(kubernetesError{
+ Code: code,
+ Message: msg,
+ })
+ return string(b)
+ }
+
+ tests := []struct {
+ name string
+ resp *http.Response
+ err error
+ expectedCalls int
+ expectedErr string
+ expectedCode int
+ }{
+ {
+ name: "no retry, status < 500 returns response",
+ resp: newRespWithBody(200, "application/json", `{"message":"ok","code":200}`),
+ err: nil,
+ expectedCalls: 1,
+ expectedCode: 200,
+ },
+ {
+ name: "error from wrapped RoundTripper propagates",
+ resp: nil,
+ err: errors.New("wrapped error"),
+ expectedCalls: 1,
+ expectedErr: "wrapped error",
+ },
+ {
+ name: "no retry, content-type not application/json",
+ resp: newRespWithBody(500, "text/plain", "server error"),
+ err: nil,
+ expectedCalls: 1,
+ expectedCode: 500,
+ },
+ {
+ name: "error reading body returns error",
+ resp: &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ Body: &errReader{},
+ },
+ err: nil,
+ expectedCalls: 1,
+ expectedErr: "read error",
+ },
+ {
+ name: "error decoding JSON returns error",
+ resp: newRespWithBody(500, "application/json", `invalid-json`),
+ err: nil,
+ expectedCalls: 1,
+ expectedErr: "invalid character",
+ },
+ {
+ name: "retry on etcdserver leader changed message",
+ resp: newRespWithBody(500, "application/json", marshalErr(500, "some error etcdserver: leader changed")),
+ err: nil,
+ expectedCalls: 2,
+ expectedCode: 500,
+ },
+ {
+ name: "retry on raft proposal dropped message",
+ resp: newRespWithBody(500, "application/json", marshalErr(500, "rpc error: code = Unknown desc = raft proposal dropped")),
+ err: nil,
+ expectedCalls: 2,
+ expectedCode: 500,
+ },
+ {
+ name: "no retry on other error message",
+ resp: newRespWithBody(500, "application/json", marshalErr(500, "other server error")),
+ err: nil,
+ expectedCalls: 1,
+ expectedCode: 500,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fakeRT := &fakeRoundTripper{
+ resp: tt.resp,
+ err: tt.err,
+ }
+ rt := RetryingRoundTripper{
+ Wrapped: fakeRT,
+ }
+ req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
+ resp, err := rt.RoundTrip(req)
+
+ if tt.expectedErr != "" {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.expectedErr)
+ return
+ }
+ assert.NoError(t, err)
+
+ assert.Equal(t, tt.expectedCode, resp.StatusCode)
+ assert.Equal(t, tt.expectedCalls, fakeRT.calls)
+ })
+ }
+}
+
+type errReader struct{}
+
+func (e *errReader) Read(_ []byte) (int, error) {
+ return 0, errors.New("read error")
+}
+
+func (e *errReader) Close() error {
+ return nil
+}
diff --git a/helm/pkg/kube/statuswait.go b/helm/pkg/kube/statuswait.go
new file mode 100644
index 000000000..6915852b7
--- /dev/null
+++ b/helm/pkg/kube/statuswait.go
@@ -0,0 +1,291 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "sort"
+ "time"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/aggregator"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/watcher"
+ "github.com/fluxcd/cli-utils/pkg/object"
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/client-go/dynamic"
+ watchtools "k8s.io/client-go/tools/watch"
+
+ helmStatusReaders "helm.sh/helm/v4/internal/statusreaders"
+)
+
+type statusWaiter struct {
+ client dynamic.Interface
+ restMapper meta.RESTMapper
+ ctx context.Context
+ readers []engine.StatusReader
+}
+
+// DefaultStatusWatcherTimeout is the timeout used by the status waiter when a
+// zero timeout is provided. This prevents callers from accidentally passing a
+// zero value (which would immediately cancel the context) and getting
+// "context deadline exceeded" errors. SDK callers can rely on this default
+// when they don't set a timeout.
+var DefaultStatusWatcherTimeout = 30 * time.Second
+
+func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
+ return &status.Result{
+ Status: status.CurrentStatus,
+ Message: "Resource is current",
+ }, nil
+}
+
+func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
+ if timeout == 0 {
+ timeout = DefaultStatusWatcherTimeout
+ }
+ ctx, cancel := w.contextWithTimeout(timeout)
+ defer cancel()
+ slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
+ podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
+ // We don't want to wait on any other resources as watchUntilReady is only for Helm hooks.
+ // If custom readers are defined they can be used as Helm hooks support any resource.
+ // We put them in front since the DelegatingStatusReader uses the first reader that matches.
+ genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady)
+
+ sr := &statusreaders.DelegatingStatusReader{
+ StatusReaders: append(w.readers, jobSR, podSR, genericSR),
+ }
+ sw.StatusReader = sr
+ return w.wait(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
+ if timeout == 0 {
+ timeout = DefaultStatusWatcherTimeout
+ }
+ ctx, cancel := w.contextWithTimeout(timeout)
+ defer cancel()
+ slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...)
+ return w.wait(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
+ if timeout == 0 {
+ timeout = DefaultStatusWatcherTimeout
+ }
+ ctx, cancel := w.contextWithTimeout(timeout)
+ defer cancel()
+ slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
+ readers := append([]engine.StatusReader(nil), w.readers...)
+ readers = append(readers, newCustomJobStatusReader)
+ customSR := statusreaders.NewStatusReader(w.restMapper, readers...)
+ sw.StatusReader = customSR
+ return w.wait(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
+ if timeout == 0 {
+ timeout = DefaultStatusWatcherTimeout
+ }
+ ctx, cancel := w.contextWithTimeout(timeout)
+ defer cancel()
+ slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
+ sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+ return w.waitForDelete(ctx, resourceList, sw)
+}
+
+func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
+ cancelCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ resources := []object.ObjMetadata{}
+ for _, resource := range resourceList {
+ obj, err := object.RuntimeToObjMeta(resource.Object)
+ if err != nil {
+ return err
+ }
+ resources = append(resources, obj)
+ }
+ eventCh := sw.Watch(cancelCtx, resources, watcher.Options{
+ RESTScopeStrategy: watcher.RESTScopeNamespace,
+ })
+ statusCollector := collector.NewResourceStatusCollector(resources)
+ done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus))
+ <-done
+
+ if statusCollector.Error != nil {
+ return statusCollector.Error
+ }
+
+ errs := []error{}
+ for _, id := range resources {
+ rs := statusCollector.ResourceStatuses[id]
+ if rs.Status == status.NotFoundStatus || rs.Status == status.UnknownStatus {
+ continue
+ }
+ errs = append(errs, fmt.Errorf("resource %s/%s/%s still exists. status: %s, message: %s",
+ rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message))
+ }
+ if err := ctx.Err(); err != nil {
+ errs = append(errs, err)
+ }
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errors.Join(errs...)
+ }
+}
+
+func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
+ cancelCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ resources := []object.ObjMetadata{}
+ for _, resource := range resourceList {
+ switch value := AsVersioned(resource).(type) {
+ case *appsv1.Deployment:
+ if value.Spec.Paused {
+ continue
+ }
+ }
+ obj, err := object.RuntimeToObjMeta(resource.Object)
+ if err != nil {
+ return err
+ }
+ resources = append(resources, obj)
+ }
+
+ eventCh := sw.Watch(cancelCtx, resources, watcher.Options{
+ RESTScopeStrategy: watcher.RESTScopeNamespace,
+ })
+ statusCollector := collector.NewResourceStatusCollector(resources)
+ done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus))
+ <-done
+
+ if statusCollector.Error != nil {
+ return statusCollector.Error
+ }
+
+ errs := []error{}
+ for _, id := range resources {
+ rs := statusCollector.ResourceStatuses[id]
+ if rs.Status == status.CurrentStatus {
+ continue
+ }
+ errs = append(errs, fmt.Errorf("resource %s/%s/%s not ready. status: %s, message: %s",
+ rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message))
+ }
+ if err := ctx.Err(); err != nil {
+ errs = append(errs, err)
+ }
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errors.Join(errs...)
+ }
+}
+
+func (w *statusWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
+ return contextWithTimeout(w.ctx, timeout)
+}
+
+func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return watchtools.ContextWithOptionalTimeout(ctx, timeout)
+}
+
+func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc {
+ return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
+ var rss []*event.ResourceStatus
+ var nonDesiredResources []*event.ResourceStatus
+ for _, rs := range statusCollector.ResourceStatuses {
+ if rs == nil {
+ continue
+ }
+ // If a resource is already deleted before waiting has started, it will show as unknown.
+ // This check ensures we don't wait forever for a resource that is already deleted
+ if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus {
+ continue
+ }
+ // Failed is a terminal state. This check ensures we don't wait forever for a resource
+ // that has already failed, as intervention is required to resolve the failure.
+ if rs.Status == status.FailedStatus && desired == status.CurrentStatus {
+ continue
+ }
+ rss = append(rss, rs)
+ if rs.Status != desired {
+ nonDesiredResources = append(nonDesiredResources, rs)
+ }
+ }
+
+ if aggregator.AggregateStatus(rss, desired) == desired {
+ slog.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss))
+ cancel()
+ return
+ }
+
+ if len(nonDesiredResources) > 0 {
+ // Log a single resource so the user knows what they're waiting for without an overwhelming amount of output
+ sort.Slice(nonDesiredResources, func(i, j int) bool {
+ return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
+ })
+ first := nonDesiredResources[0]
+ slog.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status)
+ }
+ }
+}
+
+type hookOnlyWaiter struct {
+ sw *statusWaiter
+}
+
+func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
+ return w.sw.WatchUntilReady(resourceList, timeout)
+}
+
+func (w *hookOnlyWaiter) Wait(_ ResourceList, _ time.Duration) error {
+ return nil
+}
+
+func (w *hookOnlyWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error {
+ return nil
+}
+
+func (w *hookOnlyWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error {
+ return nil
+}
diff --git a/helm/pkg/kube/statuswait_test.go b/helm/pkg/kube/statuswait_test.go
new file mode 100644
index 000000000..781e254a9
--- /dev/null
+++ b/helm/pkg/kube/statuswait_test.go
@@ -0,0 +1,1342 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
+ "github.com/fluxcd/cli-utils/pkg/kstatus/status"
+ "github.com/fluxcd/cli-utils/pkg/object"
+ "github.com/fluxcd/cli-utils/pkg/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/apimachinery/pkg/watch"
+ dynamicfake "k8s.io/client-go/dynamic/fake"
+ clienttesting "k8s.io/client-go/testing"
+ "k8s.io/kubectl/pkg/scheme"
+)
+
+var podCurrentManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: current-pod
+ namespace: ns
+status:
+ conditions:
+ - type: Ready
+ status: "True"
+ phase: Running
+`
+
+var podNoStatusManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: in-progress-pod
+ namespace: ns
+`
+
+var jobNoStatusManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test
+ namespace: qual
+ generation: 1
+`
+
+var jobReadyManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: ready-not-complete
+ namespace: default
+ generation: 1
+status:
+ startTime: 2025-02-06T16:34:20-05:00
+ active: 1
+ ready: 1
+`
+
+var jobCompleteManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: test
+ namespace: qual
+ generation: 1
+status:
+ succeeded: 1
+ active: 0
+ conditions:
+ - type: Complete
+ status: "True"
+`
+
+var jobFailedManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: failed-job
+ namespace: default
+ generation: 1
+status:
+ failed: 1
+ active: 0
+ conditions:
+ - type: Failed
+ status: "True"
+ reason: BackoffLimitExceeded
+ message: "Job has reached the specified backoff limit"
+`
+
+var podCompleteManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: good-pod
+ namespace: ns
+status:
+ phase: Succeeded
+`
+
+var pausedDeploymentManifest = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: paused
+ namespace: ns-1
+ generation: 1
+spec:
+ paused: true
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.19.6
+ ports:
+ - containerPort: 80
+`
+
+var notReadyDeploymentManifest = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: not-ready
+ namespace: ns-1
+ generation: 1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.19.6
+ ports:
+ - containerPort: 80
+`
+
+var podNamespace1Manifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-ns1
+ namespace: namespace-1
+status:
+ conditions:
+ - type: Ready
+ status: "True"
+ phase: Running
+`
+
+var podNamespace2Manifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-ns2
+ namespace: namespace-2
+status:
+ conditions:
+ - type: Ready
+ status: "True"
+ phase: Running
+`
+
+var podNamespace1NoStatusManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-ns1
+ namespace: namespace-1
+`
+
+var jobNamespace1CompleteManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: job-ns1
+ namespace: namespace-1
+ generation: 1
+status:
+ succeeded: 1
+ active: 0
+ conditions:
+ - type: Complete
+ status: "True"
+`
+
+var podNamespace2SucceededManifest = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-ns2
+ namespace: namespace-2
+status:
+ phase: Succeeded
+`
+
+var clusterRoleManifest = `
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: test-cluster-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get", "list"]
+`
+
+var namespaceManifest = `
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: test-namespace
+`
+
+func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource {
+ t.Helper()
+ gvk := obj.GroupVersionKind()
+ mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+ require.NoError(t, err)
+ return mapping.Resource
+}
+
+func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Object {
+ t.Helper()
+ objects := []runtime.Object{}
+ for _, manifest := range manifests {
+ m := make(map[string]interface{})
+ err := yaml.Unmarshal([]byte(manifest), &m)
+ assert.NoError(t, err)
+ resource := &unstructured.Unstructured{Object: m}
+ objects = append(objects, resource)
+ }
+ return objects
+}
+
+func getResourceListFromRuntimeObjs(t *testing.T, c *Client, objs []runtime.Object) ResourceList {
+ t.Helper()
+ resourceList := ResourceList{}
+ for _, obj := range objs {
+ list, err := c.Build(objBody(obj), false)
+ assert.NoError(t, err)
+ resourceList = append(resourceList, list...)
+ }
+ return resourceList
+}
+
+func TestStatusWaitForDelete(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ manifestsToCreate []string
+ manifestsToDelete []string
+ expectErrStrs []string
+ }{
+ {
+ name: "wait for pod to be deleted",
+ manifestsToCreate: []string{podCurrentManifest},
+ manifestsToDelete: []string{podCurrentManifest},
+ expectErrStrs: nil,
+ },
+ {
+ name: "error when not all objects are deleted",
+ manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest},
+ manifestsToDelete: []string{jobCompleteManifest},
+ expectErrStrs: []string{"resource Pod/ns/current-pod still exists. status: Current", "context deadline exceeded"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ timeout := time.Second
+ timeUntilPodDelete := time.Millisecond * 500
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ restMapper: fakeMapper,
+ client: fakeClient,
+ }
+ objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
+ for _, objToCreate := range objsToCreate {
+ u := objToCreate.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ objsToDelete := getRuntimeObjFromManifests(t, tt.manifestsToDelete)
+ for _, objToDelete := range objsToDelete {
+ u := objToDelete.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) {
+ time.Sleep(timeUntilPodDelete)
+ err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+ assert.NoError(t, err)
+ }(gvr, u)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate)
+ err := statusWaiter.WaitForDelete(resourceList, timeout)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ timeout := time.Second
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ )
+ statusWaiter := statusWaiter{
+ restMapper: fakeMapper,
+ client: fakeClient,
+ }
+ // Don't create the object to test that the wait for delete works when the object doesn't exist
+ objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+ resourceList := getResourceListFromRuntimeObjs(t, c, objManifest)
+ err := statusWaiter.WaitForDelete(resourceList, timeout)
+ assert.NoError(t, err)
+}
+
+func TestStatusWait(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrStrs []string
+ waitForJobs bool
+ }{
+ {
+ name: "Job is not complete",
+ objManifests: []string{jobNoStatusManifest},
+ expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"},
+ waitForJobs: true,
+ },
+ {
+ name: "Job is ready but not complete",
+ objManifests: []string{jobReadyManifest},
+ expectErrStrs: nil,
+ waitForJobs: false,
+ },
+ {
+ name: "Pod is ready",
+ objManifests: []string{podCurrentManifest},
+ },
+ {
+ name: "one of the pods never becomes ready",
+ objManifests: []string{podNoStatusManifest, podCurrentManifest},
+ expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: InProgress", "context deadline exceeded"},
+ },
+ {
+ name: "paused deployment passes",
+ objManifests: []string{pausedDeploymentManifest},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ appsv1.SchemeGroupVersion.WithKind("Deployment"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.Wait(resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestWaitForJobComplete(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrStrs []string
+ }{
+ {
+ name: "Job is complete",
+ objManifests: []string{jobCompleteManifest},
+ },
+ {
+ name: "Job is not ready",
+ objManifests: []string{jobNoStatusManifest},
+ expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"},
+ },
+ {
+ name: "Job is ready but not complete",
+ objManifests: []string{jobReadyManifest},
+ expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. status: InProgress", "context deadline exceeded"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.WaitWithJobs(resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestWatchForReady(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrStrs []string
+ }{
+ {
+ name: "succeeds if pod and job are complete",
+ objManifests: []string{jobCompleteManifest, podCompleteManifest},
+ },
+ {
+ name: "succeeds when a resource that's not a pod or job is not ready",
+ objManifests: []string{notReadyDeploymentManifest},
+ },
+ {
+ name: "Fails if job is not complete",
+ objManifests: []string{jobReadyManifest},
+ expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. status: InProgress", "context deadline exceeded"},
+ },
+ {
+ name: "Fails if pod is not complete",
+ objManifests: []string{podCurrentManifest},
+ expectErrStrs: []string{"resource Pod/ns/current-pod not ready. status: InProgress", "context deadline exceeded"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ appsv1.SchemeGroupVersion.WithKind("Deployment"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestStatusWaitMultipleNamespaces(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ expectErrStrs []string
+ testFunc func(statusWaiter, ResourceList, time.Duration) error
+ }{
+ {
+ name: "pods in multiple namespaces",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "hooks in multiple namespaces",
+ objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WatchUntilReady(rl, timeout)
+ },
+ },
+ {
+ name: "error when resource not ready in one namespace",
+ objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest},
+ expectErrStrs: []string{"resource Pod/namespace-1/pod-ns1 not ready. status: InProgress", "context deadline exceeded"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "delete resources in multiple namespaces",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WaitForDelete(rl, timeout)
+ },
+ },
+ {
+ name: "cluster-scoped resources work correctly with unrestricted permissions",
+ objManifests: []string{podNamespace1Manifest, clusterRoleManifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "namespace-scoped and cluster-scoped resources work together",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest, clusterRoleManifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "delete cluster-scoped resources works correctly",
+ objManifests: []string{podNamespace1Manifest, namespaceManifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WaitForDelete(rl, timeout)
+ },
+ },
+ {
+ name: "watch cluster-scoped resources works correctly",
+ objManifests: []string{clusterRoleManifest},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WatchUntilReady(rl, timeout)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}.WithKind("ClusterRole"),
+ v1.SchemeGroupVersion.WithKind("Namespace"),
+ )
+ sw := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+
+ if strings.Contains(tt.name, "delete") {
+ timeUntilDelete := time.Millisecond * 500
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) {
+ time.Sleep(timeUntilDelete)
+ err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+ assert.NoError(t, err)
+ }(gvr, u)
+ }
+ }
+
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := tt.testFunc(sw, resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+// restrictedClientConfig holds the configuration for RBAC simulation on a fake dynamic client
+type restrictedClientConfig struct {
+ allowedNamespaces map[string]bool
+ clusterScopedListAttempted bool
+}
+
+// setupRestrictedClient configures a fake dynamic client to simulate RBAC restrictions
+// by using PrependReactor and PrependWatchReactor to intercept list/watch operations.
+func setupRestrictedClient(fakeClient *dynamicfake.FakeDynamicClient, allowedNamespaces []string) *restrictedClientConfig {
+ allowed := make(map[string]bool)
+ for _, ns := range allowedNamespaces {
+ allowed[ns] = true
+ }
+ config := &restrictedClientConfig{
+ allowedNamespaces: allowed,
+ }
+
+ // Intercept list operations
+ fakeClient.PrependReactor("list", "*", func(action clienttesting.Action) (bool, runtime.Object, error) {
+ listAction := action.(clienttesting.ListAction)
+ ns := listAction.GetNamespace()
+ if ns == "" {
+ // Cluster-scoped list
+ config.clusterScopedListAttempted = true
+ return true, nil, apierrors.NewForbidden(
+ action.GetResource().GroupResource(),
+ "",
+ fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources"),
+ )
+ }
+ if !config.allowedNamespaces[ns] {
+ return true, nil, apierrors.NewForbidden(
+ action.GetResource().GroupResource(),
+ "",
+ fmt.Errorf("user does not have LIST permissions in namespace %q", ns),
+ )
+ }
+ // Fall through to the default handler
+ return false, nil, nil
+ })
+
+ // Intercept watch operations
+ fakeClient.PrependWatchReactor("*", func(action clienttesting.Action) (bool, watch.Interface, error) {
+ watchAction := action.(clienttesting.WatchAction)
+ ns := watchAction.GetNamespace()
+ if ns == "" {
+ // Cluster-scoped watch
+ config.clusterScopedListAttempted = true
+ return true, nil, apierrors.NewForbidden(
+ action.GetResource().GroupResource(),
+ "",
+ fmt.Errorf("user does not have cluster-wide WATCH permissions for cluster-scoped resources"),
+ )
+ }
+ if !config.allowedNamespaces[ns] {
+ return true, nil, apierrors.NewForbidden(
+ action.GetResource().GroupResource(),
+ "",
+ fmt.Errorf("user does not have WATCH permissions in namespace %q", ns),
+ )
+ }
+ // Fall through to the default handler
+ return false, nil, nil
+ })
+
+ return config
+}
+
+func TestStatusWaitRestrictedRBAC(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ allowedNamespaces []string
+ expectErrs []error
+ testFunc func(statusWaiter, ResourceList, time.Duration) error
+ }{
+ {
+ name: "pods in multiple namespaces with namespace permissions",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ allowedNamespaces: []string{"namespace-1", "namespace-2"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "delete pods in multiple namespaces with namespace permissions",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ allowedNamespaces: []string{"namespace-1", "namespace-2"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WaitForDelete(rl, timeout)
+ },
+ },
+ {
+ name: "hooks in multiple namespaces with namespace permissions",
+ objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest},
+ allowedNamespaces: []string{"namespace-1", "namespace-2"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WatchUntilReady(rl, timeout)
+ },
+ },
+ {
+ name: "error when cluster-scoped resource included",
+ objManifests: []string{podNamespace1Manifest, clusterRoleManifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "error when deleting cluster-scoped resource",
+ objManifests: []string{podNamespace1Manifest, namespaceManifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WaitForDelete(rl, timeout)
+ },
+ },
+ {
+ name: "error when accessing disallowed namespace",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ baseFakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}.WithKind("ClusterRole"),
+ v1.SchemeGroupVersion.WithKind("Namespace"),
+ )
+ restrictedConfig := setupRestrictedClient(baseFakeClient, tt.allowedNamespaces)
+ sw := statusWaiter{
+ client: baseFakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := baseFakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+
+ if strings.Contains(tt.name, "delet") {
+ timeUntilDelete := time.Millisecond * 500
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) {
+ time.Sleep(timeUntilDelete)
+ err := baseFakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+ assert.NoError(t, err)
+ }(gvr, u)
+ }
+ }
+
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := tt.testFunc(sw, resourceList, time.Second*3)
+ if tt.expectErrs != nil {
+ require.Error(t, err)
+ for _, expectedErr := range tt.expectErrs {
+ assert.Contains(t, err.Error(), expectedErr.Error())
+ }
+ return
+ }
+ assert.NoError(t, err)
+ assert.False(t, restrictedConfig.clusterScopedListAttempted)
+ })
+ }
+}
+
+func TestStatusWaitMixedResources(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ allowedNamespaces []string
+ expectErrs []error
+ testFunc func(statusWaiter, ResourceList, time.Duration) error
+ }{
+ {
+ name: "wait succeeds with namespace-scoped resources only",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ allowedNamespaces: []string{"namespace-1", "namespace-2"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "wait fails when cluster-scoped resource included",
+ objManifests: []string{podNamespace1Manifest, clusterRoleManifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "waitForDelete fails when cluster-scoped resource included",
+ objManifests: []string{podNamespace1Manifest, clusterRoleManifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WaitForDelete(rl, timeout)
+ },
+ },
+ {
+ name: "wait fails when namespace resource included",
+ objManifests: []string{podNamespace1Manifest, namespaceManifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "error when accessing disallowed namespace",
+ objManifests: []string{podNamespace1Manifest, podNamespace2Manifest},
+ allowedNamespaces: []string{"namespace-1"},
+ expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ baseFakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ schema.GroupVersion{Group: "rbac.authorization.k8s.io", Version: "v1"}.WithKind("ClusterRole"),
+ v1.SchemeGroupVersion.WithKind("Namespace"),
+ )
+ restrictedConfig := setupRestrictedClient(baseFakeClient, tt.allowedNamespaces)
+ sw := statusWaiter{
+ client: baseFakeClient,
+ restMapper: fakeMapper,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := baseFakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+
+ if strings.Contains(tt.name, "delet") {
+ timeUntilDelete := time.Millisecond * 500
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) {
+ time.Sleep(timeUntilDelete)
+ err := baseFakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+ assert.NoError(t, err)
+ }(gvr, u)
+ }
+ }
+
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := tt.testFunc(sw, resourceList, time.Second*3)
+ if tt.expectErrs != nil {
+ require.Error(t, err)
+ for _, expectedErr := range tt.expectErrs {
+ assert.Contains(t, err.Error(), expectedErr.Error())
+ }
+ return
+ }
+ assert.NoError(t, err)
+ assert.False(t, restrictedConfig.clusterScopedListAttempted)
+ })
+ }
+}
+
+// mockStatusReader is a custom status reader for testing that tracks when it's used
+// and returns a configurable status for resources it supports.
+type mockStatusReader struct {
+ supportedGK schema.GroupKind
+ status status.Status
+ callCount atomic.Int32
+}
+
+func (m *mockStatusReader) Supports(gk schema.GroupKind) bool {
+ return gk == m.supportedGK
+}
+
+func (m *mockStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) {
+ m.callCount.Add(1)
+ return &event.ResourceStatus{
+ Identifier: id,
+ Status: m.status,
+ Message: "mock status reader",
+ }, nil
+}
+
+func (m *mockStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) {
+ m.callCount.Add(1)
+ id := object.ObjMetadata{
+ Namespace: u.GetNamespace(),
+ Name: u.GetName(),
+ GroupKind: u.GroupVersionKind().GroupKind(),
+ }
+ return &event.ResourceStatus{
+ Identifier: id,
+ Status: m.status,
+ Message: "mock status reader",
+ }, nil
+}
+
+func TestStatusWaitWithCustomReaders(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ customReader *mockStatusReader
+ expectErrStrs []string
+ }{
+ {
+ name: "custom reader makes pod immediately current",
+ objManifests: []string{podNoStatusManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ },
+ {
+ name: "custom reader returns in-progress status",
+ objManifests: []string{podCurrentManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.InProgressStatus,
+ },
+ expectErrStrs: []string{"resource Pod/ns/current-pod not ready. status: InProgress", "context deadline exceeded"},
+ },
+ {
+ name: "custom reader for different resource type is not used",
+ objManifests: []string{podCurrentManifest},
+ customReader: &mockStatusReader{
+ supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
+ status: status.InProgressStatus,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ readers: []engine.StatusReader{tt.customReader},
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.Wait(resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestStatusWaitWithJobsAndCustomReaders(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ customReader *mockStatusReader
+ expectErrs []error
+ }{
+ {
+ name: "custom reader makes job immediately current",
+ objManifests: []string{jobNoStatusManifest},
+ customReader: &mockStatusReader{
+ supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ expectErrs: nil,
+ },
+ {
+ name: "custom reader for pod works with WaitWithJobs",
+ objManifests: []string{podNoStatusManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ expectErrs: nil,
+ },
+ {
+ name: "built-in job reader is still appended after custom readers",
+ objManifests: []string{jobCompleteManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ expectErrs: nil,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ readers: []engine.StatusReader{tt.customReader},
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.WaitWithJobs(resourceList, time.Second*3)
+ if tt.expectErrs != nil {
+ assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestStatusWaitWithFailedResources(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ customReader *mockStatusReader
+ expectErrStrs []string
+ testFunc func(statusWaiter, ResourceList, time.Duration) error
+ }{
+ {
+ name: "Wait returns error when resource has failed",
+ objManifests: []string{podNoStatusManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.FailedStatus,
+ },
+ expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "WaitWithJobs returns error when job has failed",
+ objManifests: []string{jobFailedManifest},
+ customReader: nil, // Use the built-in job status reader
+ expectErrStrs: []string{
+ "resource Job/default/failed-job not ready. status: Failed",
+ },
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WaitWithJobs(rl, timeout)
+ },
+ },
+ {
+ name: "Wait returns errors when multiple resources fail",
+ objManifests: []string{podNoStatusManifest, podCurrentManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.FailedStatus,
+ },
+ // The mock reader will make both pods return FailedStatus
+ expectErrStrs: []string{
+ "resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader",
+ "resource Pod/ns/current-pod not ready. status: Failed, message: mock status reader",
+ },
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.Wait(rl, timeout)
+ },
+ },
+ {
+ name: "WatchUntilReady returns error when resource has failed",
+ objManifests: []string{podNoStatusManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.FailedStatus,
+ },
+ // WatchUntilReady also waits for CurrentStatus, so failed resources should return error
+ expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"},
+ testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+ return sw.WatchUntilReady(rl, timeout)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ var readers []engine.StatusReader
+ if tt.customReader != nil {
+ readers = []engine.StatusReader{tt.customReader}
+ }
+ sw := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ readers: readers,
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := tt.testFunc(sw, resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ objManifests []string
+ customReader *mockStatusReader
+ expectErrStrs []string
+ }{
+ {
+ name: "custom reader makes job immediately current for hooks",
+ objManifests: []string{jobNoStatusManifest},
+ customReader: &mockStatusReader{
+ supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ },
+ {
+ name: "custom reader makes pod immediately current for hooks",
+ objManifests: []string{podCurrentManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.CurrentStatus,
+ },
+ },
+ {
+ name: "custom reader takes precedence over built-in pod reader",
+ objManifests: []string{podCompleteManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.InProgressStatus,
+ },
+ expectErrStrs: []string{"resource Pod/ns/good-pod not ready. status: InProgress", "context deadline exceeded"},
+ },
+ {
+ name: "custom reader takes precedence over built-in job reader",
+ objManifests: []string{jobCompleteManifest},
+ customReader: &mockStatusReader{
+ supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
+ status: status.InProgressStatus,
+ },
+ expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"},
+ },
+ {
+ name: "custom reader for different resource type does not affect pods",
+ objManifests: []string{podCompleteManifest},
+ customReader: &mockStatusReader{
+ supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
+ status: status.InProgressStatus,
+ },
+ },
+ {
+ name: "built-in readers still work when custom reader does not match",
+ objManifests: []string{jobCompleteManifest},
+ customReader: &mockStatusReader{
+ supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+ status: status.InProgressStatus,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ c := newTestClient(t)
+ fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+ fakeMapper := testutil.NewFakeRESTMapper(
+ v1.SchemeGroupVersion.WithKind("Pod"),
+ batchv1.SchemeGroupVersion.WithKind("Job"),
+ )
+ statusWaiter := statusWaiter{
+ client: fakeClient,
+ restMapper: fakeMapper,
+ readers: []engine.StatusReader{tt.customReader},
+ }
+ objs := getRuntimeObjFromManifests(t, tt.objManifests)
+ for _, obj := range objs {
+ u := obj.(*unstructured.Unstructured)
+ gvr := getGVR(t, fakeMapper, u)
+ err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+ assert.NoError(t, err)
+ }
+ resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+ err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
+ if tt.expectErrStrs != nil {
+ require.Error(t, err)
+ for _, expectedErrStr := range tt.expectErrStrs {
+ assert.Contains(t, err.Error(), expectedErrStr)
+ }
+ return
+ }
+ assert.NoError(t, err)
+ })
+ }
+}
diff --git a/helm/pkg/kube/wait.go b/helm/pkg/kube/wait.go
new file mode 100644
index 000000000..9a276a459
--- /dev/null
+++ b/helm/pkg/kube/wait.go
@@ -0,0 +1,345 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v4/pkg/kube"
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "time"
+
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/cli-runtime/pkg/resource"
+ "k8s.io/client-go/kubernetes"
+ cachetools "k8s.io/client-go/tools/cache"
+ watchtools "k8s.io/client-go/tools/watch"
+
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3
+// Helm 4 now uses the StatusWaiter implementation instead
+type legacyWaiter struct {
+ c ReadyChecker
+ kubeClient *kubernetes.Clientset
+ ctx context.Context
+}
+
+func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
+ hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true))
+ return hw.waitForResources(resources, timeout)
+}
+
+func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
+ hw.c = NewReadyChecker(hw.kubeClient, PausedAsReady(true), CheckJobs(true))
+ return hw.waitForResources(resources, timeout)
+}
+
+// waitForResources polls to get the current status of all pods, PVCs, Services and
+// Jobs(optional) until all are ready or a timeout is reached
+func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
+ slog.Debug("beginning wait for resources", "count", len(created), "timeout", timeout)
+
+ ctx, cancel := hw.contextWithTimeout(timeout)
+ defer cancel()
+
+ numberOfErrors := make([]int, len(created))
+ for i := range numberOfErrors {
+ numberOfErrors[i] = 0
+ }
+
+ return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
+ waitRetries := 30
+ for i, v := range created {
+ ready, err := hw.c.IsReady(ctx, v)
+
+ if waitRetries > 0 && hw.isRetryableError(err, v) {
+ numberOfErrors[i]++
+ if numberOfErrors[i] > waitRetries {
+ slog.Debug("max number of retries reached", "resource", v.Name, "retries", numberOfErrors[i])
+ return false, err
+ }
+ slog.Debug("retrying resource readiness", "resource", v.Name, "currentRetries", numberOfErrors[i]-1, "maxRetries", waitRetries)
+ return false, nil
+ }
+ numberOfErrors[i] = 0
+ if !ready {
+ return false, err
+ }
+ }
+ return true, nil
+ })
+}
+
+func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) bool {
+ if err == nil {
+ return false
+ }
+ slog.Debug(
+ "error received when checking resource status",
+ slog.String("resource", resource.Name),
+ slog.Any("error", err),
+ )
+ if ev, ok := err.(*apierrors.StatusError); ok {
+ statusCode := ev.Status().Code
+ retryable := hw.isRetryableHTTPStatusCode(statusCode)
+ slog.Debug(
+ "status code received",
+ slog.String("resource", resource.Name),
+ slog.Int("statusCode", int(statusCode)),
+ slog.Bool("retryable", retryable),
+ )
+ return retryable
+ }
+ slog.Debug("retryable error assumed", "resource", resource.Name)
+ return true
+}
+
+func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
+ return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented)
+}
+
+// WaitForDelete polls to check if all the resources are deleted or a timeout is reached
+func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error {
+ slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
+
+ startTime := time.Now()
+ ctx, cancel := hw.contextWithTimeout(timeout)
+ defer cancel()
+
+ err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
+ for _, v := range deleted {
+ err := v.Get()
+ if err == nil || !apierrors.IsNotFound(err) {
+ return false, err
+ }
+ }
+ return true, nil
+ })
+
+ elapsed := time.Since(startTime).Round(time.Second)
+ if err != nil {
+ slog.Debug("wait for resources failed", slog.Duration("elapsed", elapsed), slog.Any("error", err))
+ } else {
+ slog.Debug("wait for resources succeeded", slog.Duration("elapsed", elapsed))
+ }
+
+ return err
+}
+
+// SelectorsForObject returns the pod label selector for a given object
+//
+// Modified version of https://github.com/kubernetes/kubernetes/blob/v1.14.1/pkg/kubectl/polymorphichelpers/helpers.go#L84
+func SelectorsForObject(object runtime.Object) (selector labels.Selector, err error) {
+ switch t := object.(type) {
+ case *extensionsv1beta1.ReplicaSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1.ReplicaSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.ReplicaSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *corev1.ReplicationController:
+ selector = labels.SelectorFromSet(t.Spec.Selector)
+ case *appsv1.StatefulSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta1.StatefulSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.StatefulSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *extensionsv1beta1.DaemonSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1.DaemonSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.DaemonSet:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *extensionsv1beta1.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta1.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *appsv1beta2.Deployment:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *batchv1.Job:
+ selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector)
+ case *corev1.Service:
+ if len(t.Spec.Selector) == 0 {
+ return nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name)
+ }
+ selector = labels.SelectorFromSet(t.Spec.Selector)
+
+ default:
+ return nil, fmt.Errorf("selector for %T not implemented", object)
+ }
+
+ if err != nil {
+ return selector, fmt.Errorf("invalid label selector: %w", err)
+ }
+
+ return selector, nil
+}
+
+func (hw *legacyWaiter) watchTimeout(t time.Duration) func(*resource.Info) error {
+ return func(info *resource.Info) error {
+ return hw.watchUntilReady(t, info)
+ }
+}
+
+// WatchUntilReady watches the resources given and waits until it is ready.
+//
+// This method is mainly for hook implementations. It watches for a resource to
+// hit a particular milestone. The milestone depends on the Kind.
+//
+// For most kinds, it checks to see if the resource is marked as Added or Modified
+// by the Kubernetes event stream. For some kinds, it does more:
+//
+// - Jobs: A job is marked "Ready" when it has successfully completed. This is
+// ascertained by watching the Status fields in a job's output.
+// - Pods: A pod is marked "Ready" when it has successfully completed. This is
+// ascertained by watching the status.phase field in a pod's output.
+//
+// Handling for other kinds will be added as necessary.
+func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
+ // For jobs, there's also the option to do poll c.Jobs(namespace).Get():
+ // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
+ return perform(resources, hw.watchTimeout(timeout))
+}
+
+func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error {
+ kind := info.Mapping.GroupVersionKind.Kind
+ switch kind {
+ case "Job", "Pod":
+ default:
+ return nil
+ }
+
+ slog.Debug("watching for resource changes", "kind", kind, "resource", info.Name, "timeout", timeout)
+
+ // Use a selector on the name of the resource. This should be unique for the
+ // given version and kind
+ selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
+ if err != nil {
+ return err
+ }
+ lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
+
+ // What we watch for depends on the Kind.
+ // - For a Job, we watch for completion.
+ // - For all else, we watch until Ready.
+ // In the future, we might want to add some special logic for types
+ // like Ingress, Volume, etc.
+
+ ctx, cancel := hw.contextWithTimeout(timeout)
+ defer cancel()
+ _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
+ // Make sure the incoming object is versioned as we use unstructured
+ // objects when we build manifests
+ obj := convertWithMapper(e.Object, info.Mapping)
+ switch e.Type {
+ case watch.Added, watch.Modified:
+ // For things like a secret or a config map, this is the best indicator
+ // we get. We care mostly about jobs, where what we want to see is
+ // the status go into a good state. For other types, like ReplicaSet
+ // we don't really do anything to support these as hooks.
+ slog.Debug("add/modify event received", "resource", info.Name, "eventType", e.Type)
+
+ switch kind {
+ case "Job":
+ return hw.waitForJob(obj, info.Name)
+ case "Pod":
+ return hw.waitForPodSuccess(obj, info.Name)
+ }
+ return true, nil
+ case watch.Deleted:
+ slog.Debug("deleted event received", "resource", info.Name)
+ return true, nil
+ case watch.Error:
+ // Handle error and return with an error.
+ slog.Error("error event received", "resource", info.Name)
+ return true, fmt.Errorf("failed to deploy %s", info.Name)
+ default:
+ return false, nil
+ }
+ })
+ return err
+}
+
+// waitForJob is a helper that waits for a job to complete.
+//
+// This operates on an event returned from a watcher.
+func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error) {
+ o, ok := obj.(*batchv1.Job)
+ if !ok {
+ return true, fmt.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
+ }
+
+ for _, c := range o.Status.Conditions {
+ if c.Type == batchv1.JobComplete && c.Status == "True" {
+ return true, nil
+ } else if c.Type == batchv1.JobFailed && c.Status == "True" {
+ slog.Error("job failed", "job", name, "reason", c.Reason)
+ return true, fmt.Errorf("job %s failed: %s", name, c.Reason)
+ }
+ }
+
+ slog.Debug("job status update", "job", name, "active", o.Status.Active, "failed", o.Status.Failed, "succeeded", o.Status.Succeeded)
+ return false, nil
+}
+
+// waitForPodSuccess is a helper that waits for a pod to complete.
+//
+// This operates on an event returned from a watcher.
+func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
+ o, ok := obj.(*corev1.Pod)
+ if !ok {
+ return true, fmt.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
+ }
+
+ switch o.Status.Phase {
+ case corev1.PodSucceeded:
+ slog.Debug("pod succeeded", "pod", o.Name)
+ return true, nil
+ case corev1.PodFailed:
+ slog.Error("pod failed", "pod", o.Name)
+ return true, fmt.Errorf("pod %s failed", o.Name)
+ case corev1.PodPending:
+ slog.Debug("pod pending", "pod", o.Name)
+ case corev1.PodRunning:
+ slog.Debug("pod running", "pod", o.Name)
+ case corev1.PodUnknown:
+ slog.Debug("pod unknown", "pod", o.Name)
+ }
+
+ return false, nil
+}
+
+func (hw *legacyWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
+ return contextWithTimeout(hw.ctx, timeout)
+}
diff --git a/helm/pkg/kube/wait_test.go b/helm/pkg/kube/wait_test.go
new file mode 100644
index 000000000..d96f2c486
--- /dev/null
+++ b/helm/pkg/kube/wait_test.go
@@ -0,0 +1,467 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/resource"
+)
+
+func TestSelectorsForObject(t *testing.T) {
+ tests := []struct {
+ name string
+ object interface{}
+ expectError bool
+ errorContains string
+ expectedLabels map[string]string
+ }{
+ {
+ name: "appsv1 ReplicaSet",
+ object: &appsv1.ReplicaSet{
+ Spec: appsv1.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "test"},
+ },
+ },
+ },
+ expectError: false,
+ expectedLabels: map[string]string{"app": "test"},
+ },
+ {
+ name: "extensionsv1beta1 ReplicaSet",
+ object: &extensionsv1beta1.ReplicaSet{
+ Spec: extensionsv1beta1.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "ext-rs"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "ext-rs"},
+ },
+ {
+ name: "appsv1beta2 ReplicaSet",
+ object: &appsv1beta2.ReplicaSet{
+ Spec: appsv1beta2.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "beta2-rs"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "beta2-rs"},
+ },
+ {
+ name: "corev1 ReplicationController",
+ object: &corev1.ReplicationController{
+ Spec: corev1.ReplicationControllerSpec{
+ Selector: map[string]string{"rc": "test"},
+ },
+ },
+ expectError: false,
+ expectedLabels: map[string]string{"rc": "test"},
+ },
+ {
+ name: "appsv1 StatefulSet",
+ object: &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "statefulset-v1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "statefulset-v1"},
+ },
+ {
+ name: "appsv1beta1 StatefulSet",
+ object: &appsv1beta1.StatefulSet{
+ Spec: appsv1beta1.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "statefulset-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "statefulset-beta1"},
+ },
+ {
+ name: "appsv1beta2 StatefulSet",
+ object: &appsv1beta2.StatefulSet{
+ Spec: appsv1beta2.StatefulSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "statefulset-beta2"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "statefulset-beta2"},
+ },
+ {
+ name: "extensionsv1beta1 DaemonSet",
+ object: &extensionsv1beta1.DaemonSet{
+ Spec: extensionsv1beta1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "daemonset-ext-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "daemonset-ext-beta1"},
+ },
+ {
+ name: "appsv1 DaemonSet",
+ object: &appsv1.DaemonSet{
+ Spec: appsv1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "daemonset-v1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "daemonset-v1"},
+ },
+ {
+ name: "appsv1beta2 DaemonSet",
+ object: &appsv1beta2.DaemonSet{
+ Spec: appsv1beta2.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "daemonset-beta2"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "daemonset-beta2"},
+ },
+ {
+ name: "extensionsv1beta1 Deployment",
+ object: &extensionsv1beta1.Deployment{
+ Spec: extensionsv1beta1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-ext-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-ext-beta1"},
+ },
+ {
+ name: "appsv1 Deployment",
+ object: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-v1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-v1"},
+ },
+ {
+ name: "appsv1beta1 Deployment",
+ object: &appsv1beta1.Deployment{
+ Spec: appsv1beta1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-beta1"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-beta1"},
+ },
+ {
+ name: "appsv1beta2 Deployment",
+ object: &appsv1beta2.Deployment{
+ Spec: appsv1beta2.DeploymentSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "deployment-beta2"}},
+ },
+ },
+ expectedLabels: map[string]string{"app": "deployment-beta2"},
+ },
+ {
+ name: "batchv1 Job",
+ object: &batchv1.Job{
+ Spec: batchv1.JobSpec{
+ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"job": "batch-job"}},
+ },
+ },
+ expectedLabels: map[string]string{"job": "batch-job"},
+ },
+ {
+ name: "corev1 Service with selector",
+ object: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{Name: "svc"},
+ Spec: corev1.ServiceSpec{
+ Selector: map[string]string{"svc": "yes"},
+ },
+ },
+ expectError: false,
+ expectedLabels: map[string]string{"svc": "yes"},
+ },
+ {
+ name: "corev1 Service without selector",
+ object: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{Name: "svc"},
+ Spec: corev1.ServiceSpec{Selector: map[string]string{}},
+ },
+ expectError: true,
+ errorContains: "invalid service 'svc': Service is defined without a selector",
+ },
+ {
+ name: "invalid label selector",
+ object: &appsv1.ReplicaSet{
+ Spec: appsv1.ReplicaSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "foo",
+ Operator: "InvalidOperator",
+ Values: []string{"bar"},
+ },
+ },
+ },
+ },
+ },
+ expectError: true,
+ errorContains: "invalid label selector:",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ selector, err := SelectorsForObject(tt.object.(runtime.Object))
+ if tt.expectError {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), tt.errorContains)
+ } else {
+ assert.NoError(t, err)
+ expected := labels.Set(tt.expectedLabels)
+ assert.True(t, selector.Matches(expected), "expected selector to match")
+ }
+ })
+ }
+}
+
+func TestLegacyWaiter_waitForPodSuccess(t *testing.T) {
+ lw := &legacyWaiter{}
+
+ tests := []struct {
+ name string
+ obj runtime.Object
+ wantDone bool
+ wantErr bool
+ errMessage string
+ }{
+ {
+ name: "pod succeeded",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod1"},
+ Status: corev1.PodStatus{Phase: corev1.PodSucceeded},
+ },
+ wantDone: true,
+ wantErr: false,
+ },
+ {
+ name: "pod failed",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod2"},
+ Status: corev1.PodStatus{Phase: corev1.PodFailed},
+ },
+ wantDone: true,
+ wantErr: true,
+ errMessage: "pod pod2 failed",
+ },
+ {
+ name: "pod pending",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod3"},
+ Status: corev1.PodStatus{Phase: corev1.PodPending},
+ },
+ wantDone: false,
+ wantErr: false,
+ },
+ {
+ name: "pod running",
+ obj: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod4"},
+ Status: corev1.PodStatus{Phase: corev1.PodRunning},
+ },
+ wantDone: false,
+ wantErr: false,
+ },
+ {
+ name: "wrong object type",
+ obj: &metav1.Status{},
+ wantDone: true,
+ wantErr: true,
+ errMessage: "expected foo to be a *v1.Pod, got *v1.Status",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ done, err := lw.waitForPodSuccess(tt.obj, "foo")
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("expected error, got none")
+ } else if !strings.Contains(err.Error(), tt.errMessage) {
+ t.Errorf("expected error to contain %q, got %q", tt.errMessage, err.Error())
+ }
+ } else if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if done != tt.wantDone {
+ t.Errorf("got done=%v, want %v", done, tt.wantDone)
+ }
+ })
+ }
+}
+
+func TestLegacyWaiter_waitForJob(t *testing.T) {
+ lw := &legacyWaiter{}
+
+ tests := []struct {
+ name string
+ obj runtime.Object
+ wantDone bool
+ wantErr bool
+ errMessage string
+ }{
+ {
+ name: "job complete",
+ obj: &batchv1.Job{
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobComplete,
+ Status: "True",
+ },
+ },
+ },
+ },
+ wantDone: true,
+ wantErr: false,
+ },
+ {
+ name: "job failed",
+ obj: &batchv1.Job{
+ Status: batchv1.JobStatus{
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobFailed,
+ Status: "True",
+ Reason: "FailedReason",
+ },
+ },
+ },
+ },
+ wantDone: true,
+ wantErr: true,
+ errMessage: "job test-job failed: FailedReason",
+ },
+ {
+ name: "job in progress",
+ obj: &batchv1.Job{
+ Status: batchv1.JobStatus{
+ Active: 1,
+ Failed: 0,
+ Succeeded: 0,
+ Conditions: []batchv1.JobCondition{
+ {
+ Type: batchv1.JobComplete,
+ Status: "False",
+ },
+ {
+ Type: batchv1.JobFailed,
+ Status: "False",
+ },
+ },
+ },
+ },
+ wantDone: false,
+ wantErr: false,
+ },
+ {
+ name: "wrong object type",
+ obj: &metav1.Status{},
+ wantDone: true,
+ wantErr: true,
+ errMessage: "expected test-job to be a *batch.Job, got *v1.Status",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ done, err := lw.waitForJob(tt.obj, "test-job")
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("expected error, got none")
+ } else if !strings.Contains(err.Error(), tt.errMessage) {
+ t.Errorf("expected error to contain %q, got %q", tt.errMessage, err.Error())
+ }
+ } else if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ if done != tt.wantDone {
+ t.Errorf("got done=%v, want %v", done, tt.wantDone)
+ }
+ })
+ }
+}
+
+func TestLegacyWaiter_isRetryableError(t *testing.T) {
+ lw := &legacyWaiter{}
+
+ info := &resource.Info{
+ Name: "test-resource",
+ }
+
+ tests := []struct {
+ name string
+ err error
+ wantRetry bool
+ description string
+ }{
+ {
+ name: "nil error",
+ err: nil,
+ wantRetry: false,
+ },
+ {
+ name: "status error - 0 code",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: 0}},
+ wantRetry: true,
+ },
+ {
+ name: "status error - 429 (TooManyRequests)",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusTooManyRequests}},
+ wantRetry: true,
+ },
+ {
+ name: "status error - 503",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusServiceUnavailable}},
+ wantRetry: true,
+ },
+ {
+ name: "status error - 501 (NotImplemented)",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusNotImplemented}},
+ wantRetry: false,
+ },
+ {
+ name: "status error - 400 (Bad Request)",
+ err: &apierrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusBadRequest}},
+ wantRetry: false,
+ },
+ {
+ name: "non-status error",
+ err: fmt.Errorf("some generic error"),
+ wantRetry: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := lw.isRetryableError(tt.err, info)
+ if got != tt.wantRetry {
+ t.Errorf("isRetryableError() = %v, want %v", got, tt.wantRetry)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/postrenderer/postrenderer.go b/helm/pkg/postrenderer/postrenderer.go
new file mode 100644
index 000000000..55e6d3adf
--- /dev/null
+++ b/helm/pkg/postrenderer/postrenderer.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrenderer
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "path/filepath"
+
+ "helm.sh/helm/v4/internal/plugin/schema"
+
+ "helm.sh/helm/v4/internal/plugin"
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+// PostRenderer is an interface different plugin runtimes
+// it may be also be used without the factory for custom post-renderers
+type PostRenderer interface {
+ // Run expects a single buffer filled with Helm rendered manifests. It
+ // expects the modified results to be returned on a separate buffer or an
+ // error if there was an issue or failure while running the post render step
+ Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error)
+}
+
+// NewPostRendererPlugin creates a PostRenderer that uses the plugin's Runtime
+func NewPostRendererPlugin(settings *cli.EnvSettings, pluginName string, args ...string) (PostRenderer, error) {
+ descriptor := plugin.Descriptor{
+ Name: pluginName,
+ Type: "postrenderer/v1",
+ }
+ p, err := plugin.FindPlugin(filepath.SplitList(settings.PluginsDirectory), descriptor)
+ if err != nil {
+ return nil, err
+ }
+
+ return &postRendererPlugin{
+ plugin: p,
+ args: args,
+ settings: settings,
+ }, nil
+}
+
+// postRendererPlugin implements PostRenderer by delegating to the plugin's Runtime
+type postRendererPlugin struct {
+ plugin plugin.Plugin
+ args []string
+ settings *cli.EnvSettings
+}
+
+// Run implements PostRenderer by using the plugin's Runtime
+func (r *postRendererPlugin) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+ input := &plugin.Input{
+ Message: schema.InputMessagePostRendererV1{
+ ExtraArgs: r.args,
+ Manifests: renderedManifests,
+ },
+ }
+ output, err := r.plugin.Invoke(context.Background(), input)
+ if err != nil {
+ return nil, fmt.Errorf("failed to invoke post-renderer plugin %q: %w", r.plugin.Metadata().Name, err)
+ }
+
+ outputMessage := output.Message.(schema.OutputMessagePostRendererV1)
+
+ // If the binary returned almost nothing, it's likely that it didn't
+ // successfully render anything
+ if len(bytes.TrimSpace(outputMessage.Manifests.Bytes())) == 0 {
+ return nil, fmt.Errorf("post-renderer %q produced empty output", r.plugin.Metadata().Name)
+ }
+
+ return outputMessage.Manifests, nil
+}
diff --git a/helm/pkg/postrenderer/postrenderer_test.go b/helm/pkg/postrenderer/postrenderer_test.go
new file mode 100644
index 000000000..824a1d179
--- /dev/null
+++ b/helm/pkg/postrenderer/postrenderer_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrenderer
+
+import (
+ "bytes"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "helm.sh/helm/v4/pkg/cli"
+)
+
+func TestNewPostRenderPluginRunWithNoOutput(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // the actual Run test uses a basic sed example, so skip this test on windows
+ t.Skip("skipping on windows")
+ }
+ is := assert.New(t)
+ s := cli.New()
+ s.PluginsDirectory = "testdata/plugins"
+ name := "postrenderer-v1"
+
+ renderer, err := NewPostRendererPlugin(s, name, "")
+ require.NoError(t, err)
+
+ _, err = renderer.Run(bytes.NewBufferString(""))
+ is.Error(err)
+}
+
+func TestNewPostRenderPluginWithOneArgsRun(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // the actual Run test uses a basic sed example, so skip this test on windows
+ t.Skip("skipping on windows")
+ }
+ is := assert.New(t)
+ s := cli.New()
+ s.PluginsDirectory = "testdata/plugins"
+ name := "postrenderer-v1"
+
+ renderer, err := NewPostRendererPlugin(s, name, "ARG1")
+ require.NoError(t, err)
+
+ output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
+ is.NoError(err)
+ is.Contains(output.String(), "ARG1")
+}
+
+func TestNewPostRenderPluginWithTwoArgsRun(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // the actual Run test uses a basic sed example, so skip this test on windows
+ t.Skip("skipping on windows")
+ }
+ is := assert.New(t)
+ s := cli.New()
+ s.PluginsDirectory = "testdata/plugins"
+ name := "postrenderer-v1"
+
+ renderer, err := NewPostRendererPlugin(s, name, "ARG1", "ARG2")
+ require.NoError(t, err)
+
+ output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
+ is.NoError(err)
+ is.Contains(output.String(), "ARG1 ARG2")
+}
diff --git a/helm/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml b/helm/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml
new file mode 100644
index 000000000..423a5191e
--- /dev/null
+++ b/helm/pkg/postrenderer/testdata/plugins/postrenderer-v1/plugin.yaml
@@ -0,0 +1,8 @@
+name: "postrenderer-v1"
+version: "1.2.3"
+type: postrenderer/v1
+apiVersion: v1
+runtime: subprocess
+runtimeConfig:
+ platformCommand:
+ - command: "${HELM_PLUGIN_DIR}/sed-test.sh"
diff --git a/helm/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh b/helm/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh
new file mode 100755
index 000000000..a016e398f
--- /dev/null
+++ b/helm/pkg/postrenderer/testdata/plugins/postrenderer-v1/sed-test.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+if [ $# -eq 0 ]; then
+ sed s/FOOTEST/BARTEST/g <&0
+else
+ sed s/FOOTEST/"$*"/g <&0
+fi
diff --git a/helm/pkg/provenance/doc.go b/helm/pkg/provenance/doc.go
new file mode 100644
index 000000000..dd14568d9
--- /dev/null
+++ b/helm/pkg/provenance/doc.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package provenance provides tools for establishing the authenticity of packages.
+
+In Helm, provenance is established via several factors. The primary factor is the
+cryptographic signature of a package. Package authors may sign packages, which in turn
+provide the necessary metadata to ensure the integrity of the package file, the
+metadata, and the referenced Docker images.
+
+A provenance file is clear-signed. This provides cryptographic verification that
+a particular block of information (metadata, archive file, images) have not
+been tampered with or altered. To learn more, read the GnuPG documentation on
+clear signatures:
+https://www.gnupg.org/gph/en/manual/x135.html
+
+The cryptography used by Helm should be compatible with OpenGPG. For example,
+you should be able to verify a signature by importing the desired public key
+and using `gpg --verify`, `keybase pgp verify`, or similar:
+
+ $ gpg --verify some.sig
+ gpg: Signature made Mon Jul 25 17:23:44 2016 MDT using RSA key ID 1FC18762
+ gpg: Good signature from "Helm Testing (This key should only be used for testing. DO NOT TRUST.) " [ultimate]
+*/
+package provenance // import "helm.sh/helm/v4/pkg/provenance"
diff --git a/helm/pkg/provenance/sign.go b/helm/pkg/provenance/sign.go
new file mode 100644
index 000000000..57af1ad42
--- /dev/null
+++ b/helm/pkg/provenance/sign.go
@@ -0,0 +1,394 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provenance
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/ProtonMail/go-crypto/openpgp" //nolint
+ "github.com/ProtonMail/go-crypto/openpgp/clearsign" //nolint
+ "github.com/ProtonMail/go-crypto/openpgp/packet" //nolint
+ "sigs.k8s.io/yaml"
+)
+
+var defaultPGPConfig = packet.Config{
+ DefaultHash: crypto.SHA512,
+}
+
+// SumCollection represents a collection of file and image checksums.
+//
+// Files are of the form:
+//
+// FILENAME: "sha256:SUM"
+//
+// Images are of the form:
+//
+// "IMAGE:TAG": "sha256:SUM"
+//
+// Docker optionally supports sha512, and if this is the case, the hash marker
+// will be 'sha512' instead of 'sha256'.
+type SumCollection struct {
+ Files map[string]string `json:"files"`
+ Images map[string]string `json:"images,omitempty"`
+}
+
+// Verification contains information about a verification operation.
+type Verification struct {
+ // SignedBy contains the entity that signed a package.
+ SignedBy *openpgp.Entity
+ // FileHash is the hash, prepended with the scheme, for the file that was verified.
+ FileHash string
+ // FileName is the name of the file that FileHash verifies.
+ FileName string
+}
+
+// Signatory signs things.
+//
+// Signatories can be constructed from a PGP private key file using NewFromFiles,
+// or they can be constructed manually by setting the Entity to a valid
+// PGP entity.
+//
+// The same Signatory can be used to sign or validate multiple packages.
+type Signatory struct {
+ // The signatory for this instance of Helm. This is used for signing.
+ Entity *openpgp.Entity
+ // The keyring for this instance of Helm. This is used for verification.
+ KeyRing openpgp.EntityList
+}
+
+// NewFromFiles constructs a new Signatory from the PGP key in the given filename.
+//
+// This will emit an error if it cannot find a valid GPG keyfile (entity) at the
+// given location.
+//
+// Note that the keyfile may have just a public key, just a private key, or
+// both. The Signatory methods may have different requirements of the keys. For
+// example, ClearSign must have a valid `openpgp.Entity.PrivateKey` before it
+// can sign something.
+func NewFromFiles(keyfile, keyringfile string) (*Signatory, error) {
+ e, err := loadKey(keyfile)
+ if err != nil {
+ return nil, err
+ }
+
+ ring, err := loadKeyRing(keyringfile)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Signatory{
+ Entity: e,
+ KeyRing: ring,
+ }, nil
+}
+
+// NewFromKeyring reads a keyring file and creates a Signatory.
+//
+// If id is not the empty string, this will also try to find an Entity in the
+// keyring whose name matches, and set that as the signing entity. It will return
+// an error if the id is not empty and also not found.
+func NewFromKeyring(keyringfile, id string) (*Signatory, error) {
+ ring, err := loadKeyRing(keyringfile)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &Signatory{KeyRing: ring}
+
+ // If the ID is empty, we can return now.
+ if id == "" {
+ return s, nil
+ }
+
+ // We're gonna go all GnuPG on this and look for a string that _contains_. If
+ // two or more keys contain the string and none are a direct match, we error
+ // out.
+ var candidate *openpgp.Entity
+ vague := false
+ for _, e := range ring {
+ for n := range e.Identities {
+ if n == id {
+ s.Entity = e
+ return s, nil
+ }
+ if strings.Contains(n, id) {
+ if candidate != nil {
+ vague = true
+ }
+ candidate = e
+ }
+ }
+ }
+ if vague {
+ return s, fmt.Errorf("more than one key contain the id %q", id)
+ }
+
+ s.Entity = candidate
+ return s, nil
+}
+
+// PassphraseFetcher returns a passphrase for decrypting keys.
+//
+// This is used as a callback to read a passphrase from some other location. The
+// given name is the Name field on the key, typically of the form:
+//
+// USER_NAME (COMMENT)
+type PassphraseFetcher func(name string) ([]byte, error)
+
+// DecryptKey decrypts a private key in the Signatory.
+//
+// If the key is not encrypted, this will return without error.
+//
+// If the key does not exist, this will return an error.
+//
+// If the key exists, but cannot be unlocked with the passphrase returned by
+// the PassphraseFetcher, this will return an error.
+//
+// If the key is successfully unlocked, it will return nil.
+func (s *Signatory) DecryptKey(fn PassphraseFetcher) error {
+ if s.Entity == nil {
+ return errors.New("private key not found")
+ } else if s.Entity.PrivateKey == nil {
+ return errors.New("provided key is not a private key. Try providing a keyring with secret keys")
+ }
+
+ // Nothing else to do if key is not encrypted.
+ if !s.Entity.PrivateKey.Encrypted {
+ return nil
+ }
+
+ fname := "Unknown"
+ for i := range s.Entity.Identities {
+ if i != "" {
+ fname = i
+ break
+ }
+ }
+
+ p, err := fn(fname)
+ if err != nil {
+ return err
+ }
+
+ return s.Entity.PrivateKey.Decrypt(p)
+}
+
+// ClearSign signs package data with the given key and pre-marshalled metadata.
+//
+// This is the core signing method that works with data in memory.
+// The Signatory must have a valid Entity.PrivateKey for this to work.
+func (s *Signatory) ClearSign(archiveData []byte, filename string, metadataBytes []byte) (string, error) {
+ if s.Entity == nil {
+ return "", errors.New("private key not found")
+ } else if s.Entity.PrivateKey == nil {
+ return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys")
+ }
+
+ out := bytes.NewBuffer(nil)
+
+ b, err := messageBlock(archiveData, filename, metadataBytes)
+ if err != nil {
+ return "", err
+ }
+
+ // Sign the buffer
+ w, err := clearsign.Encode(out, s.Entity.PrivateKey, &defaultPGPConfig)
+ if err != nil {
+ return "", err
+ }
+
+ _, err = io.Copy(w, b)
+
+ if err != nil {
+ // NB: We intentionally don't call `w.Close()` here! `w.Close()` is the method which
+ // actually does the PGP signing, and therefore is the part which uses the private key.
+ // In other words, if we call Close here, there's a risk that there's an attempt to use the
+ // private key to sign garbage data (since we know that io.Copy failed, `w` won't contain
+ // anything useful).
+ return "", fmt.Errorf("failed to write to clearsign encoder: %w", err)
+ }
+
+ err = w.Close()
+ if err != nil {
+ return "", fmt.Errorf("failed to either sign or armor message block: %w", err)
+ }
+
+ return out.String(), nil
+}
+
+// Verify checks a signature and verifies that it is legit for package data.
+// This is the core verification method that works with data in memory.
+func (s *Signatory) Verify(archiveData, provData []byte, filename string) (*Verification, error) {
+ ver := &Verification{}
+
+ // First verify the signature
+ block, _ := clearsign.Decode(provData)
+ if block == nil {
+ return ver, errors.New("signature block not found")
+ }
+
+ by, err := s.verifySignature(block)
+ if err != nil {
+ return ver, err
+ }
+ ver.SignedBy = by
+
+ // Second, verify the hash of the data.
+ sum, err := Digest(bytes.NewBuffer(archiveData))
+ if err != nil {
+ return ver, err
+ }
+ sums, err := parseMessageBlock(block.Plaintext)
+ if err != nil {
+ return ver, err
+ }
+
+ sum = "sha256:" + sum
+ if sha, ok := sums.Files[filename]; !ok {
+ return ver, fmt.Errorf("provenance does not contain a SHA for a file named %q", filename)
+ } else if sha != sum {
+ return ver, fmt.Errorf("sha256 sum does not match for %s: %q != %q", filename, sha, sum)
+ }
+ ver.FileHash = sum
+ ver.FileName = filename
+
+ // TODO: when image signing is added, verify that here.
+
+ return ver, nil
+}
+
+// verifySignature verifies that the given block is validly signed, and returns the signer.
+func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) {
+ return openpgp.CheckDetachedSignature(
+ s.KeyRing,
+ bytes.NewReader(block.Bytes),
+ block.ArmoredSignature.Body,
+ &defaultPGPConfig,
+ )
+}
+
+// messageBlock creates a message block from archive data and pre-marshalled metadata
+func messageBlock(archiveData []byte, filename string, metadataBytes []byte) (*bytes.Buffer, error) {
+ // Checksum the archive data
+ chash, err := Digest(bytes.NewBuffer(archiveData))
+ if err != nil {
+ return nil, err
+ }
+
+ sums := &SumCollection{
+ Files: map[string]string{
+ filename: "sha256:" + chash,
+ },
+ }
+
+ // Buffer the metadata + checksums YAML file
+ // FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP
+ // clearsign block. So we use ...\n, which is the YAML document end marker.
+ // http://yaml.org/spec/1.2/spec.html#id2800168
+ b := bytes.NewBuffer(metadataBytes)
+ b.WriteString("\n...\n")
+
+ data, err := yaml.Marshal(sums)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(data)
+
+ return b, nil
+}
+
+// parseMessageBlock parses a message block and returns only checksums (metadata ignored like upstream)
+func parseMessageBlock(data []byte) (*SumCollection, error) {
+ sc := &SumCollection{}
+
+ // We ignore metadata, just like upstream - only need checksums for verification
+ if err := ParseMessageBlock(data, nil, sc); err != nil {
+ return sc, err
+ }
+ return sc, nil
+}
+
+// ParseMessageBlock parses a message block containing metadata and checksums.
+//
+// This is the generic version that can work with any metadata type.
+// The metadata parameter should be a pointer to a struct that can be unmarshaled from YAML.
+func ParseMessageBlock(data []byte, metadata interface{}, sums *SumCollection) error {
+ parts := bytes.Split(data, []byte("\n...\n"))
+ if len(parts) < 2 {
+ return errors.New("message block must have at least two parts")
+ }
+
+ if metadata != nil {
+ if err := yaml.Unmarshal(parts[0], metadata); err != nil {
+ return err
+ }
+ }
+ return yaml.Unmarshal(parts[1], sums)
+}
+
+// loadKey loads a GPG key found at a particular path.
+func loadKey(keypath string) (*openpgp.Entity, error) {
+ f, err := os.Open(keypath)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ pr := packet.NewReader(f)
+ return openpgp.ReadEntity(pr)
+}
+
+func loadKeyRing(ringpath string) (openpgp.EntityList, error) {
+ f, err := os.Open(ringpath)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return openpgp.ReadKeyRing(f)
+}
+
+// DigestFile calculates a SHA256 hash (like Docker) for a given file.
+//
+// It takes the path to the archive file, and returns a string representation of
+// the SHA256 sum.
+//
+// This function can be used to generate a sum of any package archive file.
+func DigestFile(filename string) (string, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ return Digest(f)
+}
+
+// Digest hashes a reader and returns a SHA256 digest.
+//
+// Helm uses SHA256 as its default hash for all non-cryptographic applications.
+func Digest(in io.Reader) (string, error) {
+ hash := crypto.SHA256.New()
+ if _, err := io.Copy(hash, in); err != nil {
+ return "", nil
+ }
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
diff --git a/helm/pkg/provenance/sign_test.go b/helm/pkg/provenance/sign_test.go
new file mode 100644
index 000000000..1985e9eea
--- /dev/null
+++ b/helm/pkg/provenance/sign_test.go
@@ -0,0 +1,419 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package provenance
+
+import (
+ "crypto"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" //nolint
+ "github.com/ProtonMail/go-crypto/openpgp/packet" //nolint
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+)
+
+const (
+ // testKeyFile is the secret key.
+ // Generating keys should be done with `gpg --gen-key`. The current key
+ // was generated to match Go's defaults (RSA/RSA 2048). It has no pass
+ // phrase. Use `gpg --export-secret-keys helm-test` to export the secret.
+ testKeyfile = "testdata/helm-test-key.secret"
+
+ // testPasswordKeyfile is a keyfile with a password.
+ testPasswordKeyfile = "testdata/helm-password-key.secret"
+
+ // testPubfile is the public key file.
+ // Use `gpg --export helm-test` to export the public key.
+ testPubfile = "testdata/helm-test-key.pub"
+
+ // Generated name for the PGP key in testKeyFile.
+ testKeyName = `Helm Testing (This key should only be used for testing. DO NOT TRUST.) `
+
+ testPasswordKeyName = `password key (fake) `
+
+ testChartfile = "testdata/hashtest-1.2.3.tgz"
+
+ // testSigBlock points to a signature generated by an external tool.
+ // This file was generated with GnuPG:
+ // gpg --clearsign -u helm-test --openpgp testdata/msgblock.yaml
+ testSigBlock = "testdata/msgblock.yaml.asc"
+
+ // testTamperedSigBlock is a tampered copy of msgblock.yaml.asc
+ testTamperedSigBlock = "testdata/msgblock.yaml.tampered"
+
+ // testMixedKeyring points to a keyring containing RSA and ed25519 keys.
+ testMixedKeyring = "testdata/helm-mixed-keyring.pub"
+
+ // testSumfile points to a SHA256 sum generated by an external tool.
+ // We always want to validate against an external tool's representation to
+ // verify that we haven't done something stupid. This file was generated
+ // with shasum.
+ // shasum -a 256 hashtest-1.2.3.tgz > testdata/hashtest.sha256
+ testSumfile = "testdata/hashtest.sha256"
+)
+
+// testMessageBlock represents the expected message block for the testdata/hashtest chart.
+const testMessageBlock = `apiVersion: v1
+description: Test chart versioning
+name: hashtest
+version: 1.2.3
+
+...
+files:
+ hashtest-1.2.3.tgz: sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888
+`
+
+// loadChartMetadataForSigning is a test helper that loads chart metadata and marshals it to YAML bytes
+func loadChartMetadataForSigning(t *testing.T, chartPath string) []byte {
+ t.Helper()
+
+ chart, err := loader.LoadFile(chartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metadataBytes, err := yaml.Marshal(chart.Metadata)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return metadataBytes
+}
+
+func TestMessageBlock(t *testing.T) {
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := messageBlock(archiveData, filepath.Base(testChartfile), metadataBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := out.String()
+
+ if got != testMessageBlock {
+ t.Errorf("Expected:\n%q\nGot\n%q\n", testMessageBlock, got)
+ }
+}
+
+func TestParseMessageBlock(t *testing.T) {
+ sc, err := parseMessageBlock([]byte(testMessageBlock))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // parseMessageBlock only returns checksums, not metadata (like upstream)
+
+ if lsc := len(sc.Files); lsc != 1 {
+ t.Errorf("Expected 1 file, got %d", lsc)
+ }
+
+ if hash, ok := sc.Files["hashtest-1.2.3.tgz"]; !ok {
+ t.Errorf("hashtest file not found in Files")
+ } else if hash != "sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888" {
+ t.Errorf("Unexpected hash: %q", hash)
+ }
+}
+
+func TestLoadKey(t *testing.T) {
+ k, err := loadKey(testKeyfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := k.Identities[testKeyName]; !ok {
+ t.Errorf("Expected to load a key for user %q", testKeyName)
+ }
+}
+
+func TestLoadKeyRing(t *testing.T) {
+ k, err := loadKeyRing(testPubfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(k) > 1 {
+ t.Errorf("Expected 1, got %d", len(k))
+ }
+
+ for _, e := range k {
+ if ii, ok := e.Identities[testKeyName]; !ok {
+ t.Errorf("Expected %s in %v", testKeyName, ii)
+ }
+ }
+}
+
+func TestDigest(t *testing.T) {
+ f, err := os.Open(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ hash, err := Digest(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := readSumFile(testSumfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !strings.Contains(sig, hash) {
+ t.Errorf("Expected %s to be in %s", hash, sig)
+ }
+}
+
+func TestNewFromFiles(t *testing.T) {
+ s, err := NewFromFiles(testKeyfile, testPubfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := s.Entity.Identities[testKeyName]; !ok {
+ t.Errorf("Expected to load a key for user %q", testKeyName)
+ }
+}
+
+func TestDigestFile(t *testing.T) {
+ hash, err := DigestFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := readSumFile(testSumfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !strings.Contains(sig, hash) {
+ t.Errorf("Expected %s to be in %s", hash, sig)
+ }
+}
+
+func TestDecryptKey(t *testing.T) {
+ k, err := NewFromKeyring(testPasswordKeyfile, testPasswordKeyName)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !k.Entity.PrivateKey.Encrypted {
+ t.Fatal("Key is not encrypted")
+ }
+
+ // We give this a simple callback that returns the password.
+ if err := k.DecryptKey(func(_ string) ([]byte, error) {
+ return []byte("secret"), nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Re-read the key (since we already unlocked it)
+ k, err = NewFromKeyring(testPasswordKeyfile, testPasswordKeyName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Now we give it a bogus password.
+ if err := k.DecryptKey(func(_ string) ([]byte, error) {
+ return []byte("secrets_and_lies"), nil
+ }); err == nil {
+ t.Fatal("Expected an error when giving a bogus passphrase")
+ }
+}
+
+func TestClearSign(t *testing.T) {
+ signer, err := NewFromFiles(testKeyfile, testPubfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("Sig:\n%s", sig)
+
+ if !strings.Contains(sig, testMessageBlock) {
+ t.Errorf("expected message block to be in sig: %s", sig)
+ }
+}
+
+func TestMixedKeyringRSASigningAndVerification(t *testing.T) {
+ signer, err := NewFromFiles(testKeyfile, testMixedKeyring)
+ require.NoError(t, err)
+
+ require.NotEmpty(t, signer.KeyRing, "expected signer keyring to be loaded")
+
+ hasEdDSA := false
+ for _, entity := range signer.KeyRing {
+ if entity.PrimaryKey != nil && entity.PrimaryKey.PubKeyAlgo == packet.PubKeyAlgoEdDSA {
+ hasEdDSA = true
+ break
+ }
+
+ for _, subkey := range entity.Subkeys {
+ if subkey.PublicKey != nil && subkey.PublicKey.PubKeyAlgo == packet.PubKeyAlgoEdDSA {
+ hasEdDSA = true
+ break
+ }
+ }
+
+ if hasEdDSA {
+ break
+ }
+ }
+
+ assert.True(t, hasEdDSA, "expected %s to include an Ed25519 public key", testMixedKeyring)
+
+ require.NotNil(t, signer.Entity, "expected signer entity to be loaded")
+ require.NotNil(t, signer.Entity.PrivateKey, "expected signer private key to be loaded")
+ assert.Equal(t, packet.PubKeyAlgoRSA, signer.Entity.PrivateKey.PubKeyAlgo, "expected RSA key")
+
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ archiveData, err := os.ReadFile(testChartfile)
+ require.NoError(t, err)
+
+ sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes)
+ require.NoError(t, err, "failed to sign chart")
+
+ verification, err := signer.Verify(archiveData, []byte(sig), filepath.Base(testChartfile))
+ require.NoError(t, err, "failed to verify chart signature")
+
+ require.NotNil(t, verification.SignedBy, "expected verification to include signer")
+ require.NotNil(t, verification.SignedBy.PrimaryKey, "expected verification to include signer primary key")
+ assert.Equal(t, packet.PubKeyAlgoRSA, verification.SignedBy.PrimaryKey.PubKeyAlgo, "expected verification to report RSA key")
+
+ _, ok := verification.SignedBy.Identities[testKeyName]
+ assert.True(t, ok, "expected verification to be signed by %q", testKeyName)
+}
+
+// failSigner always fails to sign and returns an error
+type failSigner struct{}
+
+func (s failSigner) Public() crypto.PublicKey {
+ return nil
+}
+
+func (s failSigner) Sign(_ io.Reader, _ []byte, _ crypto.SignerOpts) ([]byte, error) {
+ return nil, fmt.Errorf("always fails")
+}
+
+func TestClearSignError(t *testing.T) {
+ signer, err := NewFromFiles(testKeyfile, testPubfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // ensure that signing always fails
+ signer.Entity.PrivateKey.PrivateKey = failSigner{}
+
+ metadataBytes := loadChartMetadataForSigning(t, testChartfile)
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := signer.ClearSign(archiveData, filepath.Base(testChartfile), metadataBytes)
+ if err == nil {
+ t.Fatal("didn't get an error from ClearSign but expected one")
+ }
+
+ if sig != "" {
+ t.Fatalf("expected an empty signature after failed ClearSign but got %q", sig)
+ }
+}
+
+func TestVerify(t *testing.T) {
+ signer, err := NewFromFiles(testKeyfile, testPubfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the chart file data
+ archiveData, err := os.ReadFile(testChartfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read the signature file data
+ sigData, err := os.ReadFile(testSigBlock)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ver, err := signer.Verify(archiveData, sigData, filepath.Base(testChartfile)); err != nil {
+ t.Errorf("Failed to pass verify. Err: %s", err)
+ } else if len(ver.FileHash) == 0 {
+ t.Error("Verification is missing hash.")
+ } else if ver.SignedBy == nil {
+ t.Error("No SignedBy field")
+ } else if ver.FileName != filepath.Base(testChartfile) {
+ t.Errorf("FileName is unexpectedly %q", ver.FileName)
+ }
+
+ // Read the tampered signature file data
+ tamperedSigData, err := os.ReadFile(testTamperedSigBlock)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = signer.Verify(archiveData, tamperedSigData, filepath.Base(testChartfile)); err == nil {
+ t.Errorf("Expected %s to fail.", testTamperedSigBlock)
+ }
+
+ switch err.(type) {
+ case pgperrors.SignatureError:
+ t.Logf("Tampered sig block error: %s (%T)", err, err)
+ default:
+ t.Errorf("Expected invalid signature error, got %q (%T)", err, err)
+ }
+}
+
+// readSumFile reads a file containing a sum generated by the UNIX shasum tool.
+func readSumFile(sumfile string) (string, error) {
+ data, err := os.ReadFile(sumfile)
+ if err != nil {
+ return "", err
+ }
+
+ sig := string(data)
+ parts := strings.SplitN(sig, " ", 2)
+ return parts[0], nil
+}
diff --git a/helm/pkg/provenance/testdata/hashtest-1.2.3.tgz b/helm/pkg/provenance/testdata/hashtest-1.2.3.tgz
new file mode 100644
index 000000000..7bbc533ca
Binary files /dev/null and b/helm/pkg/provenance/testdata/hashtest-1.2.3.tgz differ
diff --git a/helm/pkg/provenance/testdata/hashtest-1.2.3.tgz.prov b/helm/pkg/provenance/testdata/hashtest-1.2.3.tgz.prov
new file mode 100755
index 000000000..3a788cd2e
--- /dev/null
+++ b/helm/pkg/provenance/testdata/hashtest-1.2.3.tgz.prov
@@ -0,0 +1,21 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+apiVersion: v1
+description: Test chart versioning
+name: hashtest
+version: 1.2.3
+
+...
+files:
+ hashtest-1.2.3.tgz: sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888
+-----BEGIN PGP SIGNATURE-----
+
+wsBcBAEBCgAQBQJcon2ICRCEO7+YH8GHYgAASEAIAHD4Rad+LF47qNydI+k7x3aC
+/qkdsqxE9kCUHtTJkZObE/Zmj2w3Opq0gcQftz4aJ2G9raqPDvwOzxnTxOkGfUdK
+qIye48gFHzr2a7HnMTWr+HLQc4Gg+9kysIwkW4TM8wYV10osysYjBrhcafrHzFSK
+791dBHhXP/aOrJQbFRob0GRFQ4pXdaSww1+kVaZLiKSPkkMKt9uk9Po1ggJYSIDX
+uzXNcr78jTWACqkAtwx8+CJ8yzcGeuXSVNABDgbmAgpY0YT+Bz/UOWq4Q7tyuWnS
+x9BKrvcb+Gc/6S0oK0Ffp8K4iSWYp79uH1bZ2oBS1yajA0c5h5i7qI3N4cabREw=
+=YgnR
+-----END PGP SIGNATURE-----
\ No newline at end of file
diff --git a/helm/pkg/provenance/testdata/hashtest.sha256 b/helm/pkg/provenance/testdata/hashtest.sha256
new file mode 100644
index 000000000..05173edf8
--- /dev/null
+++ b/helm/pkg/provenance/testdata/hashtest.sha256
@@ -0,0 +1 @@
+c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888 hashtest-1.2.3.tgz
diff --git a/helm/pkg/provenance/testdata/hashtest/.helmignore b/helm/pkg/provenance/testdata/hashtest/.helmignore
new file mode 100644
index 000000000..435b756d8
--- /dev/null
+++ b/helm/pkg/provenance/testdata/hashtest/.helmignore
@@ -0,0 +1,5 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+.git
diff --git a/helm/pkg/provenance/testdata/hashtest/Chart.yaml b/helm/pkg/provenance/testdata/hashtest/Chart.yaml
new file mode 100644
index 000000000..6edf5f8b6
--- /dev/null
+++ b/helm/pkg/provenance/testdata/hashtest/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: Test chart versioning
+name: hashtest
+version: 1.2.3
diff --git a/helm/pkg/provenance/testdata/hashtest/values.yaml b/helm/pkg/provenance/testdata/hashtest/values.yaml
new file mode 100644
index 000000000..0827a01fb
--- /dev/null
+++ b/helm/pkg/provenance/testdata/hashtest/values.yaml
@@ -0,0 +1,4 @@
+# Default values for hashtest.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/provenance/testdata/helm-mixed-keyring.pub b/helm/pkg/provenance/testdata/helm-mixed-keyring.pub
new file mode 100644
index 000000000..7985bd20f
Binary files /dev/null and b/helm/pkg/provenance/testdata/helm-mixed-keyring.pub differ
diff --git a/helm/pkg/provenance/testdata/helm-password-key.secret b/helm/pkg/provenance/testdata/helm-password-key.secret
new file mode 100644
index 000000000..03c8aa583
Binary files /dev/null and b/helm/pkg/provenance/testdata/helm-password-key.secret differ
diff --git a/helm/pkg/provenance/testdata/helm-test-key.pub b/helm/pkg/provenance/testdata/helm-test-key.pub
new file mode 100644
index 000000000..38714f25a
Binary files /dev/null and b/helm/pkg/provenance/testdata/helm-test-key.pub differ
diff --git a/helm/pkg/provenance/testdata/helm-test-key.secret b/helm/pkg/provenance/testdata/helm-test-key.secret
new file mode 100644
index 000000000..a966aef93
Binary files /dev/null and b/helm/pkg/provenance/testdata/helm-test-key.secret differ
diff --git a/helm/pkg/provenance/testdata/msgblock.yaml b/helm/pkg/provenance/testdata/msgblock.yaml
new file mode 100644
index 000000000..c16293ffc
--- /dev/null
+++ b/helm/pkg/provenance/testdata/msgblock.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Test chart versioning
+name: hashtest
+version: 1.2.3
+
+...
+files:
+ hashtest-1.2.3.tgz: sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888
diff --git a/helm/pkg/provenance/testdata/msgblock.yaml.asc b/helm/pkg/provenance/testdata/msgblock.yaml.asc
new file mode 100644
index 000000000..b4187b742
--- /dev/null
+++ b/helm/pkg/provenance/testdata/msgblock.yaml.asc
@@ -0,0 +1,22 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+apiVersion: v1
+description: Test chart versioning
+name: hashtest
+version: 1.2.3
+
+...
+files:
+ hashtest-1.2.3.tgz: sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888
+-----BEGIN PGP SIGNATURE-----
+
+iQFJBAEBCgAzFiEEXmFTibU8o38O5gvThDu/mB/Bh2IFAlyiiDcVHGhlbG0tdGVz
+dGluZ0BoZWxtLnNoAAoJEIQ7v5gfwYdiILAH/2f3GMVh+ZY5a+szOBudcuivjTcz
+0Im1MwWQZfB1po3Yu7smWZbf5tJCzvVpYtvRlfa0nguuIh763MwOh9Q7dBXOLAxm
+VCxqHm3svnNenBNfOpIygaMTgMZKxI4RrsKBgwPOTmlNtKg2lVaCiJAI30TXE6bB
+/DwEYX0wmTssrAcSpTzOOSC+zHnPKew+5A3SY3ms+gAtVAcLepmJjI7RS7RhQxDl
+AG+rWYis5gpDrk3U9OG1EOxqbftOAMqUl/kwI9eu5cPouN85rWwMe5pvHAvuyr/y
+caYdlXDHTZsXmBuvfiUX6gqXtrpPCyKTCP+RzNf3+bXJM8m3u3gbMjGvKjU=
+=vHcU
+-----END PGP SIGNATURE-----
diff --git a/helm/pkg/provenance/testdata/msgblock.yaml.tampered b/helm/pkg/provenance/testdata/msgblock.yaml.tampered
new file mode 100644
index 000000000..f15811bb2
--- /dev/null
+++ b/helm/pkg/provenance/testdata/msgblock.yaml.tampered
@@ -0,0 +1,21 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA512
+
+description: Test chart versioning
+name: hashtest
+version: 1.2.3+tampered
+
+...
+files:
+ hashtest-1.2.3.tgz: sha256:8e90e879e2a04b1900570e1c198755e46e4706d70b0e79f5edabfac7900e4e75
+-----BEGIN PGP SIGNATURE-----
+Comment: GPGTools - https://gpgtools.org
+
+iQEcBAEBCgAGBQJXlp8KAAoJEIQ7v5gfwYdiE7sIAJYDiza+asekeooSXLvQiK+G
+PKnveqQpx49EZ6L7Y7UlW25SyH8EjXXHeJysDywCXF3w4luxN9n56ffU0KEW11IY
+F+JSjmgIWLS6ti7ZAGEi6JInQ/30rOAIpTEBRBL2IueW3m63mezrGK6XkBlGqpor
+C9WKeqLi+DWlMoBtsEy3Uk0XP6pn/qBFICYAbLQQU0sCCUT8CBA8f8aidxi7aw9t
+i404yYF+Dvc6i4JlSG77SV0ZJBWllUvsWoCd9Jli0NAuaMqmE7mzcEt/dE+Fm2Ql
+Bx3tr1WS4xTRiFQdcOttOl93H+OaHTh+Y0qqLTzzpCvqmttG0HfI6lMeCs7LeyA=
+=vEK+
+-----END PGP SIGNATURE-----
diff --git a/helm/pkg/provenance/testdata/regen-hashtest.sh b/helm/pkg/provenance/testdata/regen-hashtest.sh
new file mode 100755
index 000000000..4381fd0b1
--- /dev/null
+++ b/helm/pkg/provenance/testdata/regen-hashtest.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+helm package hashtest
+shasum -a 256 hashtest-1.2.3.tgz > hashtest.sha256
diff --git a/helm/pkg/pusher/doc.go b/helm/pkg/pusher/doc.go
new file mode 100644
index 000000000..df89ab112
--- /dev/null
+++ b/helm/pkg/pusher/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package pusher provides a generalized tool for uploading data by scheme.
+This provides a method by which the plugin system can load arbitrary protocol
+handlers based upon a URL scheme.
+*/
+package pusher
diff --git a/helm/pkg/pusher/ocipusher.go b/helm/pkg/pusher/ocipusher.go
new file mode 100644
index 000000000..f03188391
--- /dev/null
+++ b/helm/pkg/pusher/ocipusher.go
@@ -0,0 +1,160 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "helm.sh/helm/v4/internal/tlsutil"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// OCIPusher is the default OCI backend handler
+type OCIPusher struct {
+ opts options
+}
+
+// Push performs a Push from repo.Pusher.
+func (pusher *OCIPusher) Push(chartRef, href string, options ...Option) error {
+ for _, opt := range options {
+ opt(&pusher.opts)
+ }
+ return pusher.push(chartRef, href)
+}
+
+func (pusher *OCIPusher) push(chartRef, href string) error {
+ stat, err := os.Stat(chartRef)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return fmt.Errorf("%s: no such file", chartRef)
+ }
+ return err
+ }
+ if stat.IsDir() {
+ return errors.New("cannot push directory, must provide chart archive (.tgz)")
+ }
+
+ meta, err := loader.Load(chartRef)
+ if err != nil {
+ return err
+ }
+
+ client := pusher.opts.registryClient
+ if client == nil {
+ c, err := pusher.newRegistryClient()
+ if err != nil {
+ return err
+ }
+ client = c
+ }
+
+ chartBytes, err := os.ReadFile(chartRef)
+ if err != nil {
+ return err
+ }
+
+ var pushOpts []registry.PushOption
+ provRef := fmt.Sprintf("%s.prov", chartRef)
+ if _, err := os.Stat(provRef); err == nil {
+ provBytes, err := os.ReadFile(provRef)
+ if err != nil {
+ return err
+ }
+ pushOpts = append(pushOpts, registry.PushOptProvData(provBytes))
+ }
+
+ ref := fmt.Sprintf("%s:%s",
+ path.Join(strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)), meta.Metadata.Name),
+ meta.Metadata.Version)
+
+ // The time the chart was "created" is semantically the time the chart archive file was last written(modified)
+ chartArchiveFileCreatedTime := stat.ModTime()
+ pushOpts = append(pushOpts, registry.PushOptCreationTime(chartArchiveFileCreatedTime.Format(time.RFC3339)))
+
+ _, err = client.Push(chartBytes, ref, pushOpts...)
+ return err
+}
+
+// NewOCIPusher constructs a valid OCI client as a Pusher
+func NewOCIPusher(ops ...Option) (Pusher, error) {
+ var client OCIPusher
+
+ for _, opt := range ops {
+ opt(&client.opts)
+ }
+
+ return &client, nil
+}
+
+func (pusher *OCIPusher) newRegistryClient() (*registry.Client, error) {
+ if (pusher.opts.certFile != "" && pusher.opts.keyFile != "") || pusher.opts.caFile != "" || pusher.opts.insecureSkipTLSVerify {
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(pusher.opts.insecureSkipTLSVerify),
+ tlsutil.WithCertKeyPairFiles(pusher.opts.certFile, pusher.opts.keyFile),
+ tlsutil.WithCAFile(pusher.opts.caFile),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("can't create TLS config for client: %w", err)
+ }
+
+ registryClient, err := registry.NewClient(
+ registry.ClientOptHTTPClient(&http.Client{
+ // From https://github.com/google/go-containerregistry/blob/31786c6cbb82d6ec4fb8eb79cd9387905130534e/pkg/v1/remote/options.go#L87
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ // By default we wrap the transport in retries, so reduce the
+ // default dial timeout to 5s to avoid 5x 30s of connection
+ // timeouts when doing the "ping" on certain http registries.
+ Timeout: 5 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ TLSClientConfig: tlsConf,
+ },
+ }),
+ registry.ClientOptEnableCache(true),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return registryClient, nil
+ }
+
+ opts := []registry.ClientOption{registry.ClientOptEnableCache(true)}
+ if pusher.opts.plainHTTP {
+ opts = append(opts, registry.ClientOptPlainHTTP())
+ }
+
+ registryClient, err := registry.NewClient(opts...)
+ if err != nil {
+ return nil, err
+ }
+ return registryClient, nil
+}
diff --git a/helm/pkg/pusher/ocipusher_test.go b/helm/pkg/pusher/ocipusher_test.go
new file mode 100644
index 000000000..b7d362681
--- /dev/null
+++ b/helm/pkg/pusher/ocipusher_test.go
@@ -0,0 +1,428 @@
+//go:build !windows
+
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+func TestNewOCIPusher(t *testing.T) {
+ p, err := NewOCIPusher()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := p.(*OCIPusher); !ok {
+ t.Fatal("Expected NewOCIPusher to produce an *OCIPusher")
+ }
+
+ cd := "../../testdata"
+ join := filepath.Join
+ ca, pub, priv := join(cd, "rootca.crt"), join(cd, "crt.pem"), join(cd, "key.pem")
+ insecureSkipTLSVerify := false
+ plainHTTP := false
+
+ // Test with options
+ p, err = NewOCIPusher(
+ WithTLSClientConfig(pub, priv, ca),
+ WithInsecureSkipTLSVerify(insecureSkipTLSVerify),
+ WithPlainHTTP(plainHTTP),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ op, ok := p.(*OCIPusher)
+ if !ok {
+ t.Fatal("Expected NewOCIPusher to produce an *OCIPusher")
+ }
+
+ if op.opts.certFile != pub {
+ t.Errorf("Expected NewOCIPusher to contain %q as the public key file, got %q", pub, op.opts.certFile)
+ }
+
+ if op.opts.keyFile != priv {
+ t.Errorf("Expected NewOCIPusher to contain %q as the private key file, got %q", priv, op.opts.keyFile)
+ }
+
+ if op.opts.caFile != ca {
+ t.Errorf("Expected NewOCIPusher to contain %q as the CA file, got %q", ca, op.opts.caFile)
+ }
+
+ if op.opts.plainHTTP != plainHTTP {
+ t.Errorf("Expected NewOCIPusher to have plainHTTP as %t, got %t", plainHTTP, op.opts.plainHTTP)
+ }
+
+ if op.opts.insecureSkipTLSVerify != insecureSkipTLSVerify {
+ t.Errorf("Expected NewOCIPusher to have insecureSkipVerifyTLS as %t, got %t", insecureSkipTLSVerify, op.opts.insecureSkipTLSVerify)
+ }
+
+ // Test if setting registryClient is being passed to the ops
+ registryClient, err := registry.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p, err = NewOCIPusher(
+ WithRegistryClient(registryClient),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ op, ok = p.(*OCIPusher)
+ if !ok {
+ t.Fatal("expected NewOCIPusher to produce an *OCIPusher")
+ }
+
+ if op.opts.registryClient != registryClient {
+ t.Errorf("Expected NewOCIPusher to contain %p as RegistryClient, got %p", registryClient, op.opts.registryClient)
+ }
+}
+
+func TestOCIPusher_Push_ErrorHandling(t *testing.T) {
+ tests := []struct {
+ name string
+ chartRef string
+ expectedError string
+ setupFunc func() string
+ }{
+ {
+ name: "non-existent file",
+ chartRef: "/non/existent/file.tgz",
+ expectedError: "no such file",
+ },
+ {
+ name: "directory instead of file",
+ expectedError: "cannot push directory, must provide chart archive (.tgz)",
+ setupFunc: func() string {
+ tempDir := t.TempDir()
+ return tempDir
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pusher, err := NewOCIPusher()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ chartRef := tt.chartRef
+ if tt.setupFunc != nil {
+ chartRef = tt.setupFunc()
+ }
+
+ err = pusher.Push(chartRef, "oci://localhost:5000/test")
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+
+ if !strings.Contains(err.Error(), tt.expectedError) {
+ t.Errorf("Expected error containing %q, got %q", tt.expectedError, err.Error())
+ }
+ })
+ }
+}
+
+func TestOCIPusher_newRegistryClient(t *testing.T) {
+ cd := "../../testdata"
+ join := filepath.Join
+ ca, pub, priv := join(cd, "rootca.crt"), join(cd, "crt.pem"), join(cd, "key.pem")
+
+ tests := []struct {
+ name string
+ opts []Option
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "plain HTTP",
+ opts: []Option{WithPlainHTTP(true)},
+ },
+ {
+ name: "with TLS client config",
+ opts: []Option{
+ WithTLSClientConfig(pub, priv, ca),
+ },
+ },
+ {
+ name: "with insecure skip TLS verify",
+ opts: []Option{
+ WithInsecureSkipTLSVerify(true),
+ },
+ },
+ {
+ name: "with cert and key only",
+ opts: []Option{
+ WithTLSClientConfig(pub, priv, ""),
+ },
+ },
+ {
+ name: "with CA file only",
+ opts: []Option{
+ WithTLSClientConfig("", "", ca),
+ },
+ },
+ {
+ name: "default client without options",
+ opts: []Option{},
+ },
+ {
+ name: "invalid cert file",
+ opts: []Option{
+ WithTLSClientConfig("/non/existent/cert.pem", priv, ca),
+ },
+ expectError: true,
+ errorContains: "can't create TLS config",
+ },
+ {
+ name: "invalid key file",
+ opts: []Option{
+ WithTLSClientConfig(pub, "/non/existent/key.pem", ca),
+ },
+ expectError: true,
+ errorContains: "can't create TLS config",
+ },
+ {
+ name: "invalid CA file",
+ opts: []Option{
+ WithTLSClientConfig("", "", "/non/existent/ca.crt"),
+ },
+ expectError: true,
+ errorContains: "can't create TLS config",
+ },
+ {
+ name: "combined TLS options",
+ opts: []Option{
+ WithTLSClientConfig(pub, priv, ca),
+ WithInsecureSkipTLSVerify(true),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pusher, err := NewOCIPusher(tt.opts...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ op, ok := pusher.(*OCIPusher)
+ if !ok {
+ t.Fatal("Expected *OCIPusher")
+ }
+
+ client, err := op.newRegistryClient()
+ if tt.expectError {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("Expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if client == nil {
+ t.Fatal("Expected non-nil registry client")
+ }
+ }
+ })
+ }
+}
+
+func TestOCIPusher_Push_ChartOperations(t *testing.T) {
+ // Path to test charts
+ chartPath := "../../pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz"
+ chartWithProvPath := "../../pkg/cmd/testdata/testcharts/signtest-0.1.0.tgz"
+
+ tests := []struct {
+ name string
+ chartRef string
+ href string
+ options []Option
+ setupFunc func(t *testing.T) (string, func())
+ expectError bool
+ errorContains string
+ }{
+ {
+ name: "invalid chart file",
+ chartRef: "../../pkg/action/testdata/charts/corrupted-compressed-chart.tgz",
+ href: "oci://localhost:5000/test",
+ expectError: true,
+ errorContains: "does not appear to be a gzipped archive",
+ },
+ {
+ name: "chart read error",
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ // Create a valid chart file that we'll make unreadable
+ tempDir := t.TempDir()
+ tempChart := filepath.Join(tempDir, "temp-chart.tgz")
+
+ // Copy a valid chart
+ src, err := os.Open(chartPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ dst, err := os.Create(tempChart)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := io.Copy(dst, src); err != nil {
+ t.Fatal(err)
+ }
+ dst.Close()
+
+ // Make the file unreadable
+ if err := os.Chmod(tempChart, 0000); err != nil {
+ t.Fatal(err)
+ }
+
+ return tempChart, func() {
+ os.Chmod(tempChart, 0644) // Restore permissions for cleanup
+ }
+ },
+ href: "oci://localhost:5000/test",
+ expectError: true,
+ errorContains: "permission denied",
+ },
+ {
+ name: "push with provenance file - loading phase",
+ chartRef: chartWithProvPath,
+ href: "oci://registry.example.com/charts",
+ setupFunc: func(t *testing.T) (string, func()) {
+ t.Helper()
+ // Copy chart and create a .prov file for it
+ tempDir := t.TempDir()
+ tempChart := filepath.Join(tempDir, "signtest-0.1.0.tgz")
+ tempProv := filepath.Join(tempDir, "signtest-0.1.0.tgz.prov")
+
+ // Copy chart file
+ src, err := os.Open(chartWithProvPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ dst, err := os.Create(tempChart)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := io.Copy(dst, src); err != nil {
+ t.Fatal(err)
+ }
+ dst.Close()
+
+ // Create provenance file
+ if err := os.WriteFile(tempProv, []byte("test provenance data"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ return tempChart, func() {}
+ },
+ expectError: true, // Will fail at the registry push step
+ errorContains: "", // Error depends on registry client behavior
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ chartRef := tt.chartRef
+ var cleanup func()
+
+ if tt.setupFunc != nil {
+ chartRef, cleanup = tt.setupFunc(t)
+ if cleanup != nil {
+ defer cleanup()
+ }
+ }
+
+ // Skip test if chart file doesn't exist and we're not expecting an error
+ if _, err := os.Stat(chartRef); err != nil && !tt.expectError {
+ t.Skipf("Test chart %s not found, skipping test", chartRef)
+ }
+
+ pusher, err := NewOCIPusher(tt.options...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = pusher.Push(chartRef, tt.href)
+
+ if tt.expectError {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ if tt.errorContains != "" && !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("Expected error containing %q, got %q", tt.errorContains, err.Error())
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ }
+ })
+ }
+}
+
+func TestOCIPusher_Push_MultipleOptions(t *testing.T) {
+ chartPath := "../../pkg/cmd/testdata/testcharts/compressedchart-0.1.0.tgz"
+
+ // Skip test if chart file doesn't exist
+ if _, err := os.Stat(chartPath); err != nil {
+ t.Skipf("Test chart %s not found, skipping test", chartPath)
+ }
+
+ pusher, err := NewOCIPusher()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test that multiple options are applied correctly
+ err = pusher.Push(chartPath, "oci://localhost:5000/test",
+ WithPlainHTTP(true),
+ WithInsecureSkipTLSVerify(true),
+ )
+
+ // We expect an error since we're not actually pushing to a registry
+ if err == nil {
+ t.Fatal("Expected error when pushing without a valid registry")
+ }
+
+ // Verify options were applied
+ op := pusher.(*OCIPusher)
+ if !op.opts.plainHTTP {
+ t.Error("Expected plainHTTP option to be applied")
+ }
+ if !op.opts.insecureSkipTLSVerify {
+ t.Error("Expected insecureSkipTLSVerify option to be applied")
+ }
+}
diff --git a/helm/pkg/pusher/pusher.go b/helm/pkg/pusher/pusher.go
new file mode 100644
index 000000000..8ce78b011
--- /dev/null
+++ b/helm/pkg/pusher/pusher.go
@@ -0,0 +1,118 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+ "fmt"
+ "slices"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// options are generic parameters to be provided to the pusher during instantiation.
+//
+// Pushers may or may not ignore these parameters as they are passed in.
+type options struct {
+ registryClient *registry.Client
+ certFile string
+ keyFile string
+ caFile string
+ insecureSkipTLSVerify bool
+ plainHTTP bool
+}
+
+// Option allows specifying various settings configurable by the user for overriding the defaults
+// used when performing Push operations with the Pusher.
+type Option func(*options)
+
+// WithRegistryClient sets the registryClient option.
+func WithRegistryClient(client *registry.Client) Option {
+ return func(opts *options) {
+ opts.registryClient = client
+ }
+}
+
+// WithTLSClientConfig sets the client auth with the provided credentials.
+func WithTLSClientConfig(certFile, keyFile, caFile string) Option {
+ return func(opts *options) {
+ opts.certFile = certFile
+ opts.keyFile = keyFile
+ opts.caFile = caFile
+ }
+}
+
+// WithInsecureSkipTLSVerify determines if a TLS Certificate will be checked
+func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) Option {
+ return func(opts *options) {
+ opts.insecureSkipTLSVerify = insecureSkipTLSVerify
+ }
+}
+
+func WithPlainHTTP(plainHTTP bool) Option {
+ return func(opts *options) {
+ opts.plainHTTP = plainHTTP
+ }
+}
+
+// Pusher is an interface to support upload to the specified URL.
+type Pusher interface {
+ // Push file content by url string
+ Push(chartRef, url string, options ...Option) error
+}
+
+// Constructor is the function for every pusher which creates a specific instance
+// according to the configuration
+type Constructor func(options ...Option) (Pusher, error)
+
+// Provider represents any pusher and the schemes that it supports.
+type Provider struct {
+ Schemes []string
+ New Constructor
+}
+
+// Provides returns true if the given scheme is supported by this Provider.
+func (p Provider) Provides(scheme string) bool {
+ return slices.Contains(p.Schemes, scheme)
+}
+
+// Providers is a collection of Provider objects.
+type Providers []Provider
+
+// ByScheme returns a Provider that handles the given scheme.
+//
+// If no provider handles this scheme, this will return an error.
+func (p Providers) ByScheme(scheme string) (Pusher, error) {
+ for _, pp := range p {
+ if pp.Provides(scheme) {
+ return pp.New()
+ }
+ }
+ return nil, fmt.Errorf("scheme %q not supported", scheme)
+}
+
+var ociProvider = Provider{
+ Schemes: []string{registry.OCIScheme},
+ New: NewOCIPusher,
+}
+
+// All finds all of the registered pushers as a list of Provider instances.
+// Currently, just the built-in pushers are collected.
+func All(_ *cli.EnvSettings) Providers {
+ result := Providers{ociProvider}
+ return result
+}
diff --git a/helm/pkg/pusher/pusher_test.go b/helm/pkg/pusher/pusher_test.go
new file mode 100644
index 000000000..71fab8694
--- /dev/null
+++ b/helm/pkg/pusher/pusher_test.go
@@ -0,0 +1,68 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+ "testing"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+func TestProvider(t *testing.T) {
+ p := Provider{
+ []string{"one", "three"},
+ func(_ ...Option) (Pusher, error) { return nil, nil },
+ }
+
+ if !p.Provides("three") {
+ t.Error("Expected provider to provide three")
+ }
+}
+
+func TestProviders(t *testing.T) {
+ ps := Providers{
+ {[]string{"one", "three"}, func(_ ...Option) (Pusher, error) { return nil, nil }},
+ {[]string{"two", "four"}, func(_ ...Option) (Pusher, error) { return nil, nil }},
+ }
+
+ if _, err := ps.ByScheme("one"); err != nil {
+ t.Error(err)
+ }
+ if _, err := ps.ByScheme("four"); err != nil {
+ t.Error(err)
+ }
+
+ if _, err := ps.ByScheme("five"); err == nil {
+ t.Error("Did not expect handler for five")
+ }
+}
+
+func TestAll(t *testing.T) {
+ env := cli.New()
+ all := All(env)
+ if len(all) != 1 {
+ t.Errorf("expected 1 provider (OCI), got %d", len(all))
+ }
+}
+
+func TestByScheme(t *testing.T) {
+ env := cli.New()
+ g := All(env)
+ if _, err := g.ByScheme(registry.OCIScheme); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/helm/pkg/registry/chart.go b/helm/pkg/registry/chart.go
new file mode 100644
index 000000000..b00fc616d
--- /dev/null
+++ b/helm/pkg/registry/chart.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v4/pkg/registry"
+
+import (
+ "bytes"
+ "strings"
+ "time"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var immutableOciAnnotations = []string{
+ ocispec.AnnotationVersion,
+ ocispec.AnnotationTitle,
+}
+
+// extractChartMeta is used to extract a chart metadata from a byte array
+func extractChartMeta(chartData []byte) (*chart.Metadata, error) {
+ ch, err := loader.LoadArchive(bytes.NewReader(chartData))
+ if err != nil {
+ return nil, err
+ }
+ return ch.Metadata, nil
+}
+
+// generateOCIAnnotations will generate OCI annotations to include within the OCI manifest
+func generateOCIAnnotations(meta *chart.Metadata, creationTime string) map[string]string {
+
+ // Get annotations from Chart attributes
+ ociAnnotations := generateChartOCIAnnotations(meta, creationTime)
+
+ // Copy Chart annotations
+annotations:
+ for chartAnnotationKey, chartAnnotationValue := range meta.Annotations {
+
+ // Avoid overriding key properties
+ for _, immutableOciKey := range immutableOciAnnotations {
+ if immutableOciKey == chartAnnotationKey {
+ continue annotations
+ }
+ }
+
+ // Add chart annotation
+ ociAnnotations[chartAnnotationKey] = chartAnnotationValue
+ }
+
+ return ociAnnotations
+}
+
+// generateChartOCIAnnotations will generate OCI annotations from the provided chart
+func generateChartOCIAnnotations(meta *chart.Metadata, creationTime string) map[string]string {
+ chartOCIAnnotations := map[string]string{}
+
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationDescription, meta.Description)
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationTitle, meta.Name)
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationVersion, meta.Version)
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationURL, meta.Home)
+
+ if len(creationTime) == 0 {
+ creationTime = time.Now().UTC().Format(time.RFC3339)
+ }
+
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationCreated, creationTime)
+
+ if len(meta.Sources) > 0 {
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationSource, meta.Sources[0])
+ }
+
+ if len(meta.Maintainers) > 0 {
+ var maintainerSb strings.Builder
+
+ for maintainerIdx, maintainer := range meta.Maintainers {
+
+ if len(maintainer.Name) > 0 {
+ maintainerSb.WriteString(maintainer.Name)
+ }
+
+ if len(maintainer.Email) > 0 {
+ maintainerSb.WriteString(" (")
+ maintainerSb.WriteString(maintainer.Email)
+ maintainerSb.WriteString(")")
+ }
+
+ if maintainerIdx < len(meta.Maintainers)-1 {
+ maintainerSb.WriteString(", ")
+ }
+
+ }
+
+ chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationAuthors, maintainerSb.String())
+
+ }
+
+ return chartOCIAnnotations
+}
+
+// addToMap takes an existing map and adds an item if the value is not empty
+func addToMap(inputMap map[string]string, newKey string, newValue string) map[string]string {
+
+ // Add item to map if its
+ if len(strings.TrimSpace(newValue)) > 0 {
+ inputMap[newKey] = newValue
+ }
+
+ return inputMap
+}
diff --git a/helm/pkg/registry/chart_test.go b/helm/pkg/registry/chart_test.go
new file mode 100644
index 000000000..77ccdaab7
--- /dev/null
+++ b/helm/pkg/registry/chart_test.go
@@ -0,0 +1,274 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v4/pkg/registry"
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+)
+
+func TestGenerateOCIChartAnnotations(t *testing.T) {
+
+ nowString := time.Now().Format(time.RFC3339)
+
+ tests := []struct {
+ name string
+ chart *chart.Metadata
+ expect map[string]string
+ }{
+ {
+ "Baseline chart",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ },
+ },
+ {
+ "Simple chart values",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Home: "https://helm.sh",
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.url": "https://helm.sh",
+ },
+ },
+ {
+ "Maintainer without email",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Home: "https://helm.sh",
+ Maintainers: []*chart.Maintainer{
+ {
+ Name: "John Snow",
+ },
+ },
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.url": "https://helm.sh",
+ "org.opencontainers.image.authors": "John Snow",
+ },
+ },
+ {
+ "Maintainer with email",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Home: "https://helm.sh",
+ Maintainers: []*chart.Maintainer{
+ {Name: "John Snow", Email: "john@winterfell.com"},
+ },
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.url": "https://helm.sh",
+ "org.opencontainers.image.authors": "John Snow (john@winterfell.com)",
+ },
+ },
+ {
+ "Multiple Maintainers",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Home: "https://helm.sh",
+ Maintainers: []*chart.Maintainer{
+ {Name: "John Snow", Email: "john@winterfell.com"},
+ {Name: "Jane Snow"},
+ },
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.url": "https://helm.sh",
+ "org.opencontainers.image.authors": "John Snow (john@winterfell.com), Jane Snow",
+ },
+ },
+ {
+ "Chart with Sources",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Sources: []string{
+ "https://github.com/helm/helm",
+ },
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.source": "https://github.com/helm/helm",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+
+ result := generateChartOCIAnnotations(tt.chart, nowString)
+
+ if !reflect.DeepEqual(tt.expect, result) {
+ t.Errorf("%s: expected map %v, got %v", tt.name, tt.expect, result)
+ }
+
+ }
+}
+
+func TestGenerateOCIAnnotations(t *testing.T) {
+
+ nowString := time.Now().Format(time.RFC3339)
+
+ tests := []struct {
+ name string
+ chart *chart.Metadata
+ expect map[string]string
+ }{
+ {
+ "Baseline chart",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.created": nowString,
+ },
+ },
+ {
+ "Simple chart values with custom Annotations",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Annotations: map[string]string{
+ "extrakey": "extravlue",
+ "anotherkey": "anothervalue",
+ },
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.created": nowString,
+ "extrakey": "extravlue",
+ "anotherkey": "anothervalue",
+ },
+ },
+ {
+ "Verify Chart Name and Version cannot be overridden from annotations",
+ &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ Description: "OCI Helm Chart",
+ Annotations: map[string]string{
+ "org.opencontainers.image.title": "badchartname",
+ "org.opencontainers.image.version": "1.0.0",
+ "extrakey": "extravlue",
+ },
+ },
+ map[string]string{
+ "org.opencontainers.image.title": "oci",
+ "org.opencontainers.image.version": "0.0.1",
+ "org.opencontainers.image.description": "OCI Helm Chart",
+ "org.opencontainers.image.created": nowString,
+ "extrakey": "extravlue",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+
+ result := generateOCIAnnotations(tt.chart, nowString)
+
+ if !reflect.DeepEqual(tt.expect, result) {
+ t.Errorf("%s: expected map %v, got %v", tt.name, tt.expect, result)
+ }
+
+ }
+}
+
+func TestGenerateOCICreatedAnnotations(t *testing.T) {
+
+ nowTime := time.Now()
+ nowTimeString := nowTime.Format(time.RFC3339)
+
+ testChart := &chart.Metadata{
+ Name: "oci",
+ Version: "0.0.1",
+ }
+
+ result := generateOCIAnnotations(testChart, nowTimeString)
+
+ // Check that created annotation exists
+ if _, ok := result[ocispec.AnnotationCreated]; !ok {
+ t.Errorf("%s annotation not created", ocispec.AnnotationCreated)
+ }
+
+ // Verify value of created artifact in RFC3339 format
+ if _, err := time.Parse(time.RFC3339, result[ocispec.AnnotationCreated]); err != nil {
+ t.Errorf("%s annotation with value '%s' not in RFC3339 format", ocispec.AnnotationCreated, result[ocispec.AnnotationCreated])
+ }
+
+ // Verify default creation time set
+ result = generateOCIAnnotations(testChart, "")
+
+ // Check that created annotation exists
+ if _, ok := result[ocispec.AnnotationCreated]; !ok {
+ t.Errorf("%s annotation not created", ocispec.AnnotationCreated)
+ }
+
+ if createdTimeAnnotation, err := time.Parse(time.RFC3339, result[ocispec.AnnotationCreated]); err != nil {
+ t.Errorf("%s annotation with value '%s' not in RFC3339 format", ocispec.AnnotationCreated, result[ocispec.AnnotationCreated])
+
+ // Verify creation annotation after time test began
+ if !nowTime.Before(createdTimeAnnotation) {
+ t.Errorf("%s annotation with value '%s' not configured properly. Annotation value is not after %s", ocispec.AnnotationCreated, result[ocispec.AnnotationCreated], nowTimeString)
+ }
+
+ }
+
+}
diff --git a/helm/pkg/registry/client.go b/helm/pkg/registry/client.go
new file mode 100644
index 000000000..750bb9715
--- /dev/null
+++ b/helm/pkg/registry/client.go
@@ -0,0 +1,927 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v4/pkg/registry"
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/opencontainers/image-spec/specs-go"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "oras.land/oras-go/v2"
+ "oras.land/oras-go/v2/content/memory"
+ "oras.land/oras-go/v2/registry"
+ "oras.land/oras-go/v2/registry/remote"
+ "oras.land/oras-go/v2/registry/remote/auth"
+ "oras.land/oras-go/v2/registry/remote/credentials"
+ "oras.land/oras-go/v2/registry/remote/retry"
+
+ "helm.sh/helm/v4/internal/version"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// See https://github.com/helm/helm/issues/10166
+const registryUnderscoreMessage = `
+OCI artifact references (e.g. tags) do not support the plus sign (+). To support
+storing semantic versions, Helm adopts the convention of changing plus (+) to
+an underscore (_) in chart version tags when pushing to a registry and back to
+a plus (+) when pulling from a registry.`
+
+type (
+ // RemoteClient shadows the ORAS remote.Client interface
+ // (hiding the ORAS type from Helm client visibility)
+ // https://pkg.go.dev/oras.land/oras-go/pkg/registry/remote#Client
+ RemoteClient interface {
+ Do(req *http.Request) (*http.Response, error)
+ }
+
+ // Client works with OCI-compliant registries
+ Client struct {
+ debug bool
+ enableCache bool
+ // path to repository config file e.g. ~/.docker/config.json
+ credentialsFile string
+ username string
+ password string
+ out io.Writer
+ authorizer *auth.Client
+ registryAuthorizer RemoteClient
+ credentialsStore credentials.Store
+ httpClient *http.Client
+ plainHTTP bool
+ }
+
+ // ClientOption allows specifying various settings configurable by the user for overriding the defaults
+ // used when creating a new default client
+ // TODO(TerryHowe): ClientOption should return error in v5
+ ClientOption func(*Client)
+)
+
+// NewClient returns a new registry client with config
+func NewClient(options ...ClientOption) (*Client, error) {
+ client := &Client{
+ out: io.Discard,
+ }
+ for _, option := range options {
+ option(client)
+ }
+ if client.credentialsFile == "" {
+ client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename)
+ }
+ if client.httpClient == nil {
+ client.httpClient = &http.Client{
+ Transport: NewTransport(client.debug),
+ }
+ }
+
+ storeOptions := credentials.StoreOptions{
+ AllowPlaintextPut: true,
+ DetectDefaultNativeStore: true,
+ }
+ store, err := credentials.NewStore(client.credentialsFile, storeOptions)
+ if err != nil {
+ return nil, err
+ }
+ dockerStore, err := credentials.NewStoreFromDocker(storeOptions)
+ if err != nil {
+ // should only fail if user home directory can't be determined
+ client.credentialsStore = store
+ } else {
+ // use Helm credentials with fallback to Docker
+ client.credentialsStore = credentials.NewStoreWithFallbacks(store, dockerStore)
+ }
+
+ if client.authorizer == nil {
+ authorizer := auth.Client{
+ Client: client.httpClient,
+ }
+ authorizer.SetUserAgent(version.GetUserAgent())
+
+ if client.username != "" && client.password != "" {
+ authorizer.Credential = func(_ context.Context, _ string) (auth.Credential, error) {
+ return auth.Credential{Username: client.username, Password: client.password}, nil
+ }
+ } else {
+ authorizer.Credential = credentials.Credential(client.credentialsStore)
+ }
+
+ if client.enableCache {
+ authorizer.Cache = auth.NewCache()
+ }
+ client.authorizer = &authorizer
+ }
+
+ return client, nil
+}
+
+// Generic returns a GenericClient for low-level OCI operations
+func (c *Client) Generic() *GenericClient {
+ return NewGenericClient(c)
+}
+
+// ClientOptDebug returns a function that sets the debug setting on client options set
+func ClientOptDebug(debug bool) ClientOption {
+ return func(client *Client) {
+ client.debug = debug
+ }
+}
+
+// ClientOptEnableCache returns a function that sets the enableCache setting on a client options set
+func ClientOptEnableCache(enableCache bool) ClientOption {
+ return func(client *Client) {
+ client.enableCache = enableCache
+ }
+}
+
+// ClientOptBasicAuth returns a function that sets the username and password setting on client options set
+func ClientOptBasicAuth(username, password string) ClientOption {
+ return func(client *Client) {
+ client.username = username
+ client.password = password
+ }
+}
+
+// ClientOptWriter returns a function that sets the writer setting on client options set
+func ClientOptWriter(out io.Writer) ClientOption {
+ return func(client *Client) {
+ client.out = out
+ }
+}
+
+// ClientOptAuthorizer returns a function that sets the authorizer setting on a client options set. This
+// can be used to override the default authorization mechanism.
+//
+// Depending on the use-case you may need to set both ClientOptAuthorizer and ClientOptRegistryAuthorizer.
+func ClientOptAuthorizer(authorizer auth.Client) ClientOption {
+ return func(client *Client) {
+ client.authorizer = &authorizer
+ }
+}
+
+// ClientOptRegistryAuthorizer returns a function that sets the registry authorizer setting on a client options set. This
+// can be used to override the default authorization mechanism.
+//
+// Depending on the use-case you may need to set both ClientOptAuthorizer and ClientOptRegistryAuthorizer.
+func ClientOptRegistryAuthorizer(registryAuthorizer RemoteClient) ClientOption {
+ return func(client *Client) {
+ client.registryAuthorizer = registryAuthorizer
+ }
+}
+
+// ClientOptCredentialsFile returns a function that sets the credentialsFile setting on a client options set
+func ClientOptCredentialsFile(credentialsFile string) ClientOption {
+ return func(client *Client) {
+ client.credentialsFile = credentialsFile
+ }
+}
+
+// ClientOptHTTPClient returns a function that sets the httpClient setting on a client options set
+func ClientOptHTTPClient(httpClient *http.Client) ClientOption {
+ return func(client *Client) {
+ client.httpClient = httpClient
+ }
+}
+
+func ClientOptPlainHTTP() ClientOption {
+ return func(c *Client) {
+ c.plainHTTP = true
+ }
+}
+
+type (
+ // LoginOption allows specifying various settings on login
+ LoginOption func(*loginOperation)
+
+ loginOperation struct {
+ host string
+ client *Client
+ }
+)
+
+// warnIfHostHasPath checks if the host contains a repository path and logs a warning if it does.
+// Returns true if the host contains a path component (i.e., contains a '/').
+func warnIfHostHasPath(host string) bool {
+ if strings.Contains(host, "/") {
+ registryHost := strings.Split(host, "/")[0]
+ slog.Warn("registry login currently only supports registry hostname, not a repository path", "host", host, "suggested", registryHost)
+ return true
+ }
+ return false
+}
+
+// Login logs into a registry
+func (c *Client) Login(host string, options ...LoginOption) error {
+ for _, option := range options {
+ option(&loginOperation{host, c})
+ }
+
+ warnIfHostHasPath(host)
+
+ reg, err := remote.NewRegistry(host)
+ if err != nil {
+ return err
+ }
+ reg.PlainHTTP = c.plainHTTP
+ cred := auth.Credential{Username: c.username, Password: c.password}
+ c.authorizer.ForceAttemptOAuth2 = true
+ reg.Client = c.authorizer
+
+ ctx := context.Background()
+ if err := reg.Ping(ctx); err != nil {
+ c.authorizer.ForceAttemptOAuth2 = false
+ if err := reg.Ping(ctx); err != nil {
+ return fmt.Errorf("authenticating to %q: %w", host, err)
+ }
+ }
+ // Always restore to false after probing, to avoid forcing POST to token endpoints like GHCR.
+ c.authorizer.ForceAttemptOAuth2 = false
+
+ key := credentials.ServerAddressFromRegistry(host)
+ key = credentials.ServerAddressFromHostname(key)
+ if err := c.credentialsStore.Put(ctx, key, cred); err != nil {
+ return err
+ }
+
+ _, _ = fmt.Fprintln(c.out, "Login Succeeded")
+ return nil
+}
+
+// LoginOptBasicAuth returns a function that sets the username/password settings on login
+func LoginOptBasicAuth(username string, password string) LoginOption {
+ return func(o *loginOperation) {
+ o.client.username = username
+ o.client.password = password
+ o.client.authorizer.Credential = auth.StaticCredential(o.host, auth.Credential{Username: username, Password: password})
+ }
+}
+
+// LoginOptPlainText returns a function that allows plaintext (HTTP) login
+func LoginOptPlainText(isPlainText bool) LoginOption {
+ return func(o *loginOperation) {
+ o.client.plainHTTP = isPlainText
+ }
+}
+
+func ensureTLSConfig(client *auth.Client, setConfig *tls.Config) (*tls.Config, error) {
+ var transport *http.Transport
+
+ switch t := client.Client.Transport.(type) {
+ case *http.Transport:
+ transport = t
+ case *retry.Transport:
+ switch t := t.Base.(type) {
+ case *http.Transport:
+ transport = t
+ case *LoggingTransport:
+ switch t := t.RoundTripper.(type) {
+ case *http.Transport:
+ transport = t
+ }
+ }
+ }
+
+ if transport == nil {
+ // we don't know how to access the http.Transport, most likely the
+ // auth.Client.Client was provided by API user
+ return nil, fmt.Errorf("unable to access TLS client configuration, the provided HTTP Transport is not supported, given: %T", client.Client.Transport)
+ }
+
+ switch {
+ case setConfig != nil:
+ transport.TLSClientConfig = setConfig
+ case transport.TLSClientConfig == nil:
+ transport.TLSClientConfig = &tls.Config{}
+ }
+
+ return transport.TLSClientConfig, nil
+}
+
+// LoginOptInsecure returns a function that sets the insecure setting on login
+func LoginOptInsecure(insecure bool) LoginOption {
+ return func(o *loginOperation) {
+ tlsConfig, err := ensureTLSConfig(o.client.authorizer, nil)
+
+ if err != nil {
+ panic(err)
+ }
+
+ tlsConfig.InsecureSkipVerify = insecure
+ }
+}
+
+// LoginOptTLSClientConfig returns a function that sets the TLS settings on login.
+func LoginOptTLSClientConfig(certFile, keyFile, caFile string) LoginOption {
+ return func(o *loginOperation) {
+ if (certFile == "" || keyFile == "") && caFile == "" {
+ return
+ }
+ tlsConfig, err := ensureTLSConfig(o.client.authorizer, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ if certFile != "" && keyFile != "" {
+ authCert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ panic(err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{authCert}
+ }
+
+ if caFile != "" {
+ certPool := x509.NewCertPool()
+ ca, err := os.ReadFile(caFile)
+ if err != nil {
+ panic(err)
+ }
+ if !certPool.AppendCertsFromPEM(ca) {
+ panic(fmt.Errorf("unable to parse CA file: %q", caFile))
+ }
+ tlsConfig.RootCAs = certPool
+ }
+ }
+}
+
+// LoginOptTLSClientConfigFromConfig returns a function that sets the TLS settings on login
+// receiving the configuration in memory rather than from files.
+func LoginOptTLSClientConfigFromConfig(conf *tls.Config) LoginOption {
+ return func(o *loginOperation) {
+ _, err := ensureTLSConfig(o.client.authorizer, conf)
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+type (
+ // LogoutOption allows specifying various settings on logout
+ LogoutOption func(*logoutOperation)
+
+ logoutOperation struct{}
+)
+
+// Logout logs out of a registry
+func (c *Client) Logout(host string, opts ...LogoutOption) error {
+ operation := &logoutOperation{}
+ for _, opt := range opts {
+ opt(operation)
+ }
+
+ if err := credentials.Logout(context.Background(), c.credentialsStore, host); err != nil {
+ return err
+ }
+ _, _ = fmt.Fprintf(c.out, "Removing login credentials for %s\n", host)
+ return nil
+}
+
+type (
+ // PullOption allows specifying various settings on pull
+ PullOption func(*pullOperation)
+
+ // PullResult is the result returned upon successful pull.
+ PullResult struct {
+ Manifest *DescriptorPullSummary `json:"manifest"`
+ Config *DescriptorPullSummary `json:"config"`
+ Chart *DescriptorPullSummaryWithMeta `json:"chart"`
+ Prov *DescriptorPullSummary `json:"prov"`
+ Ref string `json:"ref"`
+ }
+
+ DescriptorPullSummary struct {
+ Data []byte `json:"-"`
+ Digest string `json:"digest"`
+ Size int64 `json:"size"`
+ }
+
+ DescriptorPullSummaryWithMeta struct {
+ DescriptorPullSummary
+ Meta *chart.Metadata `json:"meta"`
+ }
+
+ pullOperation struct {
+ withChart bool
+ withProv bool
+ ignoreMissingProv bool
+ }
+)
+
+// processChartPull handles chart-specific processing of a generic pull result
+func (c *Client) processChartPull(genericResult *GenericPullResult, operation *pullOperation) (*PullResult, error) {
+ var err error
+
+ // Chart-specific validation
+ minNumDescriptors := 1 // 1 for the config
+ if operation.withChart {
+ minNumDescriptors++
+ }
+ if operation.withProv && !operation.ignoreMissingProv {
+ minNumDescriptors++
+ }
+
+ numDescriptors := len(genericResult.Descriptors)
+ if numDescriptors < minNumDescriptors {
+ return nil, fmt.Errorf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d",
+ minNumDescriptors, numDescriptors)
+ }
+
+ // Find chart-specific descriptors
+ var configDescriptor *ocispec.Descriptor
+ var chartDescriptor *ocispec.Descriptor
+ var provDescriptor *ocispec.Descriptor
+
+ for _, descriptor := range genericResult.Descriptors {
+ d := descriptor
+ switch d.MediaType {
+ case ConfigMediaType:
+ configDescriptor = &d
+ case ChartLayerMediaType:
+ chartDescriptor = &d
+ case ProvLayerMediaType:
+ provDescriptor = &d
+ case LegacyChartLayerMediaType:
+ chartDescriptor = &d
+ _, _ = fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType)
+ }
+ }
+
+ // Chart-specific validation
+ if configDescriptor == nil {
+ return nil, fmt.Errorf("could not load config with mediatype %s", ConfigMediaType)
+ }
+ if operation.withChart && chartDescriptor == nil {
+ return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s",
+ ChartLayerMediaType)
+ }
+
+ var provMissing bool
+ if operation.withProv && provDescriptor == nil {
+ if operation.ignoreMissingProv {
+ provMissing = true
+ } else {
+ return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s",
+ ProvLayerMediaType)
+ }
+ }
+
+ // Build chart-specific result
+ result := &PullResult{
+ Manifest: &DescriptorPullSummary{
+ Digest: genericResult.Manifest.Digest.String(),
+ Size: genericResult.Manifest.Size,
+ },
+ Config: &DescriptorPullSummary{
+ Digest: configDescriptor.Digest.String(),
+ Size: configDescriptor.Size,
+ },
+ Chart: &DescriptorPullSummaryWithMeta{},
+ Prov: &DescriptorPullSummary{},
+ Ref: genericResult.Ref,
+ }
+
+ // Fetch data using generic client
+ genericClient := c.Generic()
+
+ result.Manifest.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, genericResult.Manifest)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", genericResult.Manifest.Digest, err)
+ }
+
+ result.Config.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *configDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", configDescriptor.Digest, err)
+ }
+
+ if err := json.Unmarshal(result.Config.Data, &result.Chart.Meta); err != nil {
+ return nil, err
+ }
+
+ if operation.withChart {
+ result.Chart.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *chartDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", chartDescriptor.Digest, err)
+ }
+ result.Chart.Digest = chartDescriptor.Digest.String()
+ result.Chart.Size = chartDescriptor.Size
+ }
+
+ if operation.withProv && !provMissing {
+ result.Prov.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve blob with digest %s: %w", provDescriptor.Digest, err)
+ }
+ result.Prov.Digest = provDescriptor.Digest.String()
+ result.Prov.Size = provDescriptor.Size
+ }
+
+ _, _ = fmt.Fprintf(c.out, "Pulled: %s\n", result.Ref)
+ _, _ = fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+
+ if strings.Contains(result.Ref, "_") {
+ _, _ = fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref)
+ _, _ = fmt.Fprint(c.out, registryUnderscoreMessage+"\n")
+ }
+
+ return result, nil
+}
+
+// Pull downloads a chart from a registry
+func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) {
+ operation := &pullOperation{
+ withChart: true, // By default, always download the chart layer
+ }
+ for _, option := range options {
+ option(operation)
+ }
+ if !operation.withChart && !operation.withProv {
+ return nil, errors.New(
+ "must specify at least one layer to pull (chart/prov)")
+ }
+
+ // Build allowed media types for chart pull
+ allowedMediaTypes := []string{
+ ocispec.MediaTypeImageManifest,
+ ConfigMediaType,
+ }
+ if operation.withChart {
+ allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType)
+ }
+ if operation.withProv {
+ allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType)
+ }
+
+ // Use generic client for the pull operation
+ genericClient := c.Generic()
+ genericResult, err := genericClient.PullGeneric(ref, GenericPullOptions{
+ AllowedMediaTypes: allowedMediaTypes,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Process the result with chart-specific logic
+ return c.processChartPull(genericResult, operation)
+}
+
+// PullOptWithChart returns a function that sets the withChart setting on pull
+func PullOptWithChart(withChart bool) PullOption {
+ return func(operation *pullOperation) {
+ operation.withChart = withChart
+ }
+}
+
+// PullOptWithProv returns a function that sets the withProv setting on pull
+func PullOptWithProv(withProv bool) PullOption {
+ return func(operation *pullOperation) {
+ operation.withProv = withProv
+ }
+}
+
+// PullOptIgnoreMissingProv returns a function that sets the ignoreMissingProv setting on pull
+func PullOptIgnoreMissingProv(ignoreMissingProv bool) PullOption {
+ return func(operation *pullOperation) {
+ operation.ignoreMissingProv = ignoreMissingProv
+ }
+}
+
+type (
+ // PushOption allows specifying various settings on push
+ PushOption func(*pushOperation)
+
+ // PushResult is the result returned upon successful push.
+ PushResult struct {
+ Manifest *descriptorPushSummary `json:"manifest"`
+ Config *descriptorPushSummary `json:"config"`
+ Chart *descriptorPushSummaryWithMeta `json:"chart"`
+ Prov *descriptorPushSummary `json:"prov"`
+ Ref string `json:"ref"`
+ }
+
+ descriptorPushSummary struct {
+ Digest string `json:"digest"`
+ Size int64 `json:"size"`
+ }
+
+ descriptorPushSummaryWithMeta struct {
+ descriptorPushSummary
+ Meta *chart.Metadata `json:"meta"`
+ }
+
+ pushOperation struct {
+ provData []byte
+ strictMode bool
+ creationTime string
+ }
+)
+
+// Push uploads a chart to a registry.
+func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResult, error) {
+ parsedRef, err := newReference(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ operation := &pushOperation{
+ strictMode: true, // By default, enable strict mode
+ }
+ for _, option := range options {
+ option(operation)
+ }
+ meta, err := extractChartMeta(data)
+ if err != nil {
+ return nil, err
+ }
+ if operation.strictMode {
+ if !strings.HasSuffix(ref, fmt.Sprintf("/%s:%s", meta.Name, meta.Version)) {
+ return nil, errors.New(
+ "strict mode enabled, ref basename and tag must match the chart name and version")
+ }
+ }
+
+ ctx := context.Background()
+
+ memoryStore := memory.New()
+ chartDescriptor, err := oras.PushBytes(ctx, memoryStore, ChartLayerMediaType, data)
+ if err != nil {
+ return nil, err
+ }
+
+ configData, err := json.Marshal(meta)
+ if err != nil {
+ return nil, err
+ }
+
+ configDescriptor, err := oras.PushBytes(ctx, memoryStore, ConfigMediaType, configData)
+ if err != nil {
+ return nil, err
+ }
+
+ layers := []ocispec.Descriptor{chartDescriptor}
+ var provDescriptor ocispec.Descriptor
+ if operation.provData != nil {
+ provDescriptor, err = oras.PushBytes(ctx, memoryStore, ProvLayerMediaType, operation.provData)
+ if err != nil {
+ return nil, err
+ }
+
+ layers = append(layers, provDescriptor)
+ }
+
+ // sort layers for determinism, similar to how ORAS v1 does it
+ sort.Slice(layers, func(i, j int) bool {
+ return layers[i].Digest < layers[j].Digest
+ })
+
+ ociAnnotations := generateOCIAnnotations(meta, operation.creationTime)
+
+ manifestDescriptor, err := c.tagManifest(ctx, memoryStore, configDescriptor,
+ layers, ociAnnotations, parsedRef)
+ if err != nil {
+ return nil, err
+ }
+
+ repository, err := remote.NewRepository(parsedRef.String())
+ if err != nil {
+ return nil, err
+ }
+ repository.PlainHTTP = c.plainHTTP
+ repository.Client = c.authorizer
+
+ manifestDescriptor, err = oras.ExtendedCopy(ctx, memoryStore, parsedRef.String(), repository, parsedRef.String(), oras.DefaultExtendedCopyOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ chartSummary := &descriptorPushSummaryWithMeta{
+ Meta: meta,
+ }
+ chartSummary.Digest = chartDescriptor.Digest.String()
+ chartSummary.Size = chartDescriptor.Size
+ result := &PushResult{
+ Manifest: &descriptorPushSummary{
+ Digest: manifestDescriptor.Digest.String(),
+ Size: manifestDescriptor.Size,
+ },
+ Config: &descriptorPushSummary{
+ Digest: configDescriptor.Digest.String(),
+ Size: configDescriptor.Size,
+ },
+ Chart: chartSummary,
+ Prov: &descriptorPushSummary{}, // prevent nil references
+ Ref: parsedRef.String(),
+ }
+ if operation.provData != nil {
+ result.Prov = &descriptorPushSummary{
+ Digest: provDescriptor.Digest.String(),
+ Size: provDescriptor.Size,
+ }
+ }
+ _, _ = fmt.Fprintf(c.out, "Pushed: %s\n", result.Ref)
+ _, _ = fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+ if strings.Contains(parsedRef.orasReference.Reference, "_") {
+ _, _ = fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref)
+ _, _ = fmt.Fprint(c.out, registryUnderscoreMessage+"\n")
+ }
+
+ return result, err
+}
+
+// PushOptProvData returns a function that sets the prov bytes setting on push
+func PushOptProvData(provData []byte) PushOption {
+ return func(operation *pushOperation) {
+ operation.provData = provData
+ }
+}
+
+// PushOptStrictMode returns a function that sets the strictMode setting on push
+func PushOptStrictMode(strictMode bool) PushOption {
+ return func(operation *pushOperation) {
+ operation.strictMode = strictMode
+ }
+}
+
+// PushOptCreationTime returns a function that sets the creation time
+func PushOptCreationTime(creationTime string) PushOption {
+ return func(operation *pushOperation) {
+ operation.creationTime = creationTime
+ }
+}
+
+// Tags provides a sorted list all semver compliant tags for a given repository
+func (c *Client) Tags(ref string) ([]string, error) {
+ parsedReference, err := registry.ParseReference(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx := context.Background()
+ repository, err := remote.NewRepository(parsedReference.String())
+ if err != nil {
+ return nil, err
+ }
+ repository.PlainHTTP = c.plainHTTP
+ repository.Client = c.authorizer
+
+ var tagVersions []*semver.Version
+ err = repository.Tags(ctx, "", func(tags []string) error {
+ for _, tag := range tags {
+ // Change underscore (_) back to plus (+) for Helm
+ // See https://github.com/helm/helm/issues/10166
+ tagVersion, err := semver.StrictNewVersion(strings.ReplaceAll(tag, "_", "+"))
+ if err == nil {
+ tagVersions = append(tagVersions, tagVersion)
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Sort the collection
+ sort.Sort(sort.Reverse(semver.Collection(tagVersions)))
+
+ tags := make([]string, len(tagVersions))
+
+ for iTv, tv := range tagVersions {
+ tags[iTv] = tv.String()
+ }
+
+ return tags, nil
+
+}
+
+// Resolve a reference to a descriptor.
+func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) {
+ remoteRepository, err := remote.NewRepository(ref)
+ if err != nil {
+ return desc, err
+ }
+ remoteRepository.PlainHTTP = c.plainHTTP
+ remoteRepository.Client = c.authorizer
+
+ parsedReference, err := newReference(ref)
+ if err != nil {
+ return desc, err
+ }
+
+ ctx := context.Background()
+ parsedString := parsedReference.String()
+ return remoteRepository.Resolve(ctx, parsedString)
+}
+
+// ValidateReference for path and version
+func (c *Client) ValidateReference(ref, version string, u *url.URL) (string, *url.URL, error) {
+ var tag string
+
+ registryReference, err := newReference(u.Host + u.Path)
+ if err != nil {
+ return "", nil, err
+ }
+
+ if version == "" {
+ // Use OCI URI tag as default
+ version = registryReference.Tag
+ } else {
+ if registryReference.Tag != "" && registryReference.Tag != version {
+ return "", nil, fmt.Errorf("chart reference and version mismatch: %s is not %s", version, registryReference.Tag)
+ }
+ }
+
+ if registryReference.Digest != "" {
+ if version == "" {
+ // Install by digest only
+ return "", u, nil
+ }
+ u.Path = fmt.Sprintf("%s@%s", registryReference.Repository, registryReference.Digest)
+
+ // Validate the tag if it was specified
+ path := registryReference.Registry + "/" + registryReference.Repository + ":" + version
+ desc, err := c.Resolve(path)
+ if err != nil {
+ // The resource does not have to be tagged when digest is specified
+ return "", u, nil
+ }
+ if desc.Digest.String() != registryReference.Digest {
+ return "", nil, fmt.Errorf("chart reference digest mismatch: %s is not %s", desc.Digest.String(), registryReference.Digest)
+ }
+ return registryReference.Digest, u, nil
+ }
+
+ // Evaluate whether an explicit version has been provided. Otherwise, determine version to use
+ _, errSemVer := semver.NewVersion(version)
+ if errSemVer == nil {
+ tag = version
+ } else {
+ // Retrieve list of repository tags
+ tags, err := c.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", OCIScheme)))
+ if err != nil {
+ return "", nil, err
+ }
+ if len(tags) == 0 {
+ return "", nil, fmt.Errorf("unable to locate any tags in provided repository: %s", ref)
+ }
+
+ // Determine if version provided
+ // If empty, try to get the highest available tag
+ // If exact version, try to find it
+ // If semver constraint string, try to find a match
+ tag, err = GetTagMatchingVersionOrConstraint(tags, version)
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ u.Path = fmt.Sprintf("%s:%s", registryReference.Repository, tag)
+ // desc, err := c.Resolve(u.Path)
+
+ return "", u, err
+}
+
+// tagManifest prepares and tags a manifest in memory storage
+func (c *Client) tagManifest(ctx context.Context, memoryStore *memory.Store,
+ configDescriptor ocispec.Descriptor, layers []ocispec.Descriptor,
+ ociAnnotations map[string]string, parsedRef reference) (ocispec.Descriptor, error) {
+
+ manifest := ocispec.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ Config: configDescriptor,
+ Layers: layers,
+ Annotations: ociAnnotations,
+ }
+
+ manifestData, err := json.Marshal(manifest)
+ if err != nil {
+ return ocispec.Descriptor{}, err
+ }
+
+ return oras.TagBytes(ctx, memoryStore, ocispec.MediaTypeImageManifest,
+ manifestData, parsedRef.String())
+}
diff --git a/helm/pkg/registry/client_http_test.go b/helm/pkg/registry/client_http_test.go
new file mode 100644
index 000000000..a2c3a1833
--- /dev/null
+++ b/helm/pkg/registry/client_http_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ "oras.land/oras-go/v2/content"
+)
+
+type HTTPRegistryClientTestSuite struct {
+ TestRegistry
+}
+
+func (suite *HTTPRegistryClientTestSuite) SetupSuite() {
+ // init test client
+ setup(&suite.TestRegistry, false, false)
+}
+
+func (suite *HTTPRegistryClientTestSuite) TearDownSuite() {
+ teardown(&suite.TestRegistry)
+ _ = os.RemoveAll(suite.WorkspaceDir)
+}
+
+func (suite *HTTPRegistryClientTestSuite) Test_0_Login() {
+ err := suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth("badverybad", "ohsobad"),
+ LoginOptPlainText(true))
+ suite.NotNil(err, "error logging into registry with bad credentials")
+
+ err = suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth(testUsername, testPassword),
+ LoginOptPlainText(true))
+ suite.Nil(err, "no error logging into registry with good credentials")
+}
+
+func (suite *HTTPRegistryClientTestSuite) Test_1_Push() {
+ testPush(&suite.TestRegistry)
+}
+
+func (suite *HTTPRegistryClientTestSuite) Test_2_Pull() {
+ testPull(&suite.TestRegistry)
+}
+
+func (suite *HTTPRegistryClientTestSuite) Test_3_Tags() {
+ testTags(&suite.TestRegistry)
+}
+
+func (suite *HTTPRegistryClientTestSuite) Test_4_ManInTheMiddle() {
+ ref := fmt.Sprintf("%s/testrepo/supposedlysafechart:9.9.9", suite.CompromisedRegistryHost)
+
+ // returns content that does not match the expected digest
+ _, err := suite.RegistryClient.Pull(ref)
+ suite.NotNil(err)
+ suite.True(errors.Is(err, content.ErrMismatchedDigest))
+}
+
+func TestHTTPRegistryClientTestSuite(t *testing.T) {
+ suite.Run(t, new(HTTPRegistryClientTestSuite))
+}
diff --git a/helm/pkg/registry/client_insecure_tls_test.go b/helm/pkg/registry/client_insecure_tls_test.go
new file mode 100644
index 000000000..2774f5e6f
--- /dev/null
+++ b/helm/pkg/registry/client_insecure_tls_test.go
@@ -0,0 +1,77 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type InsecureTLSRegistryClientTestSuite struct {
+ TestRegistry
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) SetupSuite() {
+ // init test client
+ setup(&suite.TestRegistry, true, true)
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) TearDownSuite() {
+ teardown(&suite.TestRegistry)
+ _ = os.RemoveAll(suite.WorkspaceDir)
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) Test_0_Login() {
+ err := suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth("badverybad", "ohsobad"),
+ LoginOptInsecure(true))
+ suite.NotNil(err, "error logging into registry with bad credentials")
+
+ err = suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth(testUsername, testPassword),
+ LoginOptInsecure(true))
+ suite.Nil(err, "no error logging into registry with good credentials")
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) Test_1_Push() {
+ testPush(&suite.TestRegistry)
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) Test_2_Pull() {
+ testPull(&suite.TestRegistry)
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) Test_3_Tags() {
+ testTags(&suite.TestRegistry)
+}
+
+func (suite *InsecureTLSRegistryClientTestSuite) Test_4_Logout() {
+ err := suite.RegistryClient.Logout("this-host-aint-real:5000")
+ if err != nil {
+ // credential backend for mac generates an error
+ suite.NotNil(err, "failed to delete the credential for this-host-aint-real:5000")
+ }
+
+ err = suite.RegistryClient.Logout(suite.DockerRegistryHost)
+ suite.Nil(err, "no error logging out of registry")
+}
+
+func TestInsecureTLSRegistryClientTestSuite(t *testing.T) {
+ suite.Run(t, new(InsecureTLSRegistryClientTestSuite))
+}
diff --git a/helm/pkg/registry/client_test.go b/helm/pkg/registry/client_test.go
new file mode 100644
index 000000000..98a8b2ea3
--- /dev/null
+++ b/helm/pkg/registry/client_test.go
@@ -0,0 +1,168 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+ "oras.land/oras-go/v2/content/memory"
+)
+
+// Inspired by oras test
+// https://github.com/oras-project/oras-go/blob/05a2b09cbf2eab1df691411884dc4df741ec56ab/content_test.go#L1802
+func TestTagManifestTransformsReferences(t *testing.T) {
+ memStore := memory.New()
+ client := &Client{out: io.Discard}
+ ctx := t.Context()
+
+ refWithPlus := "test-registry.io/charts/test:1.0.0+metadata"
+ expectedRef := "test-registry.io/charts/test:1.0.0_metadata" // + becomes _
+
+ configDesc := ocispec.Descriptor{MediaType: ConfigMediaType, Digest: "sha256:config", Size: 100}
+ layers := []ocispec.Descriptor{{MediaType: ChartLayerMediaType, Digest: "sha256:layer", Size: 200}}
+
+ parsedRef, err := newReference(refWithPlus)
+ require.NoError(t, err)
+
+ desc, err := client.tagManifest(ctx, memStore, configDesc, layers, nil, parsedRef)
+ require.NoError(t, err)
+
+ transformedDesc, err := memStore.Resolve(ctx, expectedRef)
+ require.NoError(t, err, "Should find the reference with _ instead of +")
+ require.Equal(t, desc.Digest, transformedDesc.Digest)
+
+ _, err = memStore.Resolve(ctx, refWithPlus)
+ require.Error(t, err, "Should NOT find the reference with the original +")
+}
+
+// Verifies that Login always restores ForceAttemptOAuth2 to false on success.
+func TestLogin_ResetsForceAttemptOAuth2_OnSuccess(t *testing.T) {
+ t.Parallel()
+
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/v2/" {
+ // Accept either HEAD or GET
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ http.NotFound(w, r)
+ }))
+ defer srv.Close()
+
+ host := strings.TrimPrefix(srv.URL, "http://")
+
+ credFile := filepath.Join(t.TempDir(), "config.json")
+ c, err := NewClient(
+ ClientOptWriter(io.Discard),
+ ClientOptCredentialsFile(credFile),
+ )
+ if err != nil {
+ t.Fatalf("NewClient error: %v", err)
+ }
+
+ if c.authorizer == nil || c.authorizer.ForceAttemptOAuth2 {
+ t.Fatalf("expected ForceAttemptOAuth2 default to be false")
+ }
+
+ // Call Login with plain HTTP against our test server
+ if err := c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p")); err != nil {
+ t.Fatalf("Login error: %v", err)
+ }
+
+ if c.authorizer.ForceAttemptOAuth2 {
+ t.Errorf("ForceAttemptOAuth2 should be false after successful Login")
+ }
+}
+
+// Verifies that Login restores ForceAttemptOAuth2 to false even when ping fails.
+func TestLogin_ResetsForceAttemptOAuth2_OnFailure(t *testing.T) {
+ t.Parallel()
+
+ // Start and immediately close, so connections will fail
+ srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
+ host := strings.TrimPrefix(srv.URL, "http://")
+ srv.Close()
+
+ credFile := filepath.Join(t.TempDir(), "config.json")
+ c, err := NewClient(
+ ClientOptWriter(io.Discard),
+ ClientOptCredentialsFile(credFile),
+ )
+ if err != nil {
+ t.Fatalf("NewClient error: %v", err)
+ }
+
+ // Invoke Login, expect an error but ForceAttemptOAuth2 must end false
+ _ = c.Login(host, LoginOptPlainText(true), LoginOptBasicAuth("u", "p"))
+
+ if c.authorizer.ForceAttemptOAuth2 {
+ t.Errorf("ForceAttemptOAuth2 should be false after failed Login")
+ }
+}
+
+// TestWarnIfHostHasPath verifies that warnIfHostHasPath correctly detects path components.
+func TestWarnIfHostHasPath(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ host string
+ wantWarn bool
+ }{
+ {
+ name: "domain only",
+ host: "ghcr.io",
+ wantWarn: false,
+ },
+ {
+ name: "domain with port",
+ host: "localhost:8000",
+ wantWarn: false,
+ },
+ {
+ name: "domain with repository path",
+ host: "ghcr.io/terryhowe",
+ wantWarn: true,
+ },
+ {
+ name: "domain with nested path",
+ host: "ghcr.io/terryhowe/myrepo",
+ wantWarn: true,
+ },
+ {
+ name: "localhost with port and path",
+ host: "localhost:8000/myrepo",
+ wantWarn: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := warnIfHostHasPath(tt.host)
+ if got != tt.wantWarn {
+ t.Errorf("warnIfHostHasPath(%q) = %v, want %v", tt.host, got, tt.wantWarn)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/registry/client_tls_test.go b/helm/pkg/registry/client_tls_test.go
new file mode 100644
index 000000000..ddeeb3b66
--- /dev/null
+++ b/helm/pkg/registry/client_tls_test.go
@@ -0,0 +1,103 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type TLSRegistryClientTestSuite struct {
+ TestRegistry
+}
+
+func (suite *TLSRegistryClientTestSuite) SetupSuite() {
+ // init test client
+ setup(&suite.TestRegistry, true, false)
+}
+
+func (suite *TLSRegistryClientTestSuite) TearDownSuite() {
+ teardown(&suite.TestRegistry)
+ _ = os.RemoveAll(suite.WorkspaceDir)
+}
+
+func (suite *TLSRegistryClientTestSuite) Test_0_Login() {
+ err := suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth("badverybad", "ohsobad"),
+ LoginOptTLSClientConfig(tlsCert, tlsKey, tlsCA))
+ suite.NotNil(err, "error logging into registry with bad credentials")
+
+ err = suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth(testUsername, testPassword),
+ LoginOptTLSClientConfig(tlsCert, tlsKey, tlsCA))
+ suite.Nil(err, "no error logging into registry with good credentials")
+}
+
+func (suite *TLSRegistryClientTestSuite) Test_1_Login() {
+ err := suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth("badverybad", "ohsobad"),
+ LoginOptTLSClientConfigFromConfig(&tls.Config{}))
+ suite.NotNil(err, "error logging into registry with bad credentials")
+
+ // Create a *tls.Config from tlsCert, tlsKey, and tlsCA.
+ cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
+ suite.Nil(err, "error loading x509 key pair")
+ rootCAs := x509.NewCertPool()
+ caCert, err := os.ReadFile(tlsCA)
+ suite.Nil(err, "error reading CA certificate")
+ rootCAs.AppendCertsFromPEM(caCert)
+ conf := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: rootCAs,
+ }
+
+ err = suite.RegistryClient.Login(suite.DockerRegistryHost,
+ LoginOptBasicAuth(testUsername, testPassword),
+ LoginOptTLSClientConfigFromConfig(conf))
+ suite.Nil(err, "no error logging into registry with good credentials")
+}
+
+func (suite *TLSRegistryClientTestSuite) Test_1_Push() {
+ testPush(&suite.TestRegistry)
+}
+
+func (suite *TLSRegistryClientTestSuite) Test_2_Pull() {
+ testPull(&suite.TestRegistry)
+}
+
+func (suite *TLSRegistryClientTestSuite) Test_3_Tags() {
+ testTags(&suite.TestRegistry)
+}
+
+func (suite *TLSRegistryClientTestSuite) Test_4_Logout() {
+ err := suite.RegistryClient.Logout("this-host-aint-real:5000")
+ if err != nil {
+ // credential backend for mac generates an error
+ suite.NotNil(err, "failed to delete the credential for this-host-aint-real:5000")
+ }
+
+ err = suite.RegistryClient.Logout(suite.DockerRegistryHost)
+ suite.Nil(err, "no error logging out of registry")
+}
+
+func TestTLSRegistryClientTestSuite(t *testing.T) {
+ suite.Run(t, new(TLSRegistryClientTestSuite))
+}
diff --git a/helm/pkg/registry/constants.go b/helm/pkg/registry/constants.go
new file mode 100644
index 000000000..c455cf314
--- /dev/null
+++ b/helm/pkg/registry/constants.go
@@ -0,0 +1,37 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v4/pkg/registry"
+
+const (
+ // OCIScheme is the URL scheme for OCI-based requests
+ OCIScheme = "oci"
+
+ // CredentialsFileBasename is the filename for auth credentials file
+ CredentialsFileBasename = "registry/config.json"
+
+ // ConfigMediaType is the reserved media type for the Helm chart manifest config
+ ConfigMediaType = "application/vnd.cncf.helm.config.v1+json"
+
+ // ChartLayerMediaType is the reserved media type for Helm chart package content
+ ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
+
+ // ProvLayerMediaType is the reserved media type for Helm chart provenance files
+ ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov"
+
+ // LegacyChartLayerMediaType is the legacy reserved media type for Helm chart package content.
+ LegacyChartLayerMediaType = "application/tar+gzip"
+)
diff --git a/helm/pkg/registry/generic.go b/helm/pkg/registry/generic.go
new file mode 100644
index 000000000..b46133d91
--- /dev/null
+++ b/helm/pkg/registry/generic.go
@@ -0,0 +1,161 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "slices"
+ "sort"
+ "sync"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "oras.land/oras-go/v2"
+ "oras.land/oras-go/v2/content"
+ "oras.land/oras-go/v2/content/memory"
+ "oras.land/oras-go/v2/registry/remote"
+ "oras.land/oras-go/v2/registry/remote/auth"
+ "oras.land/oras-go/v2/registry/remote/credentials"
+)
+
+// GenericClient provides low-level OCI operations without artifact-specific assumptions
+type GenericClient struct {
+ debug bool
+ enableCache bool
+ credentialsFile string
+ username string
+ password string
+ out io.Writer
+ authorizer *auth.Client
+ registryAuthorizer RemoteClient
+ credentialsStore credentials.Store
+ httpClient *http.Client
+ plainHTTP bool
+}
+
+// GenericPullOptions configures a generic pull operation
+type GenericPullOptions struct {
+ // MediaTypes to include in the pull (empty means all)
+ AllowedMediaTypes []string
+ // Skip descriptors with these media types
+ SkipMediaTypes []string
+ // Custom PreCopy function for filtering
+ PreCopy func(context.Context, ocispec.Descriptor) error
+}
+
+// GenericPullResult contains the result of a generic pull operation
+type GenericPullResult struct {
+ Manifest ocispec.Descriptor
+ Descriptors []ocispec.Descriptor
+ MemoryStore *memory.Store
+ Ref string
+}
+
+// NewGenericClient creates a new generic OCI client from an existing Client
+func NewGenericClient(client *Client) *GenericClient {
+ return &GenericClient{
+ debug: client.debug,
+ enableCache: client.enableCache,
+ credentialsFile: client.credentialsFile,
+ username: client.username,
+ password: client.password,
+ out: client.out,
+ authorizer: client.authorizer,
+ registryAuthorizer: client.registryAuthorizer,
+ credentialsStore: client.credentialsStore,
+ httpClient: client.httpClient,
+ plainHTTP: client.plainHTTP,
+ }
+}
+
+// PullGeneric performs a generic OCI pull without artifact-specific assumptions
+func (c *GenericClient) PullGeneric(ref string, options GenericPullOptions) (*GenericPullResult, error) {
+ parsedRef, err := newReference(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ memoryStore := memory.New()
+ var descriptors []ocispec.Descriptor
+
+ // Set up a repository with authentication and configuration
+ repository, err := remote.NewRepository(parsedRef.String())
+ if err != nil {
+ return nil, err
+ }
+ repository.PlainHTTP = c.plainHTTP
+ repository.Client = c.authorizer
+
+ ctx := context.Background()
+
+ // Prepare allowed media types for filtering
+ var allowedMediaTypes []string
+ if len(options.AllowedMediaTypes) > 0 {
+ allowedMediaTypes = make([]string, len(options.AllowedMediaTypes))
+ copy(allowedMediaTypes, options.AllowedMediaTypes)
+ sort.Strings(allowedMediaTypes)
+ }
+
+ var mu sync.Mutex
+ manifest, err := oras.Copy(ctx, repository, parsedRef.String(), memoryStore, "", oras.CopyOptions{
+ CopyGraphOptions: oras.CopyGraphOptions{
+ PreCopy: func(ctx context.Context, desc ocispec.Descriptor) error {
+ // Apply a custom PreCopy function if provided
+ if options.PreCopy != nil {
+ if err := options.PreCopy(ctx, desc); err != nil {
+ return err
+ }
+ }
+
+ mediaType := desc.MediaType
+
+ // Skip media types if specified
+ if slices.Contains(options.SkipMediaTypes, mediaType) {
+ return oras.SkipNode
+ }
+
+ // Filter by allowed media types if specified
+ if len(allowedMediaTypes) > 0 {
+ if i := sort.SearchStrings(allowedMediaTypes, mediaType); i >= len(allowedMediaTypes) || allowedMediaTypes[i] != mediaType {
+ return oras.SkipNode
+ }
+ }
+
+ mu.Lock()
+ descriptors = append(descriptors, desc)
+ mu.Unlock()
+ return nil
+ },
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &GenericPullResult{
+ Manifest: manifest,
+ Descriptors: descriptors,
+ MemoryStore: memoryStore,
+ Ref: parsedRef.String(),
+ }, nil
+}
+
+// GetDescriptorData retrieves the data for a specific descriptor
+func (c *GenericClient) GetDescriptorData(store *memory.Store, desc ocispec.Descriptor) ([]byte, error) {
+ return content.FetchAll(context.Background(), store, desc)
+}
diff --git a/helm/pkg/registry/main_test.go b/helm/pkg/registry/main_test.go
new file mode 100644
index 000000000..4f6e11e4f
--- /dev/null
+++ b/helm/pkg/registry/main_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "net"
+ "os"
+ "testing"
+
+ "github.com/foxcpp/go-mockdns"
+)
+
+func TestMain(m *testing.M) {
+ // A mock DNS server needed for TLS connection testing.
+ var srv *mockdns.Server
+ var err error
+
+ srv, err = mockdns.NewServer(map[string]mockdns.Zone{
+ "helm-test-registry.": {
+ A: []string{"127.0.0.1"},
+ },
+ }, false)
+ if err != nil {
+ panic(err)
+ }
+
+ saveDialFunction := net.DefaultResolver.Dial
+ srv.PatchNet(net.DefaultResolver)
+
+ // Run all tests in the package
+ code := m.Run()
+
+ net.DefaultResolver.Dial = saveDialFunction
+ _ = srv.Close()
+
+ os.Exit(code)
+}
diff --git a/helm/pkg/registry/plugin.go b/helm/pkg/registry/plugin.go
new file mode 100644
index 000000000..e4b4afa24
--- /dev/null
+++ b/helm/pkg/registry/plugin.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Plugin-specific constants
+const (
+ // PluginArtifactType is the artifact type for Helm plugins
+ PluginArtifactType = "application/vnd.helm.plugin.v1+json"
+)
+
+// PluginPullOptions configures a plugin pull operation
+type PluginPullOptions struct {
+ // PluginName specifies the expected plugin name for layer validation
+ PluginName string
+}
+
+// PluginPullResult contains the result of a plugin pull operation
+type PluginPullResult struct {
+ Manifest ocispec.Descriptor
+ PluginData []byte
+ Prov struct {
+ Data []byte
+ }
+ Ref string
+ PluginName string
+}
+
+// PullPlugin downloads a plugin from an OCI registry using artifact type
+func (c *Client) PullPlugin(ref string, pluginName string, options ...PluginPullOption) (*PluginPullResult, error) {
+ operation := &pluginPullOperation{
+ pluginName: pluginName,
+ }
+ for _, option := range options {
+ option(operation)
+ }
+
+ // Use generic client for the pull operation with artifact type filtering
+ genericClient := c.Generic()
+ genericResult, err := genericClient.PullGeneric(ref, GenericPullOptions{
+ // Allow manifests and all layer types - we'll validate artifact type after download
+ AllowedMediaTypes: []string{
+ ocispec.MediaTypeImageManifest,
+ "application/vnd.oci.image.layer.v1.tar",
+ "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Process the result with plugin-specific logic
+ return c.processPluginPull(genericResult, operation.pluginName)
+}
+
+// processPluginPull handles plugin-specific processing of a generic pull result using artifact type
+func (c *Client) processPluginPull(genericResult *GenericPullResult, pluginName string) (*PluginPullResult, error) {
+ // First validate that this is actually a plugin artifact
+ manifestData, err := c.Generic().GetDescriptorData(genericResult.MemoryStore, genericResult.Manifest)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve manifest: %w", err)
+ }
+
+ // Parse the manifest to check artifact type
+ var manifest ocispec.Manifest
+ if err := json.Unmarshal(manifestData, &manifest); err != nil {
+ return nil, fmt.Errorf("unable to parse manifest: %w", err)
+ }
+
+ // Validate artifact type (for OCI v1.1+ manifests)
+ if manifest.ArtifactType != "" && manifest.ArtifactType != PluginArtifactType {
+ return nil, fmt.Errorf("expected artifact type %s, got %s", PluginArtifactType, manifest.ArtifactType)
+ }
+
+ // For backwards compatibility, also check config media type if no artifact type
+ if manifest.ArtifactType == "" && manifest.Config.MediaType != PluginArtifactType {
+ return nil, fmt.Errorf("expected config media type %s for legacy compatibility, got %s", PluginArtifactType, manifest.Config.MediaType)
+ }
+
+ // Find the plugin tarball and optional provenance using NAME-VERSION.tgz format
+ var pluginDescriptor *ocispec.Descriptor
+ var provenanceDescriptor *ocispec.Descriptor
+ var foundProvenanceName string
+
+ // Look for layers with the expected titles/annotations
+ for _, layer := range manifest.Layers {
+ d := layer
+ // Check for title annotation
+ if title, exists := d.Annotations[ocispec.AnnotationTitle]; exists {
+ // Check if this looks like a plugin tarball: {pluginName}-{version}.tgz
+ if pluginDescriptor == nil && strings.HasPrefix(title, pluginName+"-") && strings.HasSuffix(title, ".tgz") {
+ pluginDescriptor = &d
+ }
+ // Check if this looks like a plugin provenance: {pluginName}-{version}.tgz.prov
+ if provenanceDescriptor == nil && strings.HasPrefix(title, pluginName+"-") && strings.HasSuffix(title, ".tgz.prov") {
+ provenanceDescriptor = &d
+ foundProvenanceName = title
+ }
+ }
+ }
+
+ // Plugin tarball is required
+ if pluginDescriptor == nil {
+ return nil, fmt.Errorf("required layer matching pattern %s-VERSION.tgz not found in manifest", pluginName)
+ }
+
+ // Build plugin-specific result
+ result := &PluginPullResult{
+ Manifest: genericResult.Manifest,
+ Ref: genericResult.Ref,
+ PluginName: pluginName,
+ }
+
+ // Fetch plugin data using generic client
+ genericClient := c.Generic()
+ result.PluginData, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *pluginDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve plugin data with digest %s: %w", pluginDescriptor.Digest, err)
+ }
+
+ // Fetch provenance data if available
+ if provenanceDescriptor != nil {
+ result.Prov.Data, err = genericClient.GetDescriptorData(genericResult.MemoryStore, *provenanceDescriptor)
+ if err != nil {
+ return nil, fmt.Errorf("unable to retrieve provenance data with digest %s: %w", provenanceDescriptor.Digest, err)
+ }
+ }
+
+ _, _ = fmt.Fprintf(c.out, "Pulled plugin: %s\n", result.Ref)
+ _, _ = fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+ if result.Prov.Data != nil {
+ _, _ = fmt.Fprintf(c.out, "Provenance: %s\n", foundProvenanceName)
+ }
+
+ if strings.Contains(result.Ref, "_") {
+ _, _ = fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref)
+ _, _ = fmt.Fprint(c.out, registryUnderscoreMessage+"\n")
+ }
+
+ return result, nil
+}
+
+// Plugin pull operation types and options
+type (
+ pluginPullOperation struct {
+ pluginName string
+ withProv bool
+ }
+
+ // PluginPullOption allows customizing plugin pull operations
+ PluginPullOption func(*pluginPullOperation)
+)
+
+// PluginPullOptWithPluginName sets the plugin name for validation
+func PluginPullOptWithPluginName(name string) PluginPullOption {
+ return func(operation *pluginPullOperation) {
+ operation.pluginName = name
+ }
+}
+
+// GetPluginName extracts the plugin name from an OCI reference using proper reference parsing
+func GetPluginName(source string) (string, error) {
+ ref, err := newReference(source)
+ if err != nil {
+ return "", fmt.Errorf("invalid OCI reference: %w", err)
+ }
+
+ // Extract plugin name from the repository path
+ // e.g., "ghcr.io/user/plugin-name:v1.0.0" -> Repository: "user/plugin-name"
+ repository := ref.Repository
+ if repository == "" {
+ return "", fmt.Errorf("invalid OCI reference: missing repository")
+ }
+
+ // Get the last part of the repository path as the plugin name
+ parts := strings.Split(repository, "/")
+ pluginName := parts[len(parts)-1]
+
+ if pluginName == "" {
+ return "", fmt.Errorf("invalid OCI reference: cannot determine plugin name from repository %s", repository)
+ }
+
+ return pluginName, nil
+}
+
+// PullPluginOptWithProv configures the pull to fetch provenance data
+func PullPluginOptWithProv(withProv bool) PluginPullOption {
+ return func(operation *pluginPullOperation) {
+ operation.withProv = withProv
+ }
+}
diff --git a/helm/pkg/registry/plugin_test.go b/helm/pkg/registry/plugin_test.go
new file mode 100644
index 000000000..f8525829c
--- /dev/null
+++ b/helm/pkg/registry/plugin_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "testing"
+)
+
+func TestGetPluginName(t *testing.T) {
+ tests := []struct {
+ name string
+ source string
+ expected string
+ expectErr bool
+ }{
+ {
+ name: "valid OCI reference with tag",
+ source: "oci://ghcr.io/user/plugin-name:v1.0.0",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference with digest",
+ source: "oci://ghcr.io/user/plugin-name@sha256:1234567890abcdef",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference without tag",
+ source: "oci://ghcr.io/user/plugin-name",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference with multiple path segments",
+ source: "oci://registry.example.com/org/team/plugin-name:latest",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference with plus signs in tag",
+ source: "oci://registry.example.com/user/plugin-name:v1.0.0+build.1",
+ expected: "plugin-name",
+ },
+ {
+ name: "valid OCI reference - single path segment",
+ source: "oci://registry.example.com/plugin",
+ expected: "plugin",
+ },
+ {
+ name: "invalid OCI reference - no repository",
+ source: "oci://registry.example.com",
+ expectErr: true,
+ },
+ {
+ name: "invalid OCI reference - malformed",
+ source: "not-an-oci-reference",
+ expectErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ pluginName, err := GetPluginName(tt.source)
+
+ if tt.expectErr {
+ if err == nil {
+ t.Errorf("expected error but got none")
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ return
+ }
+
+ if pluginName != tt.expected {
+ t.Errorf("expected plugin name %q, got %q", tt.expected, pluginName)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/registry/reference.go b/helm/pkg/registry/reference.go
new file mode 100644
index 000000000..9a98cf5c9
--- /dev/null
+++ b/helm/pkg/registry/reference.go
@@ -0,0 +1,84 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "fmt"
+ "strings"
+
+ "oras.land/oras-go/v2/registry"
+)
+
+type reference struct {
+ orasReference registry.Reference
+ Registry string
+ Repository string
+ Tag string
+ Digest string
+}
+
+// newReference will parse and validate the reference, and clean tags when
+// applicable tags are only cleaned when plus (+) signs are present and are
+// converted to underscores (_) before pushing
+// See https://github.com/helm/helm/issues/10166
+func newReference(raw string) (result reference, err error) {
+ // Remove the oci:// prefix if it is there
+ raw = strings.TrimPrefix(raw, OCIScheme+"://")
+
+ // The sole possible reference modification is replacing plus (+) signs
+ // present in tags with underscores (_). To do this properly, we first
+ // need to identify a tag, and then pass it on to the reference parser
+ // NOTE: Passing immediately to the reference parser will fail since (+)
+ // signs are an invalid tag character, and simply replacing all plus (+)
+ // occurrences could invalidate other portions of the URI
+ lastIndex := strings.LastIndex(raw, "@")
+ if lastIndex >= 0 {
+ result.Digest = raw[(lastIndex + 1):]
+ raw = raw[:lastIndex]
+ }
+ parts := strings.Split(raw, ":")
+ if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") {
+ tag := parts[len(parts)-1]
+
+ if tag != "" {
+ // Replace any plus (+) signs with known underscore (_) conversion
+ newTag := strings.ReplaceAll(tag, "+", "_")
+ raw = strings.ReplaceAll(raw, tag, newTag)
+ }
+ }
+
+ result.orasReference, err = registry.ParseReference(raw)
+ if err != nil {
+ return result, err
+ }
+ result.Registry = result.orasReference.Registry
+ result.Repository = result.orasReference.Repository
+ result.Tag = result.orasReference.Reference
+ return result, nil
+}
+
+func (r *reference) String() string {
+ if r.Tag == "" {
+ return r.orasReference.String() + "@" + r.Digest
+ }
+ return r.orasReference.String()
+}
+
+// IsOCI determines whether a URL is to be treated as an OCI URL
+func IsOCI(url string) bool {
+ return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme))
+}
diff --git a/helm/pkg/registry/reference_test.go b/helm/pkg/registry/reference_test.go
new file mode 100644
index 000000000..b6872cc37
--- /dev/null
+++ b/helm/pkg/registry/reference_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import "testing"
+
+func verify(t *testing.T, actual reference, registry, repository, tag, digest string) {
+ t.Helper()
+ if registry != actual.orasReference.Registry {
+ t.Errorf("Oras reference registry expected %v actual %v", registry, actual.Registry)
+ }
+ if repository != actual.orasReference.Repository {
+ t.Errorf("Oras reference repository expected %v actual %v", repository, actual.Repository)
+ }
+ if tag != actual.orasReference.Reference {
+ t.Errorf("Oras reference reference expected %v actual %v", tag, actual.Tag)
+ }
+ if registry != actual.Registry {
+ t.Errorf("Registry expected %v actual %v", registry, actual.Registry)
+ }
+ if repository != actual.Repository {
+ t.Errorf("Repository expected %v actual %v", repository, actual.Repository)
+ }
+ if tag != actual.Tag {
+ t.Errorf("Tag expected %v actual %v", tag, actual.Tag)
+ }
+ if digest != actual.Digest {
+ t.Errorf("Digest expected %v actual %v", digest, actual.Digest)
+ }
+ expectedString := registry
+ if repository != "" {
+ expectedString = expectedString + "/" + repository
+ }
+ if tag != "" {
+ expectedString = expectedString + ":" + tag
+ } else {
+ expectedString = expectedString + "@" + digest
+ }
+ if actual.String() != expectedString {
+ t.Errorf("String expected %s actual %s", expectedString, actual.String())
+ }
+}
+
+func TestNewReference(t *testing.T) {
+ actual, err := newReference("registry.example.com/repository:1.0@sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ verify(t, actual, "registry.example.com", "repository", "1.0", "sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888")
+
+ actual, err = newReference("oci://registry.example.com/repository:1.0@sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ verify(t, actual, "registry.example.com", "repository", "1.0", "sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888")
+
+ actual, err = newReference("a/b:1@c")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ verify(t, actual, "a", "b", "1", "c")
+
+ actual, err = newReference("a/b:@")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ verify(t, actual, "a", "b", "", "")
+
+ actual, err = newReference("registry.example.com/repository:1.0+001")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ verify(t, actual, "registry.example.com", "repository", "1.0_001", "")
+
+ actual, err = newReference("thing:1.0")
+ if err == nil {
+ t.Errorf("Expect error error %v", err)
+ }
+ verify(t, actual, "", "", "", "")
+
+ actual, err = newReference("registry.example.com/the/repository@sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ verify(t, actual, "registry.example.com", "the/repository", "", "sha256:c6841b3a895f1444a6738b5d04564a57e860ce42f8519c3be807fb6d9bee7888")
+}
diff --git a/helm/pkg/registry/registry_test.go b/helm/pkg/registry/registry_test.go
new file mode 100644
index 000000000..d4921c50b
--- /dev/null
+++ b/helm/pkg/registry/registry_test.go
@@ -0,0 +1,390 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/distribution/distribution/v3/configuration"
+ "github.com/distribution/distribution/v3/registry"
+ _ "github.com/distribution/distribution/v3/registry/auth/htpasswd"
+ _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "golang.org/x/crypto/bcrypt"
+
+ "helm.sh/helm/v4/internal/tlsutil"
+)
+
+const (
+ tlsServerKey = "./testdata/tls/server.key"
+ tlsServerCert = "./testdata/tls/server.crt"
+ tlsCA = "./testdata/tls/ca.crt"
+ tlsKey = "./testdata/tls/client.key"
+ tlsCert = "./testdata/tls/client.crt"
+)
+
+var (
+ testWorkspaceDir = "helm-registry-test"
+ testHtpasswdFileBasename = "authtest.htpasswd"
+ testUsername = "myuser"
+ testPassword = "mypass"
+)
+
+type TestRegistry struct {
+ suite.Suite
+ Out io.Writer
+ DockerRegistryHost string
+ CompromisedRegistryHost string
+ WorkspaceDir string
+ RegistryClient *Client
+ dockerRegistry *registry.Registry
+}
+
+func setup(suite *TestRegistry, tlsEnabled, insecure bool) {
+ suite.WorkspaceDir = testWorkspaceDir
+ err := os.RemoveAll(suite.WorkspaceDir)
+ require.NoError(suite.T(), err, "no error removing test workspace dir")
+ err = os.Mkdir(suite.WorkspaceDir, 0700)
+ require.NoError(suite.T(), err, "no error creating test workspace dir")
+
+ var out bytes.Buffer
+
+ suite.Out = &out
+ credentialsFile := filepath.Join(suite.WorkspaceDir, CredentialsFileBasename)
+
+ // init test client
+ opts := []ClientOption{
+ ClientOptDebug(true),
+ ClientOptEnableCache(true),
+ ClientOptWriter(suite.Out),
+ ClientOptCredentialsFile(credentialsFile),
+ ClientOptBasicAuth(testUsername, testPassword),
+ }
+
+ if tlsEnabled {
+ var tlsConf *tls.Config
+ if insecure {
+ tlsConf, err = tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(true),
+ )
+ } else {
+ tlsConf, err = tlsutil.NewTLSConfig(
+ tlsutil.WithCertKeyPairFiles(tlsCert, tlsKey),
+ tlsutil.WithCAFile(tlsCA),
+ )
+ }
+ httpClient := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsConf,
+ },
+ }
+ suite.Nil(err, "no error loading tls config")
+ opts = append(opts, ClientOptHTTPClient(httpClient))
+ } else {
+ opts = append(opts, ClientOptPlainHTTP())
+ }
+
+ suite.RegistryClient, err = NewClient(opts...)
+ suite.Nil(err, "no error creating registry client")
+
+ // create htpasswd file (w BCrypt, which is required)
+ pwBytes, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost)
+ suite.Nil(err, "no error generating bcrypt password for test htpasswd file")
+ htpasswdPath := filepath.Join(suite.WorkspaceDir, testHtpasswdFileBasename)
+ err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0644)
+ suite.Nil(err, "no error creating test htpasswd file")
+
+ // Registry config
+ config := &configuration.Configuration{}
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ suite.Nil(err, "no error finding free port for test registry")
+ defer func() { _ = ln.Close() }()
+
+ // Change the registry host to another host which is not localhost.
+ // This is required because Docker enforces HTTP if the registry
+ // host is localhost/127.0.0.1.
+ port := ln.Addr().(*net.TCPAddr).Port
+ suite.DockerRegistryHost = fmt.Sprintf("helm-test-registry:%d", port)
+
+ config.HTTP.Addr = ln.Addr().String()
+ config.HTTP.DrainTimeout = time.Duration(10) * time.Second
+ config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
+
+ config.Auth = configuration.Auth{
+ "htpasswd": configuration.Parameters{
+ "realm": "localhost",
+ "path": htpasswdPath,
+ },
+ }
+
+ // config tls
+ if tlsEnabled {
+ // TLS config
+ // this set tlsConf.ClientAuth = tls.RequireAndVerifyClientCert in the
+ // server tls config
+ config.HTTP.TLS.Certificate = tlsServerCert
+ config.HTTP.TLS.Key = tlsServerKey
+ // Skip client authentication if the registry is insecure.
+ if !insecure {
+ config.HTTP.TLS.ClientCAs = []string{tlsCA}
+ }
+ }
+ suite.dockerRegistry, err = registry.NewRegistry(context.Background(), config)
+ suite.Nil(err, "no error creating test registry")
+
+ suite.CompromisedRegistryHost = initCompromisedRegistryTestServer()
+ go func() {
+ _ = suite.dockerRegistry.ListenAndServe()
+ }()
+}
+
+func teardown(suite *TestRegistry) {
+ if suite.dockerRegistry != nil {
+ _ = suite.dockerRegistry.Shutdown(context.Background())
+ }
+}
+
+func initCompromisedRegistryTestServer() string {
+ s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.URL.Path, "manifests") {
+ w.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json")
+ w.WriteHeader(http.StatusOK)
+
+ _, _ = fmt.Fprintf(w, `{ "schemaVersion": 2, "config": {
+ "mediaType": "%s",
+ "digest": "sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133",
+ "size": 181
+ },
+ "layers": [
+ {
+ "mediaType": "%s",
+ "digest": "sha256:ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb",
+ "size": 1
+ }
+ ]
+}`, ConfigMediaType, ChartLayerMediaType)
+ } else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:a705ee2789ab50a5ba20930f246dbd5cc01ff9712825bb98f57ee8414377f133" {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("{\"name\":\"mychart\",\"version\":\"0.1.0\",\"description\":\"A Helm chart for Kubernetes\\n" +
+ "an 'application' or a 'library' chart.\",\"apiVersion\":\"v2\",\"appVersion\":\"1.16.0\",\"type\":" +
+ "\"application\"}"))
+ } else if r.URL.Path == "/v2/testrepo/supposedlysafechart/blobs/sha256:ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb" {
+ w.Header().Set("Content-Type", ChartLayerMediaType)
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("b"))
+ } else {
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ }))
+
+ u, _ := url.Parse(s.URL)
+ return fmt.Sprintf("localhost:%s", u.Port())
+}
+
+func testPush(suite *TestRegistry) {
+
+ testingChartCreationTime := "1977-09-02T22:04:05Z"
+
+ // Bad bytes
+ ref := fmt.Sprintf("%s/testrepo/testchart:1.2.3", suite.DockerRegistryHost)
+ _, err := suite.RegistryClient.Push([]byte("hello"), ref, PushOptCreationTime(testingChartCreationTime))
+ suite.NotNil(err, "error pushing non-chart bytes")
+
+ // Load a test chart
+ chartData, err := os.ReadFile("../repo/v1/repotest/testdata/examplechart-0.1.0.tgz")
+ suite.Nil(err, "no error loading test chart")
+ meta, err := extractChartMeta(chartData)
+ suite.Nil(err, "no error extracting chart meta")
+
+ // non-strict ref (chart name)
+ ref = fmt.Sprintf("%s/testrepo/boop:%s", suite.DockerRegistryHost, meta.Version)
+ _, err = suite.RegistryClient.Push(chartData, ref, PushOptCreationTime(testingChartCreationTime))
+ suite.NotNil(err, "error pushing non-strict ref (bad basename)")
+
+ // non-strict ref (chart name), with strict mode disabled
+ _, err = suite.RegistryClient.Push(chartData, ref, PushOptStrictMode(false), PushOptCreationTime(testingChartCreationTime))
+ suite.Nil(err, "no error pushing non-strict ref (bad basename), with strict mode disabled")
+
+ // non-strict ref (chart version)
+ ref = fmt.Sprintf("%s/testrepo/%s:latest", suite.DockerRegistryHost, meta.Name)
+ _, err = suite.RegistryClient.Push(chartData, ref, PushOptCreationTime(testingChartCreationTime))
+ suite.NotNil(err, "error pushing non-strict ref (bad tag)")
+
+ // non-strict ref (chart version), with strict mode disabled
+ _, err = suite.RegistryClient.Push(chartData, ref, PushOptStrictMode(false), PushOptCreationTime(testingChartCreationTime))
+ suite.Nil(err, "no error pushing non-strict ref (bad tag), with strict mode disabled")
+
+ // basic push, good ref
+ chartData, err = os.ReadFile("../downloader/testdata/local-subchart-0.1.0.tgz")
+ suite.Nil(err, "no error loading test chart")
+ meta, err = extractChartMeta(chartData)
+ suite.Nil(err, "no error extracting chart meta")
+ ref = fmt.Sprintf("%s/testrepo/%s:%s", suite.DockerRegistryHost, meta.Name, meta.Version)
+ _, err = suite.RegistryClient.Push(chartData, ref, PushOptCreationTime(testingChartCreationTime))
+ suite.Nil(err, "no error pushing good ref")
+
+ _, err = suite.RegistryClient.Pull(ref)
+ suite.Nil(err, "no error pulling a simple chart")
+
+ // Load another test chart
+ chartData, err = os.ReadFile("../downloader/testdata/signtest-0.1.0.tgz")
+ suite.Nil(err, "no error loading test chart")
+ meta, err = extractChartMeta(chartData)
+ suite.Nil(err, "no error extracting chart meta")
+
+ // Load prov file
+ provData, err := os.ReadFile("../downloader/testdata/signtest-0.1.0.tgz.prov")
+ suite.Nil(err, "no error loading test prov")
+
+ // push with prov
+ ref = fmt.Sprintf("%s/testrepo/%s:%s", suite.DockerRegistryHost, meta.Name, meta.Version)
+ result, err := suite.RegistryClient.Push(chartData, ref, PushOptProvData(provData), PushOptCreationTime(testingChartCreationTime))
+ suite.Nil(err, "no error pushing good ref with prov")
+
+ _, err = suite.RegistryClient.Pull(ref, PullOptWithProv(true))
+ suite.Nil(err, "no error pulling a simple chart")
+
+ // Validate the output
+ // Note: these digests/sizes etc may change if the test chart/prov files are modified,
+ // or if the format of the OCI manifest changes
+ suite.Equal(ref, result.Ref)
+ suite.Equal(meta.Name, result.Chart.Meta.Name)
+ suite.Equal(meta.Version, result.Chart.Meta.Version)
+ suite.Equal(int64(742), result.Manifest.Size)
+ suite.Equal(int64(99), result.Config.Size)
+ suite.Equal(int64(973), result.Chart.Size)
+ suite.Equal(int64(695), result.Prov.Size)
+ suite.Equal(
+ "sha256:fbbade96da6050f68f94f122881e3b80051a18f13ab5f4081868dd494538f5c2",
+ result.Manifest.Digest)
+ suite.Equal(
+ "sha256:8d17cb6bf6ccd8c29aace9a658495cbd5e2e87fc267876e86117c7db681c9580",
+ result.Config.Digest)
+ suite.Equal(
+ "sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55",
+ result.Chart.Digest)
+ suite.Equal(
+ "sha256:b0a02b7412f78ae93324d48df8fcc316d8482e5ad7827b5b238657a29a22f256",
+ result.Prov.Digest)
+}
+
+func testPull(suite *TestRegistry) {
+ // bad/missing ref
+ ref := fmt.Sprintf("%s/testrepo/no-existy:1.2.3", suite.DockerRegistryHost)
+ _, err := suite.RegistryClient.Pull(ref)
+ suite.NotNil(err, "error on bad/missing ref")
+
+ // Load test chart (to build ref pushed in previous test)
+ chartData, err := os.ReadFile("../downloader/testdata/local-subchart-0.1.0.tgz")
+ suite.Nil(err, "no error loading test chart")
+ meta, err := extractChartMeta(chartData)
+ suite.Nil(err, "no error extracting chart meta")
+ ref = fmt.Sprintf("%s/testrepo/%s:%s", suite.DockerRegistryHost, meta.Name, meta.Version)
+
+ // Simple pull, chart only
+ _, err = suite.RegistryClient.Pull(ref)
+ suite.Nil(err, "no error pulling a simple chart")
+
+ // Simple pull with prov (no prov uploaded)
+ _, err = suite.RegistryClient.Pull(ref, PullOptWithProv(true))
+ suite.NotNil(err, "error pulling a chart with prov when no prov exists")
+
+ // Simple pull with prov, ignoring missing prov
+ _, err = suite.RegistryClient.Pull(ref,
+ PullOptWithProv(true),
+ PullOptIgnoreMissingProv(true))
+ suite.Nil(err,
+ "no error pulling a chart with prov when no prov exists, ignoring missing")
+
+ // Load test chart (to build ref pushed in previous test)
+ chartData, err = os.ReadFile("../downloader/testdata/signtest-0.1.0.tgz")
+ suite.Nil(err, "no error loading test chart")
+ meta, err = extractChartMeta(chartData)
+ suite.Nil(err, "no error extracting chart meta")
+ ref = fmt.Sprintf("%s/testrepo/%s:%s", suite.DockerRegistryHost, meta.Name, meta.Version)
+
+ // Load prov file
+ provData, err := os.ReadFile("../downloader/testdata/signtest-0.1.0.tgz.prov")
+ suite.Nil(err, "no error loading test prov")
+
+ // no chart and no prov causes error
+ _, err = suite.RegistryClient.Pull(ref,
+ PullOptWithChart(false),
+ PullOptWithProv(false))
+ suite.NotNil(err, "error on both no chart and no prov")
+
+ // full pull with chart and prov
+ result, err := suite.RegistryClient.Pull(ref, PullOptWithProv(true))
+ suite.Require().Nil(err, "no error pulling a chart with prov")
+
+ // Validate the output
+ // Note: these digests/sizes etc may change if the test chart/prov files are modified,
+ // or if the format of the OCI manifest changes
+ suite.Equal(ref, result.Ref)
+ suite.Equal(meta.Name, result.Chart.Meta.Name)
+ suite.Equal(meta.Version, result.Chart.Meta.Version)
+ suite.Equal(int64(742), result.Manifest.Size)
+ suite.Equal(int64(99), result.Config.Size)
+ suite.Equal(int64(973), result.Chart.Size)
+ suite.Equal(int64(695), result.Prov.Size)
+ suite.Equal(
+ "sha256:fbbade96da6050f68f94f122881e3b80051a18f13ab5f4081868dd494538f5c2",
+ result.Manifest.Digest)
+ suite.Equal(
+ "sha256:8d17cb6bf6ccd8c29aace9a658495cbd5e2e87fc267876e86117c7db681c9580",
+ result.Config.Digest)
+ suite.Equal(
+ "sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55",
+ result.Chart.Digest)
+ suite.Equal(
+ "sha256:b0a02b7412f78ae93324d48df8fcc316d8482e5ad7827b5b238657a29a22f256",
+ result.Prov.Digest)
+ suite.Equal("{\"schemaVersion\":2,\"config\":{\"mediaType\":\"application/vnd.cncf.helm.config.v1+json\",\"digest\":\"sha256:8d17cb6bf6ccd8c29aace9a658495cbd5e2e87fc267876e86117c7db681c9580\",\"size\":99},\"layers\":[{\"mediaType\":\"application/vnd.cncf.helm.chart.provenance.v1.prov\",\"digest\":\"sha256:b0a02b7412f78ae93324d48df8fcc316d8482e5ad7827b5b238657a29a22f256\",\"size\":695},{\"mediaType\":\"application/vnd.cncf.helm.chart.content.v1.tar+gzip\",\"digest\":\"sha256:e5ef611620fb97704d8751c16bab17fedb68883bfb0edc76f78a70e9173f9b55\",\"size\":973}],\"annotations\":{\"org.opencontainers.image.created\":\"1977-09-02T22:04:05Z\",\"org.opencontainers.image.description\":\"A Helm chart for Kubernetes\",\"org.opencontainers.image.title\":\"signtest\",\"org.opencontainers.image.version\":\"0.1.0\"}}",
+ string(result.Manifest.Data))
+ suite.Equal("{\"name\":\"signtest\",\"version\":\"0.1.0\",\"description\":\"A Helm chart for Kubernetes\",\"apiVersion\":\"v1\"}",
+ string(result.Config.Data))
+ suite.Equal(chartData, result.Chart.Data)
+ suite.Equal(provData, result.Prov.Data)
+}
+
+func testTags(suite *TestRegistry) {
+ // Load test chart (to build ref pushed in previous test)
+ chartData, err := os.ReadFile("../downloader/testdata/local-subchart-0.1.0.tgz")
+ suite.Nil(err, "no error loading test chart")
+ meta, err := extractChartMeta(chartData)
+ suite.Nil(err, "no error extracting chart meta")
+ ref := fmt.Sprintf("%s/testrepo/%s", suite.DockerRegistryHost, meta.Name)
+
+ // Query for tags and validate length
+ tags, err := suite.RegistryClient.Tags(ref)
+ suite.Nil(err, "no error retrieving tags")
+ suite.Equal(1, len(tags))
+}
diff --git a/helm/pkg/registry/tag.go b/helm/pkg/registry/tag.go
new file mode 100644
index 000000000..bfb4b1ef6
--- /dev/null
+++ b/helm/pkg/registry/tag.go
@@ -0,0 +1,59 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry // import "helm.sh/helm/v4/pkg/registry"
+
+import (
+ "fmt"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (string, error) {
+ var constraint *semver.Constraints
+ if versionString == "" {
+ // If the string is empty, set a wildcard constraint
+ constraint, _ = semver.NewConstraint("*")
+ } else {
+ // when customer inputs a specific version, check whether there's an exact match first
+ for _, v := range tags {
+ if versionString == v {
+ return v, nil
+ }
+ }
+
+ // Otherwise set constraint to the string given
+ var err error
+ constraint, err = semver.NewConstraint(versionString)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // Otherwise try to find the first available version matching the string,
+ // in case it is a constraint
+ for _, v := range tags {
+ test, err := semver.NewVersion(v)
+ if err != nil {
+ continue
+ }
+ if constraint.Check(test) {
+ return v, nil
+ }
+ }
+
+ return "", fmt.Errorf("could not locate a version matching provided version string %s", versionString)
+}
diff --git a/helm/pkg/registry/tag_test.go b/helm/pkg/registry/tag_test.go
new file mode 100644
index 000000000..09f0f12ea
--- /dev/null
+++ b/helm/pkg/registry/tag_test.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestGetTagMatchingVersionOrConstraint_ExactMatch(t *testing.T) {
+ tags := []string{"1.0.0", "1.2.3", "2.0.0"}
+ got, err := GetTagMatchingVersionOrConstraint(tags, "1.2.3")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != "1.2.3" {
+ t.Fatalf("expected exact match '1.2.3', got %q", got)
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_EmptyVersionWildcard(t *testing.T) {
+ // Includes a non-semver tag which should be skipped
+ tags := []string{"latest", "0.9.0", "1.0.0"}
+ got, err := GetTagMatchingVersionOrConstraint(tags, "")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ // Should pick the first valid semver tag in order, which is 0.9.0
+ if got != "0.9.0" {
+ t.Fatalf("expected '0.9.0', got %q", got)
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_ConstraintRange(t *testing.T) {
+ tags := []string{"0.5.0", "1.0.0", "1.1.0", "2.0.0"}
+
+ // Caret range
+ got, err := GetTagMatchingVersionOrConstraint(tags, "^1.0.0")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != "1.0.0" { // first match in order
+ t.Fatalf("expected '1.0.0', got %q", got)
+ }
+
+ // Compound range
+ got, err = GetTagMatchingVersionOrConstraint(tags, ">=1.0.0 <2.0.0")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != "1.0.0" {
+ t.Fatalf("expected '1.0.0', got %q", got)
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_InvalidConstraint(t *testing.T) {
+ tags := []string{"1.0.0"}
+ _, err := GetTagMatchingVersionOrConstraint(tags, ">a1")
+ if err == nil {
+ t.Fatalf("expected error for invalid constraint")
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_NoMatches(t *testing.T) {
+ tags := []string{"0.1.0", "0.2.0"}
+ _, err := GetTagMatchingVersionOrConstraint(tags, ">=1.0.0")
+ if err == nil {
+ t.Fatalf("expected error when no tags match")
+ }
+ if !strings.Contains(err.Error(), ">=1.0.0") {
+ t.Fatalf("expected error to contain version string, got: %v", err)
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_SkipsNonSemverTags(t *testing.T) {
+ tags := []string{"alpha", "1.0.0", "beta", "1.1.0"}
+ got, err := GetTagMatchingVersionOrConstraint(tags, ">=1.0.0 <2.0.0")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != "1.0.0" {
+ t.Fatalf("expected '1.0.0', got %q", got)
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_OrderMatters_FirstMatchReturned(t *testing.T) {
+ // Both 1.2.0 and 1.3.0 satisfy >=1.2.0 <2.0.0, but the function returns the first in input order
+ tags := []string{"1.3.0", "1.2.0"}
+ got, err := GetTagMatchingVersionOrConstraint(tags, ">=1.2.0 <2.0.0")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != "1.3.0" {
+ t.Fatalf("expected '1.3.0' (first satisfying tag), got %q", got)
+ }
+}
+
+func TestGetTagMatchingVersionOrConstraint_ExactMatchHasPrecedence(t *testing.T) {
+ // Exact match should be returned even if another earlier tag would match the parsed constraint
+ tags := []string{"1.3.0", "1.2.3"}
+ got, err := GetTagMatchingVersionOrConstraint(tags, "1.2.3")
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if got != "1.2.3" {
+ t.Fatalf("expected exact match '1.2.3', got %q", got)
+ }
+}
diff --git a/helm/pkg/registry/testdata/tls/ca.crt b/helm/pkg/registry/testdata/tls/ca.crt
new file mode 100644
index 000000000..8c46ff81e
--- /dev/null
+++ b/helm/pkg/registry/testdata/tls/ca.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDiTCCAnGgAwIBAgIUbTTp/VG6blpKnXwWpSVtw54jxzswDQYJKoZIhvcNAQEL
+BQAwUzELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkdEMQswCQYDVQQHDAJTWjETMBEG
+A1UECgwKQWNtZSwgSW5jLjEVMBMGA1UEAwwMQWNtZSBSb290IENBMCAXDTI0MDQy
+MTA5NDUxOFoYDzMzOTMwNDA0MDk0NTE4WjBTMQswCQYDVQQGEwJDTjELMAkGA1UE
+CAwCR0QxCzAJBgNVBAcMAlNaMRMwEQYDVQQKDApBY21lLCBJbmMuMRUwEwYDVQQD
+DAxBY21lIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCq
+OrCgXpMjeSjWJYanmSG/2K4zk0HXeU3eMt5bkshlqHnEwJFD5tMZkJZUsGPiJr9A
+vAqYu2V9/gMKUptvHgxmMkh9BZYCnXAGzhl+OogYcJA5l/YBuDvmgz8M3aRZr7xd
+IA9KtepnDlp7NRWXsgRHzJNMBkV4PpEVHbJTVdjHVYERCw0C1kcb6wjzshnmUmJJ
+JVEQDRCCaYymtIymR6kKrZzIw2FeyXxcccbvTsKILItEECYmRNevo1mc5/f8BEXx
+IzEPhDpoKSTq5JjWHCQH1shkwWyg2neL7g0UJ8nyV0pqqScE0L1WUZ1BHnVJAmGm
+R61WXxA3xCFzJHSc2enRAgMBAAGjUzBRMB0GA1UdDgQWBBREgz+BR+lJFNaG2D7+
+tDVzzyjc4jAfBgNVHSMEGDAWgBREgz+BR+lJFNaG2D7+tDVzzyjc4jAPBgNVHRMB
+Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAL9DjzmNwDljtMWvwAbDC11bIw
+zHON10J/bLcoZy3r7SaD1ZjPigzdpd0oVaoq+Kcg/J0JuIN2fBzyFljft//9knDA
+GgO4TvDdd7dk4gv6C/fbmeh+/HsnjRDHQmExzgth5akSnmtxyk5HQR72FrWICqjf
+oEqg8xs0gVwl8Z0xXLgJ7BZEzRxYlV/G2+vjA1FYIGd3Qfiyg8Qd68Y5bs2/HdBC
+a0EteVUNhS1XVjFFxDZnegPKZs30RwDHcVt9Pj/dLVXu2BgtdYupWtMbtfXNmsg2
+pJcFk7Ve1CAtfrQ2t8DAwOpKHkKIqExupQaGwbdTAtNiQtdGntv4oHuEGJ9p
+-----END CERTIFICATE-----
diff --git a/helm/pkg/registry/testdata/tls/ca.key b/helm/pkg/registry/testdata/tls/ca.key
new file mode 100644
index 000000000..f228b4d24
--- /dev/null
+++ b/helm/pkg/registry/testdata/tls/ca.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCqOrCgXpMjeSjW
+JYanmSG/2K4zk0HXeU3eMt5bkshlqHnEwJFD5tMZkJZUsGPiJr9AvAqYu2V9/gMK
+UptvHgxmMkh9BZYCnXAGzhl+OogYcJA5l/YBuDvmgz8M3aRZr7xdIA9KtepnDlp7
+NRWXsgRHzJNMBkV4PpEVHbJTVdjHVYERCw0C1kcb6wjzshnmUmJJJVEQDRCCaYym
+tIymR6kKrZzIw2FeyXxcccbvTsKILItEECYmRNevo1mc5/f8BEXxIzEPhDpoKSTq
+5JjWHCQH1shkwWyg2neL7g0UJ8nyV0pqqScE0L1WUZ1BHnVJAmGmR61WXxA3xCFz
+JHSc2enRAgMBAAECggEAJVX2A1Z64x7hzAYzAHNfqZo2qu0zVbUvVPrHNkJ9XX6U
+Jokt0zy/NC44Kp79aU6iR+p2UIVZf0bFF/CCUt6+TXPd3j3pZu1s8rElekAQNXwK
+xfcEZ+AmkypaG9JJB7q5j5tGf1Zi8PN++OLtt3W95pmB/PyrI/JlE8KNqCV+BEnq
+jLheACmehK+G7Rtez128lPvWHAnUTuQQ0wql1z4Z9VB5UwCYD3AxDz34jd8lwZQ1
+RQLUQblN46zpzkBTAX7sTmi9/y0nHJ7rJukTKxDciZ0xPkhtiAKjh6R2wb1TO51Q
+fyGT7iyvtxnqQf+VoNYZGiQ/L7DMppSEHUMm0gkZuQKBgQDoFmLz5J7spQgASjXi
+OLt8lWQOovzNC7K/pjILhD86o58efbZs6NdBrdq8GbeBtowd8HW0nwrxPbk0YN8W
+Fr8kl6hAHYd4UYpMWYNDmB7KIVTAoU/Fk+p5AjXIBwQcYm9H66tDAO/yC8G8EEzu
+iPoBTBQGMss87LH0jsSCDO0oQwKBgQC7xLY58zrU/cdK+ZbKmNA158CibH6ksXHP
+Z4gm+yMW0t7Jdd39L+CfyAEWF9BAagJUuiaxIq3ZiHu7rA6PJ2G8jqRcIHyFgMRk
+sxKTd7F86AI/IEZy7k0l//E4AsXERVgafvRuuSwYsm+ns6cuVYjAYRaHHinZpQao
+Y98SxuxeWwKBgGFE+KX1XHIb3JWahKjSVCmrxuqnfsJFM95Evla7T3C5ILg7wdg1
+Yfoh7jnFoXZY1rK5k+tmeMSQtO1x6C2uzN9+PELa3Wsc6ZSEM5KBz+2xOH8fXHqX
+Or8KoRW7cwqears+12FWpDnSmZjDUCrs97LRetb6NNnM7exsZYmH92FXAoGBAJDZ
+fm4UCfWXVK+s/TuLSUvcXYmvQr9QN+j1CF5x7C7GO6GUcMzJq3H3e4cMldWrMeMk
+u4Z4pz6iADnV0GF00vv/2iFL2mOu41J/pjvm4R/nZxxFjLNKzG8dE3vO/7uadw3x
+lCT6al8e/+2SNM0UpOsrupI/na9NlGZArSyyElPzAoGBAIVv0H798SZjUxpfLT8s
++DI1QFbenNeoEaeXdkYtGrSPUhfZQQ2F744QDsbMm6+4oFkD9yg2A3DvSbd9+WrP
+eDKKA5MAeNiD3X6glEcQOE1x6iTZ0jEXArv1n/qCl1qaUDPDUr8meIlkuwRgwyyW
+vKxiQdtK+ZLUNfU2R5xZwo+X
+-----END PRIVATE KEY-----
diff --git a/helm/pkg/registry/testdata/tls/client.crt b/helm/pkg/registry/testdata/tls/client.crt
new file mode 100644
index 000000000..f54f46c77
--- /dev/null
+++ b/helm/pkg/registry/testdata/tls/client.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDijCCAnKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJDTjEL
+MAkGA1UECAwCR0QxCzAJBgNVBAcMAlNaMRMwEQYDVQQKDApBY21lLCBJbmMuMRUw
+EwYDVQQDDAxBY21lIFJvb3QgQ0EwIBcNMjQwNDIxMTA1MzA1WhgPMzM5MzA0MDQx
+MDUzMDVaMFkxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJHRDELMAkGA1UEBwwCU1ox
+EzARBgNVBAoMCkFjbWUsIEluYy4xGzAZBgNVBAMMEmhlbG0tdGVzdC1yZWdpc3Ry
+eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALK1aOjQvB337gmjkORj
+QQyBDsScyWCnc1gjypcwPvi97+FFlp/jZUWasIa+FXeYWhwWiUI2tUttDNPZATqq
+c2My1uME2Dm0PG9qAUuvW5CEdE7Bw3T2K/8A1myfux/vyMXEjXKHAl+uhTcqDlew
+/yIF2gfO2dKYk+xnZwdE6w8bIQTqnaG0JxtK7Q0ULldsCOFtF+a4C9Zye6ggdieh
+cwVuV41ehbVCK3E7AylTFwbALB6ZQ4z3V6jXrXBNdMKSLyesWAAwROcUB+S68NEa
+5AWSfGXOT2glHzMHe7fJoulTetvJiaKBpxnFInMquBRzxpNO7A6eVmp6FQfpXqof
+wikCAwEAAaNhMF8wHQYDVR0RBBYwFIISaGVsbS10ZXN0LXJlZ2lzdHJ5MB0GA1Ud
+DgQWBBT6yXtjugflf08vGK3ClkHGw/D9HzAfBgNVHSMEGDAWgBREgz+BR+lJFNaG
+2D7+tDVzzyjc4jANBgkqhkiG9w0BAQsFAAOCAQEAoDEJSYcegsEH1/mzAT8CUul5
+MkxF8U1Dtc8m6Nyosolh16AlJ5dmF5d537lqf0VwHDFtQiwexWVohTW9ngpk0C0Z
+Jphf0+9ptpzBQn9x0mcHyKJRD3TbUc80oehY33bHAhPNdV3C1gwCfcbdX8Gz89ZT
+MdLY0BfDELeBKVpaHd2vuK+E06X0a7T5P7vnYmNFpQOMyyytl7vM1TofmU905sNI
+hrHqKH6c2G6QKW+vuiPoX+QbZFZ4NJ+Lco176wnpJjMZx3+Z6t4TV4sCaZgxj3RT
+gDQBRnsD6m03ZoVZvIOlApUs3IEKXsqsrXJpuxfvU89u9z6vOn6TteFsExXiuA==
+-----END CERTIFICATE-----
diff --git a/helm/pkg/registry/testdata/tls/client.key b/helm/pkg/registry/testdata/tls/client.key
new file mode 100644
index 000000000..3e7645003
--- /dev/null
+++ b/helm/pkg/registry/testdata/tls/client.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCytWjo0Lwd9+4J
+o5DkY0EMgQ7EnMlgp3NYI8qXMD74ve/hRZaf42VFmrCGvhV3mFocFolCNrVLbQzT
+2QE6qnNjMtbjBNg5tDxvagFLr1uQhHROwcN09iv/ANZsn7sf78jFxI1yhwJfroU3
+Kg5XsP8iBdoHztnSmJPsZ2cHROsPGyEE6p2htCcbSu0NFC5XbAjhbRfmuAvWcnuo
+IHYnoXMFbleNXoW1QitxOwMpUxcGwCwemUOM91eo161wTXTCki8nrFgAMETnFAfk
+uvDRGuQFknxlzk9oJR8zB3u3yaLpU3rbyYmigacZxSJzKrgUc8aTTuwOnlZqehUH
+6V6qH8IpAgMBAAECggEAFv5M3oG25pM3GyHiu2QC41k6nXT/2xIIfvtx7lR8kbQc
+iGtT90QCjHtcAaY07GObmngS1oRj/K2uBBbsd9AlEwsgR2rg6EHGsd4dhw+rtBM6
+xMRdAfBHlmKU9Dp0EOag+kMxIN56oXV6ue+NE17YYNgIZs9ISvarN7RRNwf4x4NS
+wpeWBqt120B3p9mGS64vE6wFxpRKSpFcpIp+yUswI45x8mbvCBr4tNW0OQ7y+WwS
+rPp7GayutEUB9etRWviw10D7pz3HrxfarrZJm65IH1Fw5Ye6ayteoWg4IY2s3qSS
+gh4qMZNMPeE6G3UBmkMdUf27+Udt8bSrSoz2Z8OlVQKBgQDcMY6h0BTFJcioBLhV
+qe0FmckVNzs5jtzdwXFSjQduUCZ74ag5hsW3jQ0KNvd1B/xOv/Df6rYJY3ww8cQ1
++KRTzt5B4qZwC1swuzqHWjR/W5XBlX3hRbs+I3imveaQ9zNFpktDZhaG72AWLLpa
+Y31ddrkG4a8rTZFSuOVCbyj7JQKBgQDPxN/2Ayt/x+n/A4LNDSUQiUSALIeBHCCo
+UzNQojcQLyobBVCIu5E3gRqIbvyRde7MQMGhfpLuaW7wmW0hqkUtRDYb4Hy52YMg
+PFkno11wdpoEN3McLJNH08q+2dFjUKzQWygelDvkQMkwiL2syu+rEoUIEOCWyW6V
+mPEPmfcdtQKBgEbqgwhkTrwr7hMG6iNUxex+2f9GOYHRHBsjeQ7gMtt5XtuZEqfs
+WvNBr0hx6YK8nqryMG69VgFyFAZjZxEG0k3Xm0dW6sm9LpJkSnZbO/skkPe24MLT
+xXk+zVXOZVqc8ttksmqzj1/H6odZwm7oCfE3EmI//z2QDtS4jcW2rVktAoGABfdn
+Xw80PpUlGRemt/C6scDfYLbmpUSDg5HwFU6zOhnAocoDSAnq36crdeOKCTtTwjXR
+2ati2MnaT7p4MdFL70LYMvC9ZDDk3RYekU7VrhcZ0Skuew6kpBlm5xgmNS3p6InV
+mxsypRlfLa+fksi5HTaI73RcnrfmHxGnSoVnXUkCgYAHggM+T7e11OB+aEQ0nFcL
+nS58M7QgB3/Xd7jGrl9Fi5qogtHE80epiV/srWaACZV6ricCZoDikOZzH1rRL2AA
+Wlmb4j9yKp4P4uN0tniU0JuFEIQgLklAsEb4BG6izHI0UpXZTKVXY0XymOBdNtaw
+QakjUJVKk+LqapUGIR8xRw==
+-----END PRIVATE KEY-----
diff --git a/helm/pkg/registry/testdata/tls/server.crt b/helm/pkg/registry/testdata/tls/server.crt
new file mode 100644
index 000000000..42585e775
--- /dev/null
+++ b/helm/pkg/registry/testdata/tls/server.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDijCCAnKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJDTjEL
+MAkGA1UECAwCR0QxCzAJBgNVBAcMAlNaMRMwEQYDVQQKDApBY21lLCBJbmMuMRUw
+EwYDVQQDDAxBY21lIFJvb3QgQ0EwIBcNMjQwNDIxMTA1MzM4WhgPMzM5MzA0MDQx
+MDUzMzhaMFkxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJHRDELMAkGA1UEBwwCU1ox
+EzARBgNVBAoMCkFjbWUsIEluYy4xGzAZBgNVBAMMEmhlbG0tdGVzdC1yZWdpc3Ry
+eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAME7cQl/08+JJv8aR07t
+9nAnqQ6fYUwMBX8ULS2i6dXUoR0WpTBS8VgGUb2pNnH83r/VbvAcHSY/3LSUdt1d
+j+cyCBQHXf8ySolInVP3L3s435WJuB9yzVZmlI8xrLOYmfVLnoyWjsirZT2KjLSw
+gVgn0N9PQ6K+IvrIph/jgBsv9c6oCLvWH1TcVtS5AN6gb5aSvr2cXRCVelntLH9V
+QpsmceMtHfzJUW37AarEvTj8NNTOWMIPNs1rqNpFEy1AepHy388C63SJuqy69dvx
+9wE1DCCduH3PMgF7cxWicow9JcIK4kZLrBD4ULdSxTmqA1+yLf+VHhSrDIQy3Lwj
+bBcCAwEAAaNhMF8wHQYDVR0RBBYwFIISaGVsbS10ZXN0LXJlZ2lzdHJ5MB0GA1Ud
+DgQWBBSQliNnbB0bCKi3c3mqifj3CPZbxTAfBgNVHSMEGDAWgBREgz+BR+lJFNaG
+2D7+tDVzzyjc4jANBgkqhkiG9w0BAQsFAAOCAQEAPztylxowZuLT3zRdB0JHkmnI
+zoUmG1hwBeRtruMqQGZnSX0F2glTVKcJzC+Wl5XzMHt2AcRmYl4qk7flWfFavlFp
+7ycIbbKH/4MVmuJF53Zy40fOZ2rDSfyjNsPNQLxTg3tlWVbEAcuyKAWLJ5RZG+hL
+fSKVFzdEsV+Ux//BUuce/q42hTBbZF09GtG+Lg7/DgxGIY7CLzID8GfdcYRBv4sX
+eeOHeGnDC1zttMcnWU49zghJ8MXwo7tOsybQEZmSZZdwQwm+pEwxdibJAXQ/OSGb
+c7RI+clTmnwbP/vnig5RnMALFbUaP2aE/mTMYLWBBV1VqWkfx4Xc7xbE9lrpuA==
+-----END CERTIFICATE-----
diff --git a/helm/pkg/registry/testdata/tls/server.key b/helm/pkg/registry/testdata/tls/server.key
new file mode 100644
index 000000000..4f7bd54fb
--- /dev/null
+++ b/helm/pkg/registry/testdata/tls/server.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDBO3EJf9PPiSb/
+GkdO7fZwJ6kOn2FMDAV/FC0tounV1KEdFqUwUvFYBlG9qTZx/N6/1W7wHB0mP9y0
+lHbdXY/nMggUB13/MkqJSJ1T9y97ON+Vibgfcs1WZpSPMayzmJn1S56Mlo7Iq2U9
+ioy0sIFYJ9DfT0OiviL6yKYf44AbL/XOqAi71h9U3FbUuQDeoG+Wkr69nF0QlXpZ
+7Sx/VUKbJnHjLR38yVFt+wGqxL04/DTUzljCDzbNa6jaRRMtQHqR8t/PAut0ibqs
+uvXb8fcBNQwgnbh9zzIBe3MVonKMPSXCCuJGS6wQ+FC3UsU5qgNfsi3/lR4UqwyE
+Mty8I2wXAgMBAAECggEAAKk5/ytSlGCTicatCcZJbb0xy3ZpUcyuVCH28ABuEyiY
+DugEU3PLll6Aw+JWG/Ieg1xKj3dSwWe+H785eazK3W9pYanCY4+1FSuMOW/pPkWs
+IvA536ARhCmNRo27JoSJU+Wyh1tlTHOk2mukt/vs/vOb6x4NTPttIs7lUP42DC6O
+e/gTvwD13Rrg9PC0aDpZzLqdmXyUoHQ4h8dfYytDE9rZ1gC2CNdd7NWvt2JUppRx
+qWR5OQxm+QiZqrMDUFTZISB/bD7MX/Ubq5InAfwdznzyav4uWsxq72FuoFFGl9xh
+l6WEdusyKay/eNZgXqrHyuJvmt1PUL+Azu8ZYD+C2QKBgQD/nogcrVKLzmmrnggG
+lMAvF5tp3gMI7+wqALH/79Gelvj5CWzGBnS7BcuXFR5cbpLk1cW6mj16IPIRA2CR
+xpGfYKtYt0j5hvIZTg3TpK3Pj/kqEv0AicdGP6SYduJYgaUwFKRzHSR+N3121v5X
+MVXKb5q6pD1wb7cOc2FJAOySHQKBgQDBhR8bAg99EgvVNioSkot++kRffWxwZ9uS
+k1jmhLl7djb1tND4yZGZmi8+bdw7qz7J5yEJHuJiMwOkDsBokpKykk36tjBx3UiV
+Z46OiKbRkiwBLg6fio6BVwAuQpoQ+qMWwkjZFPzWiEhxTPo3ZyiJP8JlT8sG3rV4
+My3wvLagwwKBgFT3RRcDJaUC/2zkIpbNavQ8TJRsD2YxGbb8dC42cN7eH/Pnhhhs
+nPBthLa7dlQTDRCzXf4gtr6ZpNyy2q6Z6l2nrEzY35DRojd3EnF/E6cinBe4KBC9
+u1dGYFetbJ8uuNG6is8YqMCrgTC3VeN1qqaXYj8XyLRO7fIHuBakD/6hAoGARDal
+cUK3rPF4hE5UZDmNvFOBWFuAptqlFjSkKJVuQCu6Ub/LzXZXwVoM/yeAcvP47Phw
+t6NQTycGSIT+o53O4e0aWZ5w0yIaHLflEy7uBn9MzZmrg+c2NjcxlBzb69I9PJ99
+SC/Ss9hUGMP2iyLssfxsjIOk4CYOt3Dq56nNgjsCgYBWOLVMCV10DpYKUY5LFq60
+CJppqPyBfGB+5LLYfOp8JSIh1ZwSL139A2oCynGjrIyyPksdkBUMcS/qLhT1vmzo
+zdUZMwK8D/TjF037F/t34LUHweP/2pl90DUcNPHJJs/IhXji7Kpdnqf3LhSXmgNs
+d7TshLFRKM1z2BlZPZ56cA==
+-----END PRIVATE KEY-----
diff --git a/helm/pkg/registry/transport.go b/helm/pkg/registry/transport.go
new file mode 100644
index 000000000..f039a8159
--- /dev/null
+++ b/helm/pkg/registry/transport.go
@@ -0,0 +1,175 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log/slog"
+ "mime"
+ "net/http"
+ "strings"
+ "sync/atomic"
+
+ "oras.land/oras-go/v2/registry/remote/retry"
+)
+
+var (
+ // requestCount records the number of logged request-response pairs and will
+ // be used as the unique id for the next pair.
+ requestCount atomic.Uint64
+
+ // toScrub is a set of headers that should be scrubbed from the log.
+ toScrub = []string{
+ "Authorization",
+ "Set-Cookie",
+ }
+)
+
+// payloadSizeLimit limits the maximum size of the response body to be printed.
+const payloadSizeLimit int64 = 16 * 1024 // 16 KiB
+
+// LoggingTransport is an http.RoundTripper that keeps track of the in-flight
+// request and add hooks to report HTTP tracing events.
+type LoggingTransport struct {
+ http.RoundTripper
+}
+
+// NewTransport creates and returns a new instance of LoggingTransport
+func NewTransport(debug bool) *retry.Transport {
+ type cloner[T any] interface {
+ Clone() T
+ }
+
+ // try to copy (clone) the http.DefaultTransport so any mutations we
+ // perform on it (e.g. TLS config) are not reflected globally
+ // follow https://github.com/golang/go/issues/39299 for a more elegant
+ // solution in the future
+ transport := http.DefaultTransport
+ if t, ok := transport.(cloner[*http.Transport]); ok {
+ transport = t.Clone()
+ } else if t, ok := transport.(cloner[http.RoundTripper]); ok {
+ // this branch will not be used with go 1.20, it was added
+ // optimistically to try to clone if the http.DefaultTransport
+ // implementation changes, still the Clone method in that case
+ // might not return http.RoundTripper...
+ transport = t.Clone()
+ }
+ if debug {
+ transport = &LoggingTransport{RoundTripper: transport}
+ }
+
+ return retry.NewTransport(transport)
+}
+
+// RoundTrip calls base round trip while keeping track of the current request.
+func (t *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ id := requestCount.Add(1) - 1
+
+ slog.Debug(req.Method, "id", id, "url", req.URL, "header", logHeader(req.Header))
+ resp, err = t.RoundTripper.RoundTrip(req)
+ if err != nil {
+ slog.Debug("Response"[:len(req.Method)], "id", id, "error", err)
+ } else if resp != nil {
+ slog.Debug("Response"[:len(req.Method)], "id", id, "status", resp.Status, "header", logHeader(resp.Header), "body", logResponseBody(resp))
+ } else {
+ slog.Debug("Response"[:len(req.Method)], "id", id, "response", "nil")
+ }
+
+ return resp, err
+}
+
+// logHeader prints out the provided header keys and values, with auth header scrubbed.
+func logHeader(header http.Header) string {
+ if len(header) > 0 {
+ var headers []string
+ for k, v := range header {
+ for _, h := range toScrub {
+ if strings.EqualFold(k, h) {
+ v = []string{"*****"}
+ }
+ }
+ headers = append(headers, fmt.Sprintf(" %q: %q", k, strings.Join(v, ", ")))
+ }
+ return strings.Join(headers, "\n")
+ }
+ return " Empty header"
+}
+
+// logResponseBody prints out the response body if it is printable and within size limit.
+func logResponseBody(resp *http.Response) string {
+ if resp.Body == nil || resp.Body == http.NoBody {
+ return " No response body to print"
+ }
+
+ // non-applicable body is not printed and remains untouched for subsequent processing
+ contentType := resp.Header.Get("Content-Type")
+ if contentType == "" {
+ return " Response body without a content type is not printed"
+ }
+ if !isPrintableContentType(contentType) {
+ return fmt.Sprintf(" Response body of content type %q is not printed", contentType)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ body := resp.Body
+ // restore the body by concatenating the read body with the remaining body
+ resp.Body = struct {
+ io.Reader
+ io.Closer
+ }{
+ Reader: io.MultiReader(buf, body),
+ Closer: body,
+ }
+ // read the body up to limit+1 to check if the body exceeds the limit
+ if _, err := io.CopyN(buf, body, payloadSizeLimit+1); err != nil && err != io.EOF {
+ return fmt.Sprintf(" Error reading response body: %v", err)
+ }
+
+ readBody := buf.String()
+ if len(readBody) == 0 {
+ return " Response body is empty"
+ }
+ if containsCredentials(readBody) {
+ return " Response body redacted due to potential credentials"
+ }
+ if len(readBody) > int(payloadSizeLimit) {
+ return readBody[:payloadSizeLimit] + "\n...(truncated)"
+ }
+ return readBody
+}
+
+// isPrintableContentType returns true if the contentType is printable.
+func isPrintableContentType(contentType string) bool {
+ mediaType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return false
+ }
+
+ switch mediaType {
+ case "application/json", // JSON types
+ "text/plain", "text/html": // text types
+ return true
+ }
+ return strings.HasSuffix(mediaType, "+json")
+}
+
+// containsCredentials returns true if the body contains potential credentials.
+func containsCredentials(body string) bool {
+ return strings.Contains(body, `"token"`) || strings.Contains(body, `"access_token"`)
+}
diff --git a/helm/pkg/registry/transport_test.go b/helm/pkg/registry/transport_test.go
new file mode 100644
index 000000000..b4990c526
--- /dev/null
+++ b/helm/pkg/registry/transport_test.go
@@ -0,0 +1,399 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "testing"
+)
+
+var errMockRead = errors.New("mock read error")
+
+type errorReader struct{}
+
+func (e *errorReader) Read(_ []byte) (n int, err error) {
+ return 0, errMockRead
+}
+
+func Test_isPrintableContentType(t *testing.T) {
+ tests := []struct {
+ name string
+ contentType string
+ want bool
+ }{
+ {
+ name: "Empty content type",
+ contentType: "",
+ want: false,
+ },
+ {
+ name: "General JSON type",
+ contentType: "application/json",
+ want: true,
+ },
+ {
+ name: "General JSON type with charset",
+ contentType: "application/json; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random type with application/json prefix",
+ contentType: "application/jsonwhatever",
+ want: false,
+ },
+ {
+ name: "Manifest type in JSON",
+ contentType: "application/vnd.oci.image.manifest.v1+json",
+ want: true,
+ },
+ {
+ name: "Manifest type in JSON with charset",
+ contentType: "application/vnd.oci.image.manifest.v1+json; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random content type in JSON",
+ contentType: "application/whatever+json",
+ want: true,
+ },
+ {
+ name: "Plain text type",
+ contentType: "text/plain",
+ want: true,
+ },
+ {
+ name: "Plain text type with charset",
+ contentType: "text/plain; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random type with text/plain prefix",
+ contentType: "text/plainnnnn",
+ want: false,
+ },
+ {
+ name: "HTML type",
+ contentType: "text/html",
+ want: true,
+ },
+ {
+ name: "Plain text type with charset",
+ contentType: "text/html; charset=utf-8",
+ want: true,
+ },
+ {
+ name: "Random type with text/html prefix",
+ contentType: "text/htmlllll",
+ want: false,
+ },
+ {
+ name: "Binary type",
+ contentType: "application/octet-stream",
+ want: false,
+ },
+ {
+ name: "Unknown type",
+ contentType: "unknown/unknown",
+ want: false,
+ },
+ {
+ name: "Invalid type",
+ contentType: "text/",
+ want: false,
+ },
+ {
+ name: "Random string",
+ contentType: "random123!@#",
+ want: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := isPrintableContentType(tt.contentType); got != tt.want {
+ t.Errorf("isPrintableContentType() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_logResponseBody(t *testing.T) {
+ tests := []struct {
+ name string
+ resp *http.Response
+ want string
+ wantData []byte
+ }{
+ {
+ name: "Nil body",
+ resp: &http.Response{
+ Body: nil,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ want: " No response body to print",
+ },
+ {
+ name: "No body",
+ wantData: nil,
+ resp: &http.Response{
+ Body: http.NoBody,
+ ContentLength: 100, // in case of HEAD response, the content length is set but the body is empty
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ want: " No response body to print",
+ },
+ {
+ name: "Empty body",
+ wantData: []byte(""),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte(""))),
+ ContentLength: 0,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: " Response body is empty",
+ },
+ {
+ name: "Unknown content length",
+ wantData: []byte("whatever"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("whatever"))),
+ ContentLength: -1,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "whatever",
+ },
+ {
+ name: "Missing content type header",
+ wantData: []byte("whatever"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("whatever"))),
+ ContentLength: 8,
+ },
+ want: " Response body without a content type is not printed",
+ },
+ {
+ name: "Empty content type header",
+ wantData: []byte("whatever"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("whatever"))),
+ ContentLength: 8,
+ Header: http.Header{"Content-Type": []string{""}},
+ },
+ want: " Response body without a content type is not printed",
+ },
+ {
+ name: "Non-printable content type",
+ wantData: []byte("binary data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("binary data"))),
+ ContentLength: 11,
+ Header: http.Header{"Content-Type": []string{"application/octet-stream"}},
+ },
+ want: " Response body of content type \"application/octet-stream\" is not printed",
+ },
+ {
+ name: "Body at the limit",
+ wantData: bytes.Repeat([]byte("a"), int(payloadSizeLimit)),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), int(payloadSizeLimit)))),
+ ContentLength: payloadSizeLimit,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: string(bytes.Repeat([]byte("a"), int(payloadSizeLimit))),
+ },
+ {
+ name: "Body larger than limit",
+ wantData: bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)))), // 1 byte larger than limit
+ ContentLength: payloadSizeLimit + 1,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: string(bytes.Repeat([]byte("a"), int(payloadSizeLimit))) + "\n...(truncated)",
+ },
+ {
+ name: "Printable content type within limit",
+ wantData: []byte("data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("data"))),
+ ContentLength: 4,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "data",
+ },
+ {
+ name: "Actual body size is larger than content length",
+ wantData: []byte("data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("data"))),
+ ContentLength: 3, // mismatched content length
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "data",
+ },
+ {
+ name: "Actual body size is larger than content length and exceeds limit",
+ wantData: bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), int(payloadSizeLimit+1)))), // 1 byte larger than limit
+ ContentLength: 1, // mismatched content length
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: string(bytes.Repeat([]byte("a"), int(payloadSizeLimit))) + "\n...(truncated)",
+ },
+ {
+ name: "Actual body size is smaller than content length",
+ wantData: []byte("data"),
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte("data"))),
+ ContentLength: 5, // mismatched content length
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: "data",
+ },
+ {
+ name: "Body contains token",
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte(`{"token":"12345"}`))),
+ ContentLength: 17,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ wantData: []byte(`{"token":"12345"}`),
+ want: " Response body redacted due to potential credentials",
+ },
+ {
+ name: "Body contains access_token",
+ resp: &http.Response{
+ Body: io.NopCloser(bytes.NewReader([]byte(`{"access_token":"12345"}`))),
+ ContentLength: 17,
+ Header: http.Header{"Content-Type": []string{"application/json"}},
+ },
+ wantData: []byte(`{"access_token":"12345"}`),
+ want: " Response body redacted due to potential credentials",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := logResponseBody(tt.resp); got != tt.want {
+ t.Errorf("logResponseBody() = %v, want %v", got, tt.want)
+ }
+ // validate the response body
+ if tt.resp.Body != nil {
+ readBytes, err := io.ReadAll(tt.resp.Body)
+ if err != nil {
+ t.Errorf("failed to read body after logResponseBody(), err= %v", err)
+ }
+ if !bytes.Equal(readBytes, tt.wantData) {
+ t.Errorf("resp.Body after logResponseBody() = %v, want %v", readBytes, tt.wantData)
+ }
+ if closeErr := tt.resp.Body.Close(); closeErr != nil {
+ t.Errorf("failed to close body after logResponseBody(), err= %v", closeErr)
+ }
+ }
+ })
+ }
+}
+
+func Test_logResponseBody_error(t *testing.T) {
+ tests := []struct {
+ name string
+ resp *http.Response
+ want string
+ }{
+ {
+ name: "Error reading body",
+ resp: &http.Response{
+ Body: io.NopCloser(&errorReader{}),
+ ContentLength: 10,
+ Header: http.Header{"Content-Type": []string{"text/plain"}},
+ },
+ want: " Error reading response body: mock read error",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := logResponseBody(tt.resp); got != tt.want {
+ t.Errorf("logResponseBody() = %v, want %v", got, tt.want)
+ }
+ if closeErr := tt.resp.Body.Close(); closeErr != nil {
+ t.Errorf("failed to close body after logResponseBody(), err= %v", closeErr)
+ }
+ })
+ }
+}
+
+func Test_containsCredentials(t *testing.T) {
+ tests := []struct {
+ name string
+ body string
+ want bool
+ }{
+ {
+ name: "Contains token keyword",
+ body: `{"token": "12345"}`,
+ want: true,
+ },
+ {
+ name: "Contains quoted token keyword",
+ body: `whatever "token" blah`,
+ want: true,
+ },
+ {
+ name: "Contains unquoted token keyword",
+ body: `whatever token blah`,
+ want: false,
+ },
+ {
+ name: "Contains access_token keyword",
+ body: `{"access_token": "12345"}`,
+ want: true,
+ },
+ {
+ name: "Contains quoted access_token keyword",
+ body: `whatever "access_token" blah`,
+ want: true,
+ },
+ {
+ name: "Contains unquoted access_token keyword",
+ body: `whatever access_token blah`,
+ want: false,
+ },
+ {
+ name: "Does not contain credentials",
+ body: `{"key": "value"}`,
+ want: false,
+ },
+ {
+ name: "Empty body",
+ body: ``,
+ want: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := containsCredentials(tt.body); got != tt.want {
+ t.Errorf("containsCredentials() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/release/common.go b/helm/pkg/release/common.go
new file mode 100644
index 000000000..d33c96646
--- /dev/null
+++ b/helm/pkg/release/common.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart"
+ v1release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var NewAccessor func(rel Releaser) (Accessor, error) = newDefaultAccessor //nolint:revive
+
+var NewHookAccessor func(rel Hook) (HookAccessor, error) = newDefaultHookAccessor //nolint:revive
+
+func newDefaultAccessor(rel Releaser) (Accessor, error) {
+ switch v := rel.(type) {
+ case v1release.Release:
+ return &v1Accessor{&v}, nil
+ case *v1release.Release:
+ return &v1Accessor{v}, nil
+ default:
+ return nil, fmt.Errorf("unsupported release type: %T", rel)
+ }
+}
+
+func newDefaultHookAccessor(hook Hook) (HookAccessor, error) {
+ switch h := hook.(type) {
+ case v1release.Hook:
+ return &v1HookAccessor{&h}, nil
+ case *v1release.Hook:
+ return &v1HookAccessor{h}, nil
+ default:
+ return nil, errors.New("unsupported release hook type")
+ }
+}
+
+type v1Accessor struct {
+ rel *v1release.Release
+}
+
+func (a *v1Accessor) Name() string {
+ return a.rel.Name
+}
+
+func (a *v1Accessor) Namespace() string {
+ return a.rel.Namespace
+}
+
+func (a *v1Accessor) Version() int {
+ return a.rel.Version
+}
+
+func (a *v1Accessor) Hooks() []Hook {
+ var hooks = make([]Hook, len(a.rel.Hooks))
+ for i, h := range a.rel.Hooks {
+ hooks[i] = h
+ }
+ return hooks
+}
+
+func (a *v1Accessor) Manifest() string {
+ return a.rel.Manifest
+}
+
+func (a *v1Accessor) Notes() string {
+ return a.rel.Info.Notes
+}
+
+func (a *v1Accessor) Labels() map[string]string {
+ return a.rel.Labels
+}
+
+func (a *v1Accessor) Chart() chart.Charter {
+ return a.rel.Chart
+}
+
+func (a *v1Accessor) Status() string {
+ return a.rel.Info.Status.String()
+}
+
+func (a *v1Accessor) ApplyMethod() string {
+ return a.rel.ApplyMethod
+}
+
+func (a *v1Accessor) DeployedAt() time.Time {
+ return a.rel.Info.LastDeployed
+}
+
+type v1HookAccessor struct {
+ hook *v1release.Hook
+}
+
+func (a *v1HookAccessor) Path() string {
+ return a.hook.Path
+}
+
+func (a *v1HookAccessor) Manifest() string {
+ return a.hook.Manifest
+}
diff --git a/helm/pkg/release/common/status.go b/helm/pkg/release/common/status.go
new file mode 100644
index 000000000..fd5010301
--- /dev/null
+++ b/helm/pkg/release/common/status.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Status is the status of a release
+type Status string
+
+// Describe the status of a release
+// NOTE: Make sure to update cmd/helm/status.go when adding or modifying any of these statuses.
+const (
+ // StatusUnknown indicates that a release is in an uncertain state.
+ StatusUnknown Status = "unknown"
+ // StatusDeployed indicates that the release has been pushed to Kubernetes.
+ StatusDeployed Status = "deployed"
+ // StatusUninstalled indicates that a release has been uninstalled from Kubernetes.
+ StatusUninstalled Status = "uninstalled"
+ // StatusSuperseded indicates that this release object is outdated and a newer one exists.
+ StatusSuperseded Status = "superseded"
+ // StatusFailed indicates that the release was not successfully deployed.
+ StatusFailed Status = "failed"
+ // StatusUninstalling indicates that an uninstall operation is underway.
+ StatusUninstalling Status = "uninstalling"
+ // StatusPendingInstall indicates that an install operation is underway.
+ StatusPendingInstall Status = "pending-install"
+ // StatusPendingUpgrade indicates that an upgrade operation is underway.
+ StatusPendingUpgrade Status = "pending-upgrade"
+ // StatusPendingRollback indicates that a rollback operation is underway.
+ StatusPendingRollback Status = "pending-rollback"
+)
+
+func (x Status) String() string { return string(x) }
+
+// IsPending determines if this status is a state or a transition.
+func (x Status) IsPending() bool {
+ return x == StatusPendingInstall || x == StatusPendingUpgrade || x == StatusPendingRollback
+}
diff --git a/helm/pkg/release/common_test.go b/helm/pkg/release/common_test.go
new file mode 100644
index 000000000..e9f8d364a
--- /dev/null
+++ b/helm/pkg/release/common_test.go
@@ -0,0 +1,65 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestNewDefaultAccessor(t *testing.T) {
+ // Testing the default implementation rather than NewAccessor which can be
+ // overridden by developers.
+ is := assert.New(t)
+
+ // Create release
+ info := &rspb.Info{Status: common.StatusDeployed, LastDeployed: time.Now().Add(1000)}
+ labels := make(map[string]string)
+ labels["foo"] = "bar"
+ rel := &rspb.Release{
+ Name: "happy-cats",
+ Version: 2,
+ Info: info,
+ Labels: labels,
+ Namespace: "default",
+ ApplyMethod: "csa",
+ }
+
+ // newDefaultAccessor should not be called directly Instead, NewAccessor should be
+ // called and it will call NewDefaultAccessor. NewAccessor can be changed to a
+ // non-default accessor by a user so the test calls the default implementation.
+ // The accessor provides a means to access data on resources that are different types
+ // but have the same interface. Instead of properties, methods are used to access
+ // information. Structs with properties are useful in Go when it comes to marshalling
+ // and unmarshalling data (e.g. coming and going from JSON or YAML). But, structs
+ // can't be used with interfaces. The accessors enable access to the underlying data
+ // in a manner that works with Go interfaces.
+ accessor, err := newDefaultAccessor(rel)
+ is.NoError(err)
+
+ // Verify information
+ is.Equal(rel.Name, accessor.Name())
+ is.Equal(rel.Namespace, accessor.Namespace())
+ is.Equal(rel.Version, accessor.Version())
+ is.Equal(rel.ApplyMethod, accessor.ApplyMethod())
+ is.Equal(rel.Labels, accessor.Labels())
+}
diff --git a/helm/pkg/release/interfaces.go b/helm/pkg/release/interfaces.go
new file mode 100644
index 000000000..aaa5a756f
--- /dev/null
+++ b/helm/pkg/release/interfaces.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart"
+)
+
+type Releaser interface{}
+
+type Hook interface{}
+
+type Accessor interface {
+ Name() string
+ Namespace() string
+ Version() int
+ Hooks() []Hook
+ Manifest() string
+ Notes() string
+ Labels() map[string]string
+ Chart() chart.Charter
+ Status() string
+ ApplyMethod() string
+ DeployedAt() time.Time
+}
+
+type HookAccessor interface {
+ Path() string
+ Manifest() string
+}
diff --git a/helm/pkg/release/responses.go b/helm/pkg/release/responses.go
new file mode 100644
index 000000000..6e0a0eaec
--- /dev/null
+++ b/helm/pkg/release/responses.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+// UninstallReleaseResponse represents a successful response to an uninstall request.
+type UninstallReleaseResponse struct {
+ // Release is the release that was marked deleted.
+ Release Releaser `json:"release,omitempty"`
+ // Info is an uninstall message
+ Info string `json:"info,omitempty"`
+}
diff --git a/helm/pkg/release/v1/hook.go b/helm/pkg/release/v1/hook.go
new file mode 100644
index 000000000..f0a370c15
--- /dev/null
+++ b/helm/pkg/release/v1/hook.go
@@ -0,0 +1,189 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// HookEvent specifies the hook event
+type HookEvent string
+
+// Hook event types
+const (
+ HookPreInstall HookEvent = "pre-install"
+ HookPostInstall HookEvent = "post-install"
+ HookPreDelete HookEvent = "pre-delete"
+ HookPostDelete HookEvent = "post-delete"
+ HookPreUpgrade HookEvent = "pre-upgrade"
+ HookPostUpgrade HookEvent = "post-upgrade"
+ HookPreRollback HookEvent = "pre-rollback"
+ HookPostRollback HookEvent = "post-rollback"
+ HookTest HookEvent = "test"
+)
+
+func (x HookEvent) String() string { return string(x) }
+
+// HookDeletePolicy specifies the hook delete policy
+type HookDeletePolicy string
+
+// Hook delete policy types
+const (
+ HookSucceeded HookDeletePolicy = "hook-succeeded"
+ HookFailed HookDeletePolicy = "hook-failed"
+ HookBeforeHookCreation HookDeletePolicy = "before-hook-creation"
+)
+
+func (x HookDeletePolicy) String() string { return string(x) }
+
+// HookOutputLogPolicy specifies the hook output log policy
+type HookOutputLogPolicy string
+
+// Hook output log policy types
+const (
+ HookOutputOnSucceeded HookOutputLogPolicy = "hook-succeeded"
+ HookOutputOnFailed HookOutputLogPolicy = "hook-failed"
+)
+
+func (x HookOutputLogPolicy) String() string { return string(x) }
+
+// HookAnnotation is the label name for a hook
+const HookAnnotation = "helm.sh/hook"
+
+// HookWeightAnnotation is the label name for a hook weight
+const HookWeightAnnotation = "helm.sh/hook-weight"
+
+// HookDeleteAnnotation is the label name for the delete policy for a hook
+const HookDeleteAnnotation = "helm.sh/hook-delete-policy"
+
+// HookOutputLogAnnotation is the label name for the output log policy for a hook
+const HookOutputLogAnnotation = "helm.sh/hook-output-log-policy"
+
+// Hook defines a hook object.
+type Hook struct {
+ Name string `json:"name,omitempty"`
+ // Kind is the Kubernetes kind.
+ Kind string `json:"kind,omitempty"`
+ // Path is the chart-relative path to the template.
+ Path string `json:"path,omitempty"`
+ // Manifest is the manifest contents.
+ Manifest string `json:"manifest,omitempty"`
+ // Events are the events that this hook fires on.
+ Events []HookEvent `json:"events,omitempty"`
+ // LastRun indicates the date/time this was last run.
+ LastRun HookExecution `json:"last_run,omitempty"`
+ // Weight indicates the sort order for execution among similar Hook type
+ Weight int `json:"weight,omitempty"`
+ // DeletePolicies are the policies that indicate when to delete the hook
+ DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"`
+ // OutputLogPolicies defines whether we should copy hook logs back to main process
+ OutputLogPolicies []HookOutputLogPolicy `json:"output_log_policies,omitempty"`
+}
+
+// A HookExecution records the result for the last execution of a hook for a given release.
+type HookExecution struct {
+ // StartedAt indicates the date/time this hook was started
+ StartedAt time.Time `json:"started_at,omitzero"`
+ // CompletedAt indicates the date/time this hook was completed.
+ CompletedAt time.Time `json:"completed_at,omitzero"`
+ // Phase indicates whether the hook completed successfully
+ Phase HookPhase `json:"phase"`
+}
+
+// A HookPhase indicates the state of a hook execution
+type HookPhase string
+
+const (
+ // HookPhaseUnknown indicates that a hook is in an unknown state
+ HookPhaseUnknown HookPhase = "Unknown"
+ // HookPhaseRunning indicates that a hook is currently executing
+ HookPhaseRunning HookPhase = "Running"
+ // HookPhaseSucceeded indicates that hook execution succeeded
+ HookPhaseSucceeded HookPhase = "Succeeded"
+ // HookPhaseFailed indicates that hook execution failed
+ HookPhaseFailed HookPhase = "Failed"
+)
+
+// String converts a hook phase to a printable string
+func (x HookPhase) String() string { return string(x) }
+
+// hookExecutionJSON is used for custom JSON marshaling/unmarshaling
+type hookExecutionJSON struct {
+ StartedAt *time.Time `json:"started_at,omitempty"`
+ CompletedAt *time.Time `json:"completed_at,omitempty"`
+ Phase HookPhase `json:"phase"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// It handles empty string time fields by treating them as zero values.
+func (h *HookExecution) UnmarshalJSON(data []byte) error {
+ // First try to unmarshal into a map to handle empty string time fields
+ var raw map[string]interface{}
+ if err := json.Unmarshal(data, &raw); err != nil {
+ return err
+ }
+
+ // Replace empty string time fields with nil
+ for _, field := range []string{"started_at", "completed_at"} {
+ if val, ok := raw[field]; ok {
+ if str, ok := val.(string); ok && str == "" {
+ raw[field] = nil
+ }
+ }
+ }
+
+ // Re-marshal with cleaned data
+ cleaned, err := json.Marshal(raw)
+ if err != nil {
+ return err
+ }
+
+ // Unmarshal into temporary struct with pointer time fields
+ var tmp hookExecutionJSON
+ if err := json.Unmarshal(cleaned, &tmp); err != nil {
+ return err
+ }
+
+ // Copy values to HookExecution struct
+ if tmp.StartedAt != nil {
+ h.StartedAt = *tmp.StartedAt
+ }
+ if tmp.CompletedAt != nil {
+ h.CompletedAt = *tmp.CompletedAt
+ }
+ h.Phase = tmp.Phase
+
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// It omits zero-value time fields from the JSON output.
+func (h HookExecution) MarshalJSON() ([]byte, error) {
+ tmp := hookExecutionJSON{
+ Phase: h.Phase,
+ }
+
+ if !h.StartedAt.IsZero() {
+ tmp.StartedAt = &h.StartedAt
+ }
+ if !h.CompletedAt.IsZero() {
+ tmp.CompletedAt = &h.CompletedAt
+ }
+
+ return json.Marshal(tmp)
+}
diff --git a/helm/pkg/release/v1/hook_test.go b/helm/pkg/release/v1/hook_test.go
new file mode 100644
index 000000000..cea2568bc
--- /dev/null
+++ b/helm/pkg/release/v1/hook_test.go
@@ -0,0 +1,231 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHookExecutionMarshalJSON(t *testing.T) {
+ started := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+ completed := time.Date(2025, 10, 8, 12, 5, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ exec HookExecution
+ expected string
+ }{
+ {
+ name: "all fields populated",
+ exec: HookExecution{
+ StartedAt: started,
+ CompletedAt: completed,
+ Phase: HookPhaseSucceeded,
+ },
+ expected: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Succeeded"}`,
+ },
+ {
+ name: "only phase",
+ exec: HookExecution{
+ Phase: HookPhaseRunning,
+ },
+ expected: `{"phase":"Running"}`,
+ },
+ {
+ name: "with started time only",
+ exec: HookExecution{
+ StartedAt: started,
+ Phase: HookPhaseRunning,
+ },
+ expected: `{"started_at":"2025-10-08T12:00:00Z","phase":"Running"}`,
+ },
+ {
+ name: "failed phase",
+ exec: HookExecution{
+ StartedAt: started,
+ CompletedAt: completed,
+ Phase: HookPhaseFailed,
+ },
+ expected: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Failed"}`,
+ },
+ {
+ name: "unknown phase",
+ exec: HookExecution{
+ Phase: HookPhaseUnknown,
+ },
+ expected: `{"phase":"Unknown"}`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ data, err := json.Marshal(&tt.exec)
+ require.NoError(t, err)
+ assert.JSONEq(t, tt.expected, string(data))
+ })
+ }
+}
+
+func TestHookExecutionUnmarshalJSON(t *testing.T) {
+ started := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+ completed := time.Date(2025, 10, 8, 12, 5, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ input string
+ expected HookExecution
+ wantErr bool
+ }{
+ {
+ name: "all fields populated",
+ input: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Succeeded"}`,
+ expected: HookExecution{
+ StartedAt: started,
+ CompletedAt: completed,
+ Phase: HookPhaseSucceeded,
+ },
+ },
+ {
+ name: "only phase",
+ input: `{"phase":"Running"}`,
+ expected: HookExecution{
+ Phase: HookPhaseRunning,
+ },
+ },
+ {
+ name: "empty string time fields",
+ input: `{"started_at":"","completed_at":"","phase":"Succeeded"}`,
+ expected: HookExecution{
+ Phase: HookPhaseSucceeded,
+ },
+ },
+ {
+ name: "missing time fields",
+ input: `{"phase":"Failed"}`,
+ expected: HookExecution{
+ Phase: HookPhaseFailed,
+ },
+ },
+ {
+ name: "null time fields",
+ input: `{"started_at":null,"completed_at":null,"phase":"Unknown"}`,
+ expected: HookExecution{
+ Phase: HookPhaseUnknown,
+ },
+ },
+ {
+ name: "mixed empty and valid time fields",
+ input: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"","phase":"Running"}`,
+ expected: HookExecution{
+ StartedAt: started,
+ Phase: HookPhaseRunning,
+ },
+ },
+ {
+ name: "with started time only",
+ input: `{"started_at":"2025-10-08T12:00:00Z","phase":"Running"}`,
+ expected: HookExecution{
+ StartedAt: started,
+ Phase: HookPhaseRunning,
+ },
+ },
+ {
+ name: "failed phase with times",
+ input: `{"started_at":"2025-10-08T12:00:00Z","completed_at":"2025-10-08T12:05:00Z","phase":"Failed"}`,
+ expected: HookExecution{
+ StartedAt: started,
+ CompletedAt: completed,
+ Phase: HookPhaseFailed,
+ },
+ },
+ {
+ name: "invalid time format",
+ input: `{"started_at":"invalid-time","phase":"Running"}`,
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var exec HookExecution
+ err := json.Unmarshal([]byte(tt.input), &exec)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ assert.Equal(t, tt.expected.StartedAt.Unix(), exec.StartedAt.Unix())
+ assert.Equal(t, tt.expected.CompletedAt.Unix(), exec.CompletedAt.Unix())
+ assert.Equal(t, tt.expected.Phase, exec.Phase)
+ })
+ }
+}
+
+func TestHookExecutionRoundTrip(t *testing.T) {
+ started := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+ completed := time.Date(2025, 10, 8, 12, 5, 0, 0, time.UTC)
+
+ original := HookExecution{
+ StartedAt: started,
+ CompletedAt: completed,
+ Phase: HookPhaseSucceeded,
+ }
+
+ data, err := json.Marshal(&original)
+ require.NoError(t, err)
+
+ var decoded HookExecution
+ err = json.Unmarshal(data, &decoded)
+ require.NoError(t, err)
+
+ assert.Equal(t, original.StartedAt.Unix(), decoded.StartedAt.Unix())
+ assert.Equal(t, original.CompletedAt.Unix(), decoded.CompletedAt.Unix())
+ assert.Equal(t, original.Phase, decoded.Phase)
+}
+
+func TestHookExecutionEmptyStringRoundTrip(t *testing.T) {
+ // This test specifically verifies that empty string time fields
+ // are handled correctly during parsing
+ input := `{"started_at":"","completed_at":"","phase":"Succeeded"}`
+
+ var exec HookExecution
+ err := json.Unmarshal([]byte(input), &exec)
+ require.NoError(t, err)
+
+ // Verify time fields are zero values
+ assert.True(t, exec.StartedAt.IsZero())
+ assert.True(t, exec.CompletedAt.IsZero())
+ assert.Equal(t, HookPhaseSucceeded, exec.Phase)
+
+ // Marshal back and verify empty time fields are omitted
+ data, err := json.Marshal(&exec)
+ require.NoError(t, err)
+
+ var result map[string]interface{}
+ err = json.Unmarshal(data, &result)
+ require.NoError(t, err)
+
+ // Zero time values should be omitted
+ assert.NotContains(t, result, "started_at")
+ assert.NotContains(t, result, "completed_at")
+ assert.Equal(t, "Succeeded", result["phase"])
+}
diff --git a/helm/pkg/release/v1/info.go b/helm/pkg/release/v1/info.go
new file mode 100644
index 000000000..f895fdf6c
--- /dev/null
+++ b/helm/pkg/release/v1/info.go
@@ -0,0 +1,125 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "encoding/json"
+ "time"
+
+ "helm.sh/helm/v4/pkg/release/common"
+
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Info describes release information.
+type Info struct {
+ // FirstDeployed is when the release was first deployed.
+ FirstDeployed time.Time `json:"first_deployed,omitzero"`
+ // LastDeployed is when the release was last deployed.
+ LastDeployed time.Time `json:"last_deployed,omitzero"`
+ // Deleted tracks when this object was deleted.
+ Deleted time.Time `json:"deleted,omitzero"`
+ // Description is human-friendly "log entry" about this release.
+ Description string `json:"description,omitempty"`
+ // Status is the current state of the release
+ Status common.Status `json:"status,omitempty"`
+ // Contains the rendered templates/NOTES.txt if available
+ Notes string `json:"notes,omitempty"`
+ // Contains the deployed resources information
+ Resources map[string][]runtime.Object `json:"resources,omitempty"`
+}
+
+// infoJSON is used for custom JSON marshaling/unmarshaling
+type infoJSON struct {
+ FirstDeployed *time.Time `json:"first_deployed,omitempty"`
+ LastDeployed *time.Time `json:"last_deployed,omitempty"`
+ Deleted *time.Time `json:"deleted,omitempty"`
+ Description string `json:"description,omitempty"`
+ Status common.Status `json:"status,omitempty"`
+ Notes string `json:"notes,omitempty"`
+ Resources map[string][]runtime.Object `json:"resources,omitempty"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// It handles empty string time fields by treating them as zero values.
+func (i *Info) UnmarshalJSON(data []byte) error {
+ // First try to unmarshal into a map to handle empty string time fields
+ var raw map[string]interface{}
+ if err := json.Unmarshal(data, &raw); err != nil {
+ return err
+ }
+
+ // Replace empty string time fields with nil
+ for _, field := range []string{"first_deployed", "last_deployed", "deleted"} {
+ if val, ok := raw[field]; ok {
+ if str, ok := val.(string); ok && str == "" {
+ raw[field] = nil
+ }
+ }
+ }
+
+ // Re-marshal with cleaned data
+ cleaned, err := json.Marshal(raw)
+ if err != nil {
+ return err
+ }
+
+ // Unmarshal into temporary struct with pointer time fields
+ var tmp infoJSON
+ if err := json.Unmarshal(cleaned, &tmp); err != nil {
+ return err
+ }
+
+ // Copy values to Info struct
+ if tmp.FirstDeployed != nil {
+ i.FirstDeployed = *tmp.FirstDeployed
+ }
+ if tmp.LastDeployed != nil {
+ i.LastDeployed = *tmp.LastDeployed
+ }
+ if tmp.Deleted != nil {
+ i.Deleted = *tmp.Deleted
+ }
+ i.Description = tmp.Description
+ i.Status = tmp.Status
+ i.Notes = tmp.Notes
+ i.Resources = tmp.Resources
+
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// It omits zero-value time fields from the JSON output.
+func (i Info) MarshalJSON() ([]byte, error) {
+ tmp := infoJSON{
+ Description: i.Description,
+ Status: i.Status,
+ Notes: i.Notes,
+ Resources: i.Resources,
+ }
+
+ if !i.FirstDeployed.IsZero() {
+ tmp.FirstDeployed = &i.FirstDeployed
+ }
+ if !i.LastDeployed.IsZero() {
+ tmp.LastDeployed = &i.LastDeployed
+ }
+ if !i.Deleted.IsZero() {
+ tmp.Deleted = &i.Deleted
+ }
+
+ return json.Marshal(tmp)
+}
diff --git a/helm/pkg/release/v1/info_test.go b/helm/pkg/release/v1/info_test.go
new file mode 100644
index 000000000..0fff78f76
--- /dev/null
+++ b/helm/pkg/release/v1/info_test.go
@@ -0,0 +1,285 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/release/common"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInfoMarshalJSON(t *testing.T) {
+ now := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+ later := time.Date(2025, 10, 8, 13, 0, 0, 0, time.UTC)
+ deleted := time.Date(2025, 10, 8, 14, 0, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ info Info
+ expected string
+ }{
+ {
+ name: "all fields populated",
+ info: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Deleted: deleted,
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ Notes: "Test notes",
+ },
+ expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","description":"Test release","status":"deployed","notes":"Test notes"}`,
+ },
+ {
+ name: "only required fields",
+ info: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Status: common.StatusDeployed,
+ },
+ expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","status":"deployed"}`,
+ },
+ {
+ name: "zero time values omitted",
+ info: Info{
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ },
+ expected: `{"description":"Test release","status":"deployed"}`,
+ },
+ {
+ name: "with pending status",
+ info: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Status: common.StatusPendingInstall,
+ Description: "Installing release",
+ },
+ expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","description":"Installing release","status":"pending-install"}`,
+ },
+ {
+ name: "uninstalled with deleted time",
+ info: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Deleted: deleted,
+ Status: common.StatusUninstalled,
+ Description: "Uninstalled release",
+ },
+ expected: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","description":"Uninstalled release","status":"uninstalled"}`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ data, err := json.Marshal(&tt.info)
+ require.NoError(t, err)
+ assert.JSONEq(t, tt.expected, string(data))
+ })
+ }
+}
+
+func TestInfoUnmarshalJSON(t *testing.T) {
+ now := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+ later := time.Date(2025, 10, 8, 13, 0, 0, 0, time.UTC)
+ deleted := time.Date(2025, 10, 8, 14, 0, 0, 0, time.UTC)
+
+ tests := []struct {
+ name string
+ input string
+ expected Info
+ wantErr bool
+ }{
+ {
+ name: "all fields populated",
+ input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","description":"Test release","status":"deployed","notes":"Test notes"}`,
+ expected: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Deleted: deleted,
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ Notes: "Test notes",
+ },
+ },
+ {
+ name: "only required fields",
+ input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","status":"deployed"}`,
+ expected: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Status: common.StatusDeployed,
+ },
+ },
+ {
+ name: "empty string time fields",
+ input: `{"first_deployed":"","last_deployed":"","deleted":"","description":"Test release","status":"deployed"}`,
+ expected: Info{
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ },
+ },
+ {
+ name: "missing time fields",
+ input: `{"description":"Test release","status":"deployed"}`,
+ expected: Info{
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ },
+ },
+ {
+ name: "null time fields",
+ input: `{"first_deployed":null,"last_deployed":null,"deleted":null,"description":"Test release","status":"deployed"}`,
+ expected: Info{
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ },
+ },
+ {
+ name: "mixed empty and valid time fields",
+ input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"","deleted":"","status":"deployed"}`,
+ expected: Info{
+ FirstDeployed: now,
+ Status: common.StatusDeployed,
+ },
+ },
+ {
+ name: "pending install status",
+ input: `{"first_deployed":"2025-10-08T12:00:00Z","status":"pending-install","description":"Installing"}`,
+ expected: Info{
+ FirstDeployed: now,
+ Status: common.StatusPendingInstall,
+ Description: "Installing",
+ },
+ },
+ {
+ name: "uninstalled with deleted time",
+ input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","deleted":"2025-10-08T14:00:00Z","status":"uninstalled"}`,
+ expected: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Deleted: deleted,
+ Status: common.StatusUninstalled,
+ },
+ },
+ {
+ name: "failed status",
+ input: `{"first_deployed":"2025-10-08T12:00:00Z","last_deployed":"2025-10-08T13:00:00Z","status":"failed","description":"Deployment failed"}`,
+ expected: Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Status: common.StatusFailed,
+ Description: "Deployment failed",
+ },
+ },
+ {
+ name: "invalid time format",
+ input: `{"first_deployed":"invalid-time","status":"deployed"}`,
+ wantErr: true,
+ },
+ {
+ name: "empty object",
+ input: `{}`,
+ expected: Info{
+ Status: "",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var info Info
+ err := json.Unmarshal([]byte(tt.input), &info)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ assert.Equal(t, tt.expected.FirstDeployed.Unix(), info.FirstDeployed.Unix())
+ assert.Equal(t, tt.expected.LastDeployed.Unix(), info.LastDeployed.Unix())
+ assert.Equal(t, tt.expected.Deleted.Unix(), info.Deleted.Unix())
+ assert.Equal(t, tt.expected.Description, info.Description)
+ assert.Equal(t, tt.expected.Status, info.Status)
+ assert.Equal(t, tt.expected.Notes, info.Notes)
+ assert.Equal(t, tt.expected.Resources, info.Resources)
+ })
+ }
+}
+
+func TestInfoRoundTrip(t *testing.T) {
+ now := time.Date(2025, 10, 8, 12, 0, 0, 0, time.UTC)
+ later := time.Date(2025, 10, 8, 13, 0, 0, 0, time.UTC)
+
+ original := Info{
+ FirstDeployed: now,
+ LastDeployed: later,
+ Description: "Test release",
+ Status: common.StatusDeployed,
+ Notes: "Release notes",
+ }
+
+ data, err := json.Marshal(&original)
+ require.NoError(t, err)
+
+ var decoded Info
+ err = json.Unmarshal(data, &decoded)
+ require.NoError(t, err)
+
+ assert.Equal(t, original.FirstDeployed.Unix(), decoded.FirstDeployed.Unix())
+ assert.Equal(t, original.LastDeployed.Unix(), decoded.LastDeployed.Unix())
+ assert.Equal(t, original.Deleted.Unix(), decoded.Deleted.Unix())
+ assert.Equal(t, original.Description, decoded.Description)
+ assert.Equal(t, original.Status, decoded.Status)
+ assert.Equal(t, original.Notes, decoded.Notes)
+}
+
+func TestInfoEmptyStringRoundTrip(t *testing.T) {
+ // This test specifically verifies that empty string time fields
+ // are handled correctly during parsing
+ input := `{"first_deployed":"","last_deployed":"","deleted":"","status":"deployed","description":"test"}`
+
+ var info Info
+ err := json.Unmarshal([]byte(input), &info)
+ require.NoError(t, err)
+
+ // Verify time fields are zero values
+ assert.True(t, info.FirstDeployed.IsZero())
+ assert.True(t, info.LastDeployed.IsZero())
+ assert.True(t, info.Deleted.IsZero())
+ assert.Equal(t, common.StatusDeployed, info.Status)
+ assert.Equal(t, "test", info.Description)
+
+ // Marshal back and verify empty time fields are omitted
+ data, err := json.Marshal(&info)
+ require.NoError(t, err)
+
+ var result map[string]interface{}
+ err = json.Unmarshal(data, &result)
+ require.NoError(t, err)
+
+ // Zero time values should be omitted due to omitzero tag
+ assert.NotContains(t, result, "first_deployed")
+ assert.NotContains(t, result, "last_deployed")
+ assert.NotContains(t, result, "deleted")
+ assert.Equal(t, "deployed", result["status"])
+ assert.Equal(t, "test", result["description"])
+}
diff --git a/helm/pkg/release/v1/mock.go b/helm/pkg/release/v1/mock.go
new file mode 100644
index 000000000..dc135a24a
--- /dev/null
+++ b/helm/pkg/release/v1/mock.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+ "math/rand"
+ "time"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ rcommon "helm.sh/helm/v4/pkg/release/common"
+)
+
+// MockHookTemplate is the hook template used for all mock release objects.
+var MockHookTemplate = `apiVersion: v1
+kind: Job
+metadata:
+ annotations:
+ "helm.sh/hook": pre-install
+`
+
+// MockManifest is the manifest used for all mock release objects.
+var MockManifest = `apiVersion: v1
+kind: Secret
+metadata:
+ name: fixture
+`
+
+// MockReleaseOptions allows for user-configurable options on mock release objects.
+type MockReleaseOptions struct {
+ Name string
+ Version int
+ Chart *chart.Chart
+ Status rcommon.Status
+ Namespace string
+ Labels map[string]string
+}
+
+// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing.
+func Mock(opts *MockReleaseOptions) *Release {
+ date := time.Unix(242085845, 0).UTC()
+
+ name := opts.Name
+ if name == "" {
+ name = "testrelease-" + fmt.Sprint(rand.Intn(100))
+ }
+
+ version := 1
+ if opts.Version != 0 {
+ version = opts.Version
+ }
+
+ namespace := opts.Namespace
+ if namespace == "" {
+ namespace = "default"
+ }
+ var labels map[string]string
+ if len(opts.Labels) > 0 {
+ labels = opts.Labels
+ }
+
+ ch := opts.Chart
+ if opts.Chart == nil {
+ ch = &chart.Chart{
+ Metadata: &chart.Metadata{
+ Name: "foo",
+ Version: "0.1.0-beta.1",
+ AppVersion: "1.0",
+ Annotations: map[string]string{
+ "category": "web-apps",
+ "supported": "true",
+ },
+ Dependencies: []*chart.Dependency{
+ {
+ Name: "cool-plugin",
+ Version: "1.0.0",
+ Repository: "https://coolplugin.io/charts",
+ Condition: "coolPlugin.enabled",
+ Enabled: true,
+ },
+ {
+ Name: "crds",
+ Version: "2.7.1",
+ Condition: "crds.enabled",
+ },
+ },
+ },
+ Templates: []*common.File{
+ {Name: "templates/foo.tpl", ModTime: time.Now(), Data: []byte(MockManifest)},
+ },
+ }
+ }
+
+ scode := rcommon.StatusDeployed
+ if len(opts.Status) > 0 {
+ scode = opts.Status
+ }
+
+ info := &Info{
+ FirstDeployed: date,
+ LastDeployed: date,
+ Status: scode,
+ Description: "Release mock",
+ Notes: "Some mock release notes!",
+ }
+
+ return &Release{
+ Name: name,
+ Info: info,
+ Chart: ch,
+ Config: map[string]interface{}{"name": "value"},
+ Version: version,
+ Namespace: namespace,
+ Hooks: []*Hook{
+ {
+ Name: "pre-install-hook",
+ Kind: "Job",
+ Path: "pre-install-hook.yaml",
+ Manifest: MockHookTemplate,
+ LastRun: HookExecution{},
+ Events: []HookEvent{HookPreInstall},
+ },
+ },
+ Manifest: MockManifest,
+ Labels: labels,
+ }
+}
diff --git a/helm/pkg/release/v1/release.go b/helm/pkg/release/v1/release.go
new file mode 100644
index 000000000..454ee6eb7
--- /dev/null
+++ b/helm/pkg/release/v1/release.go
@@ -0,0 +1,60 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+type ApplyMethod string
+
+const ApplyMethodClientSideApply ApplyMethod = "csa"
+const ApplyMethodServerSideApply ApplyMethod = "ssa"
+
+// Release describes a deployment of a chart, together with the chart
+// and the variables used to deploy that chart.
+type Release struct {
+ // Name is the name of the release
+ Name string `json:"name,omitempty"`
+ // Info provides information about a release
+ Info *Info `json:"info,omitempty"`
+ // Chart is the chart that was released.
+ Chart *chart.Chart `json:"chart,omitempty"`
+ // Config is the set of extra Values added to the chart.
+ // These values override the default values inside of the chart.
+ Config map[string]interface{} `json:"config,omitempty"`
+ // Manifest is the string representation of the rendered template.
+ Manifest string `json:"manifest,omitempty"`
+ // Hooks are all of the hooks declared for this release.
+ Hooks []*Hook `json:"hooks,omitempty"`
+ // Version is an int which represents the revision of the release.
+ Version int `json:"version,omitempty"`
+ // Namespace is the kubernetes namespace of the release.
+ Namespace string `json:"namespace,omitempty"`
+ // Labels of the release.
+ // Disabled encoding into Json cause labels are stored in storage driver metadata field.
+ Labels map[string]string `json:"-"`
+ // ApplyMethod stores whether server-side or client-side apply was used for the release
+ // Unset (empty string) should be treated as the default of client-side apply
+ ApplyMethod string `json:"apply_method,omitempty"` // "ssa" | "csa"
+}
+
+// SetStatus is a helper for setting the status on a release.
+func (r *Release) SetStatus(status common.Status, msg string) {
+ r.Info.Status = status
+ r.Info.Description = msg
+}
diff --git a/helm/pkg/release/v1/util/filter.go b/helm/pkg/release/v1/util/filter.go
new file mode 100644
index 000000000..dc60195cf
--- /dev/null
+++ b/helm/pkg/release/v1/util/filter.go
@@ -0,0 +1,81 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
+
+import (
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// FilterFunc returns true if the release object satisfies
+// the predicate of the underlying filter func.
+type FilterFunc func(*rspb.Release) bool
+
+// Check applies the FilterFunc to the release object.
+func (fn FilterFunc) Check(rls *rspb.Release) bool {
+ if rls == nil {
+ return false
+ }
+ return fn(rls)
+}
+
+// Filter applies the filter(s) to the list of provided releases
+// returning the list that satisfies the filtering predicate.
+func (fn FilterFunc) Filter(rels []*rspb.Release) (rets []*rspb.Release) {
+ for _, rel := range rels {
+ if fn.Check(rel) {
+ rets = append(rets, rel)
+ }
+ }
+ return
+}
+
+// Any returns a FilterFunc that filters a list of releases
+// determined by the predicate 'f0 || f1 || ... || fn'.
+func Any(filters ...FilterFunc) FilterFunc {
+ return func(rls *rspb.Release) bool {
+ for _, filter := range filters {
+ if filter(rls) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// All returns a FilterFunc that filters a list of releases
+// determined by the predicate 'f0 && f1 && ... && fn'.
+func All(filters ...FilterFunc) FilterFunc {
+ return func(rls *rspb.Release) bool {
+ for _, filter := range filters {
+ if !filter(rls) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// StatusFilter filters a set of releases by status code.
+func StatusFilter(status common.Status) FilterFunc {
+ return FilterFunc(func(rls *rspb.Release) bool {
+ if rls == nil {
+ return true
+ }
+ return rls.Info.Status == status
+ })
+}
diff --git a/helm/pkg/release/v1/util/filter_test.go b/helm/pkg/release/v1/util/filter_test.go
new file mode 100644
index 000000000..1004a4c57
--- /dev/null
+++ b/helm/pkg/release/v1/util/filter_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
+
+import (
+ "testing"
+
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestFilterAny(t *testing.T) {
+ ls := Any(StatusFilter(common.StatusUninstalled)).Filter(releases)
+ if len(ls) != 2 {
+ t.Fatalf("expected 2 results, got '%d'", len(ls))
+ }
+
+ r0, r1 := ls[0], ls[1]
+ switch {
+ case r0.Info.Status != common.StatusUninstalled:
+ t.Fatalf("expected UNINSTALLED result, got '%s'", r1.Info.Status.String())
+ case r1.Info.Status != common.StatusUninstalled:
+ t.Fatalf("expected UNINSTALLED result, got '%s'", r1.Info.Status.String())
+ }
+}
+
+func TestFilterAll(t *testing.T) {
+ fn := FilterFunc(func(rls *rspb.Release) bool {
+ // true if not uninstalled and version < 4
+ v0 := !StatusFilter(common.StatusUninstalled).Check(rls)
+ v1 := rls.Version < 4
+ return v0 && v1
+ })
+
+ ls := All(fn).Filter(releases)
+ if len(ls) != 1 {
+ t.Fatalf("expected 1 result, got '%d'", len(ls))
+ }
+
+ switch r0 := ls[0]; {
+ case r0.Version == 4:
+ t.Fatal("got release with status revision 4")
+ case r0.Info.Status == common.StatusUninstalled:
+ t.Fatal("got release with status UNINSTALLED")
+ }
+}
diff --git a/helm/pkg/release/v1/util/kind_sorter.go b/helm/pkg/release/v1/util/kind_sorter.go
new file mode 100644
index 000000000..bc074340f
--- /dev/null
+++ b/helm/pkg/release/v1/util/kind_sorter.go
@@ -0,0 +1,165 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "sort"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// KindSortOrder is an ordering of Kinds.
+type KindSortOrder []string
+
+// InstallOrder is the order in which manifests should be installed (by Kind).
+//
+// Those occurring earlier in the list get installed before those occurring later in the list.
+var InstallOrder KindSortOrder = []string{
+ "PriorityClass",
+ "Namespace",
+ "NetworkPolicy",
+ "ResourceQuota",
+ "LimitRange",
+ "PodSecurityPolicy",
+ "PodDisruptionBudget",
+ "ServiceAccount",
+ "Secret",
+ "SecretList",
+ "ConfigMap",
+ "StorageClass",
+ "PersistentVolume",
+ "PersistentVolumeClaim",
+ "CustomResourceDefinition",
+ "ClusterRole",
+ "ClusterRoleList",
+ "ClusterRoleBinding",
+ "ClusterRoleBindingList",
+ "Role",
+ "RoleList",
+ "RoleBinding",
+ "RoleBindingList",
+ "Service",
+ "DaemonSet",
+ "Pod",
+ "ReplicationController",
+ "ReplicaSet",
+ "Deployment",
+ "HorizontalPodAutoscaler",
+ "StatefulSet",
+ "Job",
+ "CronJob",
+ "IngressClass",
+ "Ingress",
+ "APIService",
+ "MutatingWebhookConfiguration",
+ "ValidatingWebhookConfiguration",
+}
+
+// UninstallOrder is the order in which manifests should be uninstalled (by Kind).
+//
+// Those occurring earlier in the list get uninstalled before those occurring later in the list.
+var UninstallOrder KindSortOrder = []string{
+ // For uninstall, we remove validation before mutation to ensure webhooks don't block removal
+ "ValidatingWebhookConfiguration",
+ "MutatingWebhookConfiguration",
+ "APIService",
+ "Ingress",
+ "IngressClass",
+ "Service",
+ "CronJob",
+ "Job",
+ "StatefulSet",
+ "HorizontalPodAutoscaler",
+ "Deployment",
+ "ReplicaSet",
+ "ReplicationController",
+ "Pod",
+ "DaemonSet",
+ "RoleBindingList",
+ "RoleBinding",
+ "RoleList",
+ "Role",
+ "ClusterRoleBindingList",
+ "ClusterRoleBinding",
+ "ClusterRoleList",
+ "ClusterRole",
+ "CustomResourceDefinition",
+ "PersistentVolumeClaim",
+ "PersistentVolume",
+ "StorageClass",
+ "ConfigMap",
+ "SecretList",
+ "Secret",
+ "ServiceAccount",
+ "PodDisruptionBudget",
+ "PodSecurityPolicy",
+ "LimitRange",
+ "ResourceQuota",
+ "NetworkPolicy",
+ "Namespace",
+ "PriorityClass",
+}
+
+// sort manifests by kind.
+//
+// Results are sorted by 'ordering', keeping order of items with equal kind/priority
+func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest {
+ sort.SliceStable(manifests, func(i, j int) bool {
+ return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering)
+ })
+
+ return manifests
+}
+
+// sort hooks by kind, using an out-of-place sort to preserve the input parameters.
+//
+// Results are sorted by 'ordering', keeping order of items with equal kind/priority
+func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook {
+ h := hooks
+ sort.SliceStable(h, func(i, j int) bool {
+ return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering)
+ })
+
+ return h
+}
+
+func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool {
+ ordering := make(map[string]int, len(o))
+ for v, k := range o {
+ ordering[k] = v
+ }
+
+ first, aok := ordering[kindA]
+ second, bok := ordering[kindB]
+
+ if !aok && !bok {
+ // if both are unknown then sort alphabetically by kind, keep original order if same kind
+ if kindA != kindB {
+ return kindA < kindB
+ }
+ return first < second
+ }
+ // unknown kind is last
+ if !aok {
+ return false
+ }
+ if !bok {
+ return true
+ }
+ // sort different kinds, keep original order if same priority
+ return first < second
+}
diff --git a/helm/pkg/release/v1/util/kind_sorter_test.go b/helm/pkg/release/v1/util/kind_sorter_test.go
new file mode 100644
index 000000000..919de24e5
--- /dev/null
+++ b/helm/pkg/release/v1/util/kind_sorter_test.go
@@ -0,0 +1,347 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "bytes"
+ "testing"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestKindSorter(t *testing.T) {
+ manifests := []Manifest{
+ {
+ Name: "U",
+ Head: &SimpleHead{Kind: "IngressClass"},
+ },
+ {
+ Name: "E",
+ Head: &SimpleHead{Kind: "SecretList"},
+ },
+ {
+ Name: "i",
+ Head: &SimpleHead{Kind: "ClusterRole"},
+ },
+ {
+ Name: "I",
+ Head: &SimpleHead{Kind: "ClusterRoleList"},
+ },
+ {
+ Name: "j",
+ Head: &SimpleHead{Kind: "ClusterRoleBinding"},
+ },
+ {
+ Name: "J",
+ Head: &SimpleHead{Kind: "ClusterRoleBindingList"},
+ },
+ {
+ Name: "f",
+ Head: &SimpleHead{Kind: "ConfigMap"},
+ },
+ {
+ Name: "u",
+ Head: &SimpleHead{Kind: "CronJob"},
+ },
+ {
+ Name: "2",
+ Head: &SimpleHead{Kind: "CustomResourceDefinition"},
+ },
+ {
+ Name: "n",
+ Head: &SimpleHead{Kind: "DaemonSet"},
+ },
+ {
+ Name: "r",
+ Head: &SimpleHead{Kind: "Deployment"},
+ },
+ {
+ Name: "!",
+ Head: &SimpleHead{Kind: "HonkyTonkSet"},
+ },
+ {
+ Name: "v",
+ Head: &SimpleHead{Kind: "Ingress"},
+ },
+ {
+ Name: "t",
+ Head: &SimpleHead{Kind: "Job"},
+ },
+ {
+ Name: "c",
+ Head: &SimpleHead{Kind: "LimitRange"},
+ },
+ {
+ Name: "a",
+ Head: &SimpleHead{Kind: "Namespace"},
+ },
+ {
+ Name: "A",
+ Head: &SimpleHead{Kind: "NetworkPolicy"},
+ },
+ {
+ Name: "g",
+ Head: &SimpleHead{Kind: "PersistentVolume"},
+ },
+ {
+ Name: "h",
+ Head: &SimpleHead{Kind: "PersistentVolumeClaim"},
+ },
+ {
+ Name: "o",
+ Head: &SimpleHead{Kind: "Pod"},
+ },
+ {
+ Name: "3",
+ Head: &SimpleHead{Kind: "PodDisruptionBudget"},
+ },
+ {
+ Name: "C",
+ Head: &SimpleHead{Kind: "PodSecurityPolicy"},
+ },
+ {
+ Name: "q",
+ Head: &SimpleHead{Kind: "ReplicaSet"},
+ },
+ {
+ Name: "p",
+ Head: &SimpleHead{Kind: "ReplicationController"},
+ },
+ {
+ Name: "b",
+ Head: &SimpleHead{Kind: "ResourceQuota"},
+ },
+ {
+ Name: "k",
+ Head: &SimpleHead{Kind: "Role"},
+ },
+ {
+ Name: "K",
+ Head: &SimpleHead{Kind: "RoleList"},
+ },
+ {
+ Name: "l",
+ Head: &SimpleHead{Kind: "RoleBinding"},
+ },
+ {
+ Name: "L",
+ Head: &SimpleHead{Kind: "RoleBindingList"},
+ },
+ {
+ Name: "e",
+ Head: &SimpleHead{Kind: "Secret"},
+ },
+ {
+ Name: "m",
+ Head: &SimpleHead{Kind: "Service"},
+ },
+ {
+ Name: "d",
+ Head: &SimpleHead{Kind: "ServiceAccount"},
+ },
+ {
+ Name: "s",
+ Head: &SimpleHead{Kind: "StatefulSet"},
+ },
+ {
+ Name: "1",
+ Head: &SimpleHead{Kind: "StorageClass"},
+ },
+ {
+ Name: "w",
+ Head: &SimpleHead{Kind: "APIService"},
+ },
+ {
+ Name: "x",
+ Head: &SimpleHead{Kind: "HorizontalPodAutoscaler"},
+ },
+ {
+ Name: "F",
+ Head: &SimpleHead{Kind: "PriorityClass"},
+ },
+ {
+ Name: "M",
+ Head: &SimpleHead{Kind: "MutatingWebhookConfiguration"},
+ },
+ {
+ Name: "V",
+ Head: &SimpleHead{Kind: "ValidatingWebhookConfiguration"},
+ },
+ }
+
+ for _, test := range []struct {
+ description string
+ order KindSortOrder
+ expected string
+ }{
+ {"install", InstallOrder, "FaAbcC3deEf1gh2iIjJkKlLmnopqrxstuUvwMV!"},
+ {"uninstall", UninstallOrder, "VMwvUmutsxrqponLlKkJjIi2hg1fEed3CcbAaF!"},
+ } {
+ var buf bytes.Buffer
+ t.Run(test.description, func(t *testing.T) {
+ if got, want := len(test.expected), len(manifests); got != want {
+ t.Fatalf("Expected %d names in order, got %d", want, got)
+ }
+ defer buf.Reset()
+ orig := manifests
+ for _, r := range sortManifestsByKind(manifests, test.order) {
+ buf.WriteString(r.Name)
+ }
+ if got := buf.String(); got != test.expected {
+ t.Errorf("Expected %q, got %q", test.expected, got)
+ }
+ for i, manifest := range orig {
+ if manifest != manifests[i] {
+ t.Fatal("Expected input to sortManifestsByKind to stay the same")
+ }
+ }
+ })
+ }
+}
+
+// TestKindSorterKeepOriginalOrder verifies manifests of same kind are kept in original order
+func TestKindSorterKeepOriginalOrder(t *testing.T) {
+ manifests := []Manifest{
+ {
+ Name: "a",
+ Head: &SimpleHead{Kind: "ClusterRole"},
+ },
+ {
+ Name: "A",
+ Head: &SimpleHead{Kind: "ClusterRole"},
+ },
+ {
+ Name: "0",
+ Head: &SimpleHead{Kind: "ConfigMap"},
+ },
+ {
+ Name: "1",
+ Head: &SimpleHead{Kind: "ConfigMap"},
+ },
+ {
+ Name: "z",
+ Head: &SimpleHead{Kind: "ClusterRoleBinding"},
+ },
+ {
+ Name: "!",
+ Head: &SimpleHead{Kind: "ClusterRoleBinding"},
+ },
+ {
+ Name: "u2",
+ Head: &SimpleHead{Kind: "Unknown"},
+ },
+ {
+ Name: "u1",
+ Head: &SimpleHead{Kind: "Unknown"},
+ },
+ {
+ Name: "t3",
+ Head: &SimpleHead{Kind: "Unknown2"},
+ },
+ }
+ for _, test := range []struct {
+ description string
+ order KindSortOrder
+ expected string
+ }{
+ // expectation is sorted by kind (unknown is last) and within each group of same kind, the order is kept
+ {"cm,clusterRole,clusterRoleBinding,Unknown,Unknown2", InstallOrder, "01aAz!u2u1t3"},
+ } {
+ var buf bytes.Buffer
+ t.Run(test.description, func(t *testing.T) {
+ defer buf.Reset()
+ for _, r := range sortManifestsByKind(manifests, test.order) {
+ buf.WriteString(r.Name)
+ }
+ if got := buf.String(); got != test.expected {
+ t.Errorf("Expected %q, got %q", test.expected, got)
+ }
+ })
+ }
+}
+
+func TestKindSorterNamespaceAgainstUnknown(t *testing.T) {
+ unknown := Manifest{
+ Name: "a",
+ Head: &SimpleHead{Kind: "Unknown"},
+ }
+ namespace := Manifest{
+ Name: "b",
+ Head: &SimpleHead{Kind: "Namespace"},
+ }
+
+ manifests := []Manifest{unknown, namespace}
+ manifests = sortManifestsByKind(manifests, InstallOrder)
+
+ expectedOrder := []Manifest{namespace, unknown}
+ for i, manifest := range manifests {
+ if expectedOrder[i].Name != manifest.Name {
+ t.Errorf("Expected %s, got %s", expectedOrder[i].Name, manifest.Name)
+ }
+ }
+}
+
+// test hook sorting with a small subset of kinds, since it uses the same algorithm as sortManifestsByKind
+func TestKindSorterForHooks(t *testing.T) {
+ hooks := []*release.Hook{
+ {
+ Name: "i",
+ Kind: "ClusterRole",
+ },
+ {
+ Name: "j",
+ Kind: "ClusterRoleBinding",
+ },
+ {
+ Name: "c",
+ Kind: "LimitRange",
+ },
+ {
+ Name: "a",
+ Kind: "Namespace",
+ },
+ }
+
+ for _, test := range []struct {
+ description string
+ order KindSortOrder
+ expected string
+ }{
+ {"install", InstallOrder, "acij"},
+ {"uninstall", UninstallOrder, "jica"},
+ } {
+ var buf bytes.Buffer
+ t.Run(test.description, func(t *testing.T) {
+ if got, want := len(test.expected), len(hooks); got != want {
+ t.Fatalf("Expected %d names in order, got %d", want, got)
+ }
+ defer buf.Reset()
+ orig := hooks
+ for _, r := range sortHooksByKind(hooks, test.order) {
+ buf.WriteString(r.Name)
+ }
+ for i, hook := range orig {
+ if hook != hooks[i] {
+ t.Fatal("Expected input to sortHooksByKind to stay the same")
+ }
+ }
+ if got := buf.String(); got != test.expected {
+ t.Errorf("Expected %q, got %q", test.expected, got)
+ }
+ })
+ }
+}
diff --git a/helm/pkg/release/v1/util/manifest.go b/helm/pkg/release/v1/util/manifest.go
new file mode 100644
index 000000000..9a87949f8
--- /dev/null
+++ b/helm/pkg/release/v1/util/manifest.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// SimpleHead defines what the structure of the head of a manifest file
+type SimpleHead struct {
+ Version string `json:"apiVersion"`
+ Kind string `json:"kind,omitempty"`
+ Metadata *struct {
+ Name string `json:"name"`
+ Annotations map[string]string `json:"annotations"`
+ } `json:"metadata,omitempty"`
+}
+
+var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*")
+
+// SplitManifests takes a string of manifest and returns a map contains individual manifests
+func SplitManifests(bigFile string) map[string]string {
+ // Basically, we're quickly splitting a stream of YAML documents into an
+ // array of YAML docs. The file name is just a place holder, but should be
+ // integer-sortable so that manifests get output in the same order as the
+ // input (see `BySplitManifestsOrder`).
+ tpl := "manifest-%d"
+ res := map[string]string{}
+ // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly.
+ bigFileTmp := strings.TrimSpace(bigFile)
+ docs := sep.Split(bigFileTmp, -1)
+ var count int
+ for _, d := range docs {
+ if d == "" {
+ continue
+ }
+
+ d = strings.TrimSpace(d)
+ res[fmt.Sprintf(tpl, count)] = d
+ count = count + 1
+ }
+ return res
+}
+
+// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests`
+type BySplitManifestsOrder []string
+
+func (a BySplitManifestsOrder) Len() int { return len(a) }
+func (a BySplitManifestsOrder) Less(i, j int) bool {
+ // Split `manifest-%d`
+ anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0)
+ bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0)
+ return anum < bnum
+}
+func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/helm/pkg/release/v1/util/manifest_sorter.go b/helm/pkg/release/v1/util/manifest_sorter.go
new file mode 100644
index 000000000..6f7b4ea8b
--- /dev/null
+++ b/helm/pkg/release/v1/util/manifest_sorter.go
@@ -0,0 +1,244 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "log/slog"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/chart/common"
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// Manifest represents a manifest file, which has a name and some content.
+type Manifest struct {
+ Name string
+ Content string
+ Head *SimpleHead
+}
+
+// manifestFile represents a file that contains a manifest.
+type manifestFile struct {
+ entries map[string]string
+ path string
+}
+
+// result is an intermediate structure used during sorting.
+type result struct {
+ hooks []*release.Hook
+ generic []Manifest
+}
+
+// TODO: Refactor this out. It's here because naming conventions were not followed through.
+// So fix the Test hook names and then remove this.
+var events = map[string]release.HookEvent{
+ release.HookPreInstall.String(): release.HookPreInstall,
+ release.HookPostInstall.String(): release.HookPostInstall,
+ release.HookPreDelete.String(): release.HookPreDelete,
+ release.HookPostDelete.String(): release.HookPostDelete,
+ release.HookPreUpgrade.String(): release.HookPreUpgrade,
+ release.HookPostUpgrade.String(): release.HookPostUpgrade,
+ release.HookPreRollback.String(): release.HookPreRollback,
+ release.HookPostRollback.String(): release.HookPostRollback,
+ release.HookTest.String(): release.HookTest,
+ // Support test-success for backward compatibility with Helm 2 tests
+ "test-success": release.HookTest,
+}
+
+// SortManifests takes a map of filename/YAML contents, splits the file
+// by manifest entries, and sorts the entries into hook types.
+//
+// The resulting hooks struct will be populated with all of the generated hooks.
+// Any file that does not declare one of the hook types will be placed in the
+// 'generic' bucket.
+//
+// Files that do not parse into the expected format are simply placed into a map and
+// returned.
+func SortManifests(files map[string]string, _ common.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
+ result := &result{}
+
+ var sortedFilePaths []string
+ for filePath := range files {
+ sortedFilePaths = append(sortedFilePaths, filePath)
+ }
+ sort.Strings(sortedFilePaths)
+
+ for _, filePath := range sortedFilePaths {
+ content := files[filePath]
+
+ // Skip partials. We could return these as a separate map, but there doesn't
+ // seem to be any need for that at this time.
+ if strings.HasPrefix(path.Base(filePath), "_") {
+ continue
+ }
+ // Skip empty files and log this.
+ if strings.TrimSpace(content) == "" {
+ continue
+ }
+
+ manifestFile := &manifestFile{
+ entries: SplitManifests(content),
+ path: filePath,
+ }
+
+ if err := manifestFile.sort(result); err != nil {
+ return result.hooks, result.generic, err
+ }
+ }
+
+ return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil
+}
+
+// sort takes a manifestFile object which may contain multiple resource definition
+// entries and sorts each entry by hook types, and saves the resulting hooks and
+// generic manifests (or non-hooks) to the result struct.
+//
+// To determine hook type, it looks for a YAML structure like this:
+//
+// kind: SomeKind
+// apiVersion: v1
+// metadata:
+// annotations:
+// helm.sh/hook: pre-install
+//
+// To determine the policy to delete the hook, it looks for a YAML structure like this:
+//
+// kind: SomeKind
+// apiVersion: v1
+// metadata:
+// annotations:
+// helm.sh/hook-delete-policy: hook-succeeded
+//
+// To determine the policy to output logs of the hook (for Pod and Job only), it looks for a YAML structure like this:
+//
+// kind: Pod
+// apiVersion: v1
+// metadata:
+// annotations:
+// helm.sh/hook-output-log-policy: hook-succeeded,hook-failed
+func (file *manifestFile) sort(result *result) error {
+ // Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys)
+ var sortedEntryKeys []string
+ for entryKey := range file.entries {
+ sortedEntryKeys = append(sortedEntryKeys, entryKey)
+ }
+ sort.Sort(BySplitManifestsOrder(sortedEntryKeys))
+
+ for _, entryKey := range sortedEntryKeys {
+ m := file.entries[entryKey]
+
+ var entry SimpleHead
+ if err := yaml.Unmarshal([]byte(m), &entry); err != nil {
+ return fmt.Errorf("YAML parse error on %s: %w", file.path, err)
+ }
+
+ if !hasAnyAnnotation(entry) {
+ result.generic = append(result.generic, Manifest{
+ Name: file.path,
+ Content: m,
+ Head: &entry,
+ })
+ continue
+ }
+
+ hookTypes, ok := entry.Metadata.Annotations[release.HookAnnotation]
+ if !ok {
+ result.generic = append(result.generic, Manifest{
+ Name: file.path,
+ Content: m,
+ Head: &entry,
+ })
+ continue
+ }
+
+ hw := calculateHookWeight(entry)
+
+ h := &release.Hook{
+ Name: entry.Metadata.Name,
+ Kind: entry.Kind,
+ Path: file.path,
+ Manifest: m,
+ Events: []release.HookEvent{},
+ Weight: hw,
+ DeletePolicies: []release.HookDeletePolicy{},
+ OutputLogPolicies: []release.HookOutputLogPolicy{},
+ }
+
+ isUnknownHook := false
+ for hookType := range strings.SplitSeq(hookTypes, ",") {
+ hookType = strings.ToLower(strings.TrimSpace(hookType))
+ e, ok := events[hookType]
+ if !ok {
+ isUnknownHook = true
+ break
+ }
+ h.Events = append(h.Events, e)
+ }
+
+ if isUnknownHook {
+ slog.Info("skipping unknown hooks", "hookTypes", hookTypes)
+ continue
+ }
+
+ result.hooks = append(result.hooks, h)
+
+ operateAnnotationValues(entry, release.HookDeleteAnnotation, func(value string) {
+ h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value))
+ })
+
+ operateAnnotationValues(entry, release.HookOutputLogAnnotation, func(value string) {
+ h.OutputLogPolicies = append(h.OutputLogPolicies, release.HookOutputLogPolicy(value))
+ })
+ }
+
+ return nil
+}
+
+// hasAnyAnnotation returns true if the given entry has any annotations at all.
+func hasAnyAnnotation(entry SimpleHead) bool {
+ return entry.Metadata != nil &&
+ entry.Metadata.Annotations != nil &&
+ len(entry.Metadata.Annotations) != 0
+}
+
+// calculateHookWeight finds the weight in the hook weight annotation.
+//
+// If no weight is found, the assigned weight is 0
+func calculateHookWeight(entry SimpleHead) int {
+ hws := entry.Metadata.Annotations[release.HookWeightAnnotation]
+ hw, err := strconv.Atoi(hws)
+ if err != nil {
+ hw = 0
+ }
+ return hw
+}
+
+// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation
+func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) {
+ if dps, ok := entry.Metadata.Annotations[annotation]; ok {
+ for dp := range strings.SplitSeq(dps, ",") {
+ dp = strings.ToLower(strings.TrimSpace(dp))
+ operate(dp)
+ }
+ }
+}
diff --git a/helm/pkg/release/v1/util/manifest_sorter_test.go b/helm/pkg/release/v1/util/manifest_sorter_test.go
new file mode 100644
index 000000000..4360013e5
--- /dev/null
+++ b/helm/pkg/release/v1/util/manifest_sorter_test.go
@@ -0,0 +1,227 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "reflect"
+ "testing"
+
+ "sigs.k8s.io/yaml"
+
+ release "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestSortManifests(t *testing.T) {
+
+ data := []struct {
+ name []string
+ path string
+ kind []string
+ hooks map[string][]release.HookEvent
+ manifest string
+ }{
+ {
+ name: []string{"first"},
+ path: "one",
+ kind: []string{"Job"},
+ hooks: map[string][]release.HookEvent{"first": {release.HookPreInstall}},
+ manifest: `apiVersion: v1
+kind: Job
+metadata:
+ name: first
+ labels:
+ doesnot: matter
+ annotations:
+ "helm.sh/hook": pre-install
+`,
+ },
+ {
+ name: []string{"second"},
+ path: "two",
+ kind: []string{"ReplicaSet"},
+ hooks: map[string][]release.HookEvent{"second": {release.HookPostInstall}},
+ manifest: `kind: ReplicaSet
+apiVersion: v1beta1
+metadata:
+ name: second
+ annotations:
+ "helm.sh/hook": post-install
+`,
+ }, {
+ name: []string{"third"},
+ path: "three",
+ kind: []string{"ReplicaSet"},
+ hooks: map[string][]release.HookEvent{"third": nil},
+ manifest: `kind: ReplicaSet
+apiVersion: v1beta1
+metadata:
+ name: third
+ annotations:
+ "helm.sh/hook": no-such-hook
+`,
+ }, {
+ name: []string{"fourth"},
+ path: "four",
+ kind: []string{"Pod"},
+ hooks: map[string][]release.HookEvent{"fourth": nil},
+ manifest: `kind: Pod
+apiVersion: v1
+metadata:
+ name: fourth
+ annotations:
+ nothing: here`,
+ }, {
+ name: []string{"fifth"},
+ path: "five",
+ kind: []string{"ReplicaSet"},
+ hooks: map[string][]release.HookEvent{"fifth": {release.HookPostDelete, release.HookPostInstall}},
+ manifest: `kind: ReplicaSet
+apiVersion: v1beta1
+metadata:
+ name: fifth
+ annotations:
+ "helm.sh/hook": post-delete, post-install
+`,
+ }, {
+ // Regression test: files with an underscore in the base name should be skipped.
+ name: []string{"sixth"},
+ path: "six/_six",
+ kind: []string{"ReplicaSet"},
+ hooks: map[string][]release.HookEvent{"sixth": nil},
+ manifest: `invalid manifest`, // This will fail if partial is not skipped.
+ }, {
+ // Regression test: files with no content should be skipped.
+ name: []string{"seventh"},
+ path: "seven",
+ kind: []string{"ReplicaSet"},
+ hooks: map[string][]release.HookEvent{"seventh": nil},
+ manifest: "",
+ },
+ {
+ name: []string{"eighth", "example-test"},
+ path: "eight",
+ kind: []string{"ConfigMap", "Pod"},
+ hooks: map[string][]release.HookEvent{"eighth": nil, "example-test": {release.HookTest}},
+ manifest: `kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: eighth
+data:
+ name: value
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: example-test
+ annotations:
+ "helm.sh/hook": test
+`,
+ },
+ }
+
+ manifests := make(map[string]string, len(data))
+ for _, o := range data {
+ manifests[o.path] = o.manifest
+ }
+
+ hs, generic, err := SortManifests(manifests, nil, InstallOrder)
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ // This test will fail if 'six' or 'seven' was added.
+ if len(generic) != 2 {
+ t.Errorf("Expected 2 generic manifests, got %d", len(generic))
+ }
+
+ if len(hs) != 4 {
+ t.Errorf("Expected 4 hooks, got %d", len(hs))
+ }
+
+ for _, out := range hs {
+ found := false
+ for _, expect := range data {
+ if out.Path == expect.path {
+ found = true
+ if out.Path != expect.path {
+ t.Errorf("Expected path %s, got %s", expect.path, out.Path)
+ }
+ nameFound := false
+ for _, expectedName := range expect.name {
+ if out.Name == expectedName {
+ nameFound = true
+ }
+ }
+ if !nameFound {
+ t.Errorf("Got unexpected name %s", out.Name)
+ }
+ kindFound := false
+ for _, expectedKind := range expect.kind {
+ if out.Kind == expectedKind {
+ kindFound = true
+ }
+ }
+ if !kindFound {
+ t.Errorf("Got unexpected kind %s", out.Kind)
+ }
+
+ expectedHooks := expect.hooks[out.Name]
+ if !reflect.DeepEqual(expectedHooks, out.Events) {
+ t.Errorf("expected events: %v but got: %v", expectedHooks, out.Events)
+ }
+
+ }
+ }
+ if !found {
+ t.Errorf("Result not found: %v", out)
+ }
+ }
+
+ // Verify the sort order
+ sorted := []Manifest{}
+ for _, s := range data {
+ manifests := SplitManifests(s.manifest)
+
+ for _, m := range manifests {
+ var sh SimpleHead
+ if err := yaml.Unmarshal([]byte(m), &sh); err != nil {
+ // This is expected for manifests that are corrupt or empty.
+ t.Log(err)
+ continue
+ }
+
+ name := sh.Metadata.Name
+
+ // only keep track of non-hook manifests
+ if s.hooks[name] == nil {
+ another := Manifest{
+ Content: m,
+ Name: name,
+ Head: &sh,
+ }
+ sorted = append(sorted, another)
+ }
+ }
+ }
+
+ sorted = sortManifestsByKind(sorted, InstallOrder)
+ for i, m := range generic {
+ if m.Content != sorted[i].Content {
+ t.Errorf("Expected %q, got %q", m.Content, sorted[i].Content)
+ }
+ }
+}
diff --git a/helm/pkg/release/v1/util/manifest_test.go b/helm/pkg/release/v1/util/manifest_test.go
new file mode 100644
index 000000000..754ac1367
--- /dev/null
+++ b/helm/pkg/release/v1/util/manifest_test.go
@@ -0,0 +1,61 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
+
+import (
+ "reflect"
+ "testing"
+)
+
+const mockManifestFile = `
+
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: finding-nemo,
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: nemo-test
+ image: fake-image
+ cmd: fake-command
+`
+
+const expectedManifest = `apiVersion: v1
+kind: Pod
+metadata:
+ name: finding-nemo,
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: nemo-test
+ image: fake-image
+ cmd: fake-command`
+
+func TestSplitManifest(t *testing.T) {
+ manifests := SplitManifests(mockManifestFile)
+ if len(manifests) != 1 {
+ t.Errorf("Expected 1 manifest, got %v", len(manifests))
+ }
+ expected := map[string]string{"manifest-0": expectedManifest}
+ if !reflect.DeepEqual(manifests, expected) {
+ t.Errorf("Expected %v, got %v", expected, manifests)
+ }
+}
diff --git a/helm/pkg/release/v1/util/sorter.go b/helm/pkg/release/v1/util/sorter.go
new file mode 100644
index 000000000..47506fbf2
--- /dev/null
+++ b/helm/pkg/release/v1/util/sorter.go
@@ -0,0 +1,61 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
+
+import (
+ "sort"
+
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// Reverse reverses the list of releases sorted by the sort func.
+func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) {
+ sortFn(list)
+ for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+ list[i], list[j] = list[j], list[i]
+ }
+}
+
+// SortByName returns the list of releases sorted
+// in lexicographical order.
+func SortByName(list []*rspb.Release) {
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].Name < list[j].Name
+ })
+}
+
+// SortByDate returns the list of releases sorted by a
+// release's last deployed time (in seconds).
+func SortByDate(list []*rspb.Release) {
+ sort.Slice(list, func(i, j int) bool {
+ ti := list[i].Info.LastDeployed.Unix()
+ tj := list[j].Info.LastDeployed.Unix()
+ if ti != tj {
+ return ti < tj
+ }
+ // Use name as tie-breaker for stable sorting
+ return list[i].Name < list[j].Name
+ })
+}
+
+// SortByRevision returns the list of releases sorted by a
+// release's revision number (release.Version).
+func SortByRevision(list []*rspb.Release) {
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].Version < list[j].Version
+ })
+}
diff --git a/helm/pkg/release/v1/util/sorter_test.go b/helm/pkg/release/v1/util/sorter_test.go
new file mode 100644
index 000000000..f47db7db8
--- /dev/null
+++ b/helm/pkg/release/v1/util/sorter_test.go
@@ -0,0 +1,109 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util // import "helm.sh/helm/v4/pkg/release/v1/util"
+
+import (
+ "testing"
+ "time"
+
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// note: this test data is shared with filter_test.go.
+
+var releases = []*rspb.Release{
+ tsRelease("quiet-bear", 2, 2000, common.StatusSuperseded),
+ tsRelease("angry-bird", 4, 3000, common.StatusDeployed),
+ tsRelease("happy-cats", 1, 4000, common.StatusUninstalled),
+ tsRelease("vocal-dogs", 3, 6000, common.StatusUninstalled),
+}
+
+func tsRelease(name string, vers int, dur time.Duration, status common.Status) *rspb.Release {
+ info := &rspb.Info{Status: status, LastDeployed: time.Now().Add(dur)}
+ return &rspb.Release{
+ Name: name,
+ Version: vers,
+ Info: info,
+ }
+}
+
+func check(t *testing.T, by string, fn func(int, int) bool) {
+ t.Helper()
+ for i := len(releases) - 1; i > 0; i-- {
+ if fn(i, i-1) {
+ t.Errorf("release at positions '(%d,%d)' not sorted by %s", i-1, i, by)
+ }
+ }
+}
+
+func TestSortByName(t *testing.T) {
+ SortByName(releases)
+
+ check(t, "ByName", func(i, j int) bool {
+ ni := releases[i].Name
+ nj := releases[j].Name
+ return ni < nj
+ })
+}
+
+func TestSortByDate(t *testing.T) {
+ SortByDate(releases)
+
+ check(t, "ByDate", func(i, j int) bool {
+ ti := releases[i].Info.LastDeployed.Second()
+ tj := releases[j].Info.LastDeployed.Second()
+ return ti < tj
+ })
+}
+
+func TestSortByRevision(t *testing.T) {
+ SortByRevision(releases)
+
+ check(t, "ByRevision", func(i, j int) bool {
+ vi := releases[i].Version
+ vj := releases[j].Version
+ return vi < vj
+ })
+}
+
+func TestReverseSortByName(t *testing.T) {
+ Reverse(releases, SortByName)
+ check(t, "ByName", func(i, j int) bool {
+ ni := releases[i].Name
+ nj := releases[j].Name
+ return ni > nj
+ })
+}
+
+func TestReverseSortByDate(t *testing.T) {
+ Reverse(releases, SortByDate)
+ check(t, "ByDate", func(i, j int) bool {
+ ti := releases[i].Info.LastDeployed.Second()
+ tj := releases[j].Info.LastDeployed.Second()
+ return ti > tj
+ })
+}
+
+func TestReverseSortByRevision(t *testing.T) {
+ Reverse(releases, SortByRevision)
+ check(t, "ByRevision", func(i, j int) bool {
+ vi := releases[i].Version
+ vj := releases[j].Version
+ return vi > vj
+ })
+}
diff --git a/helm/pkg/repo/v1/chartrepo.go b/helm/pkg/repo/v1/chartrepo.go
new file mode 100644
index 000000000..deef7474e
--- /dev/null
+++ b/helm/pkg/repo/v1/chartrepo.go
@@ -0,0 +1,276 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo // import "helm.sh/helm/v4/pkg/repo/v1"
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "helm.sh/helm/v4/internal/fileutil"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+// Entry represents a collection of parameters for chart repository
+type Entry struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+ Username string `json:"username"`
+ Password string `json:"password"`
+ CertFile string `json:"certFile"`
+ KeyFile string `json:"keyFile"`
+ CAFile string `json:"caFile"`
+ InsecureSkipTLSVerify bool `json:"insecure_skip_tls_verify"`
+ PassCredentialsAll bool `json:"pass_credentials_all"`
+}
+
+// ChartRepository represents a chart repository
+type ChartRepository struct {
+ Config *Entry
+ IndexFile *IndexFile
+ Client getter.Getter
+ CachePath string
+}
+
+// NewChartRepository constructs ChartRepository
+func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) {
+ u, err := url.Parse(cfg.URL)
+ if err != nil {
+ return nil, fmt.Errorf("invalid chart URL format: %s", cfg.URL)
+ }
+
+ client, err := getters.ByScheme(u.Scheme)
+ if err != nil {
+ return nil, fmt.Errorf("could not find protocol handler for: %s", u.Scheme)
+ }
+
+ return &ChartRepository{
+ Config: cfg,
+ IndexFile: NewIndexFile(),
+ Client: client,
+ CachePath: helmpath.CachePath("repository"),
+ }, nil
+}
+
+// DownloadIndexFile fetches the index from a repository.
+func (r *ChartRepository) DownloadIndexFile() (string, error) {
+ indexURL, err := ResolveReferenceURL(r.Config.URL, "index.yaml")
+ if err != nil {
+ return "", err
+ }
+
+ resp, err := r.Client.Get(indexURL,
+ getter.WithURL(r.Config.URL),
+ getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSVerify),
+ getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile),
+ getter.WithBasicAuth(r.Config.Username, r.Config.Password),
+ getter.WithPassCredentialsAll(r.Config.PassCredentialsAll),
+ )
+ if err != nil {
+ return "", err
+ }
+
+ index, err := io.ReadAll(resp)
+ if err != nil {
+ return "", err
+ }
+
+ indexFile, err := loadIndex(index, r.Config.URL)
+ if err != nil {
+ return "", err
+ }
+
+ // Create the chart list file in the cache directory
+ var charts strings.Builder
+ for name := range indexFile.Entries {
+ fmt.Fprintln(&charts, name)
+ }
+ chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))
+ os.MkdirAll(filepath.Dir(chartsFile), 0755)
+
+ fileutil.AtomicWriteFile(chartsFile, bytes.NewReader([]byte(charts.String())), 0644)
+
+ // Create the index file in the cache directory
+ fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name))
+ os.MkdirAll(filepath.Dir(fname), 0755)
+ return fname, fileutil.AtomicWriteFile(fname, bytes.NewReader(index), 0644)
+}
+
+type findChartInRepoURLOptions struct {
+ Username string
+ Password string
+ PassCredentialsAll bool
+ InsecureSkipTLSVerify bool
+ CertFile string
+ KeyFile string
+ CAFile string
+ ChartVersion string
+}
+
+type FindChartInRepoURLOption func(*findChartInRepoURLOptions)
+
+// WithChartVersion specifies the chart version to find
+func WithChartVersion(chartVersion string) FindChartInRepoURLOption {
+ return func(options *findChartInRepoURLOptions) {
+ options.ChartVersion = chartVersion
+ }
+}
+
+// WithUsernamePassword specifies the username/password credntials for the repository
+func WithUsernamePassword(username, password string) FindChartInRepoURLOption {
+ return func(options *findChartInRepoURLOptions) {
+ options.Username = username
+ options.Password = password
+ }
+}
+
+// WithPassCredentialsAll flags whether credentials should be passed on to other domains
+func WithPassCredentialsAll(passCredentialsAll bool) FindChartInRepoURLOption {
+ return func(options *findChartInRepoURLOptions) {
+ options.PassCredentialsAll = passCredentialsAll
+ }
+}
+
+// WithClientTLS species the cert, key, and CA files for client mTLS
+func WithClientTLS(certFile, keyFile, caFile string) FindChartInRepoURLOption {
+ return func(options *findChartInRepoURLOptions) {
+ options.CertFile = certFile
+ options.KeyFile = keyFile
+ options.CAFile = caFile
+ }
+}
+
+// WithInsecureSkipTLSVerify skips TLS verification for repository communication
+func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) FindChartInRepoURLOption {
+ return func(options *findChartInRepoURLOptions) {
+ options.InsecureSkipTLSVerify = insecureSkipTLSVerify
+ }
+}
+
+// FindChartInRepoURL finds chart in chart repository pointed by repoURL
+// without adding repo to repositories
+func FindChartInRepoURL(repoURL string, chartName string, getters getter.Providers, options ...FindChartInRepoURLOption) (string, error) {
+
+ opts := findChartInRepoURLOptions{}
+ for _, option := range options {
+ option(&opts)
+ }
+
+ // Download and write the index file to a temporary location
+ buf := make([]byte, 20)
+ rand.Read(buf)
+ name := strings.ReplaceAll(base64.StdEncoding.EncodeToString(buf), "/", "-")
+
+ c := Entry{
+ URL: repoURL,
+ Username: opts.Username,
+ Password: opts.Password,
+ PassCredentialsAll: opts.PassCredentialsAll,
+ CertFile: opts.CertFile,
+ KeyFile: opts.KeyFile,
+ CAFile: opts.CAFile,
+ Name: name,
+ InsecureSkipTLSVerify: opts.InsecureSkipTLSVerify,
+ }
+ r, err := NewChartRepository(&c, getters)
+ if err != nil {
+ return "", err
+ }
+ idx, err := r.DownloadIndexFile()
+ if err != nil {
+ return "", fmt.Errorf("looks like %q is not a valid chart repository or cannot be reached: %w", repoURL, err)
+ }
+ defer func() {
+ os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name)))
+ os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name)))
+ }()
+
+ // Read the index file for the repository to get chart information and return chart URL
+ repoIndex, err := LoadIndexFile(idx)
+ if err != nil {
+ return "", err
+ }
+
+ errMsg := fmt.Sprintf("chart %q", chartName)
+ if opts.ChartVersion != "" {
+ errMsg = fmt.Sprintf("%s version %q", errMsg, opts.ChartVersion)
+ }
+ cv, err := repoIndex.Get(chartName, opts.ChartVersion)
+ if err != nil {
+ return "", ChartNotFoundError{
+ Chart: errMsg,
+ RepoURL: repoURL,
+ }
+ }
+
+ if len(cv.URLs) == 0 {
+ return "", fmt.Errorf("%s has no downloadable URLs", errMsg)
+ }
+
+ chartURL := cv.URLs[0]
+
+ absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL)
+ if err != nil {
+ return "", fmt.Errorf("failed to make chart URL absolute: %w", err)
+ }
+
+ return absoluteChartURL, nil
+}
+
+// ResolveReferenceURL resolves refURL relative to baseURL.
+// If refURL is absolute, it simply returns refURL.
+func ResolveReferenceURL(baseURL, refURL string) (string, error) {
+ parsedRefURL, err := url.Parse(refURL)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse %s as URL: %w", refURL, err)
+ }
+
+ if parsedRefURL.IsAbs() {
+ return refURL, nil
+ }
+
+ parsedBaseURL, err := url.Parse(baseURL)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse %s as URL: %w", baseURL, err)
+ }
+
+ // We need a trailing slash for ResolveReference to work, but make sure there isn't already one
+ parsedBaseURL.RawPath = strings.TrimSuffix(parsedBaseURL.RawPath, "/") + "/"
+ parsedBaseURL.Path = strings.TrimSuffix(parsedBaseURL.Path, "/") + "/"
+
+ resolvedURL := parsedBaseURL.ResolveReference(parsedRefURL)
+ resolvedURL.RawQuery = parsedBaseURL.RawQuery
+ return resolvedURL.String(), nil
+}
+
+func (e *Entry) String() string {
+ buf, err := json.Marshal(e)
+ if err != nil {
+ slog.Error("failed to marshal entry", slog.Any("error", err))
+ panic(err)
+ }
+ return string(buf)
+}
diff --git a/helm/pkg/repo/v1/chartrepo_test.go b/helm/pkg/repo/v1/chartrepo_test.go
new file mode 100644
index 000000000..353ab62d6
--- /dev/null
+++ b/helm/pkg/repo/v1/chartrepo_test.go
@@ -0,0 +1,302 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "bytes"
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+type CustomGetter struct {
+ repoUrls []string
+}
+
+func (g *CustomGetter) Get(href string, _ ...getter.Option) (*bytes.Buffer, error) {
+ index := &IndexFile{
+ APIVersion: "v1",
+ Generated: time.Now(),
+ }
+ indexBytes, err := yaml.Marshal(index)
+ if err != nil {
+ return nil, err
+ }
+ g.repoUrls = append(g.repoUrls, href)
+ return bytes.NewBuffer(indexBytes), nil
+}
+
+func TestIndexCustomSchemeDownload(t *testing.T) {
+ repoName := "gcs-repo"
+ repoURL := "gs://some-gcs-bucket"
+ myCustomGetter := &CustomGetter{}
+ customGetterConstructor := func(_ ...getter.Option) (getter.Getter, error) {
+ return myCustomGetter, nil
+ }
+ providers := getter.Providers{{
+ Schemes: []string{"gs"},
+ New: customGetterConstructor,
+ }}
+ repo, err := NewChartRepository(&Entry{
+ Name: repoName,
+ URL: repoURL,
+ }, providers)
+ if err != nil {
+ t.Fatalf("Problem loading chart repository from %s: %v", repoURL, err)
+ }
+ repo.CachePath = t.TempDir()
+
+ tempIndexFile, err := os.CreateTemp(t.TempDir(), "test-repo")
+ if err != nil {
+ t.Fatalf("Failed to create temp index file: %v", err)
+ }
+ defer os.Remove(tempIndexFile.Name())
+
+ idx, err := repo.DownloadIndexFile()
+ if err != nil {
+ t.Fatalf("Failed to download index file to %s: %v", idx, err)
+ }
+
+ if len(myCustomGetter.repoUrls) != 1 {
+ t.Fatalf("Custom Getter.Get should be called once")
+ }
+
+ expectedRepoIndexURL := repoURL + "/index.yaml"
+ if myCustomGetter.repoUrls[0] != expectedRepoIndexURL {
+ t.Fatalf("Custom Getter.Get should be called with %s", expectedRepoIndexURL)
+ }
+}
+
+func TestConcurrencyDownloadIndex(t *testing.T) {
+ srv, err := startLocalServerForTests(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ repo, err := NewChartRepository(&Entry{
+ Name: "nginx",
+ URL: srv.URL,
+ }, getter.All(&cli.EnvSettings{}))
+
+ if err != nil {
+ t.Fatalf("Problem loading chart repository from %s: %v", srv.URL, err)
+ }
+ repo.CachePath = t.TempDir()
+
+ // initial download index
+ idx, err := repo.DownloadIndexFile()
+ if err != nil {
+ t.Fatalf("Failed to download index file to %s: %v", idx, err)
+ }
+
+ indexFName := filepath.Join(repo.CachePath, helmpath.CacheIndexFile(repo.Config.Name))
+
+ var wg sync.WaitGroup
+
+ // Simultaneously start multiple goroutines that:
+ // 1) download index.yaml via DownloadIndexFile (write operation),
+ // 2) read index.yaml via LoadIndexFile (read operation).
+ // This checks for race conditions and ensures correct behavior under concurrent read/write access.
+ for range 150 {
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ idx, err := repo.DownloadIndexFile()
+ if err != nil {
+ t.Errorf("Failed to download index file to %s: %v", idx, err)
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, err := LoadIndexFile(indexFName)
+ if err != nil {
+ t.Errorf("Failed to load index file: %v", err)
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+// startLocalServerForTests Start the local helm server
+func startLocalServerForTests(handler http.Handler) (*httptest.Server, error) {
+ if handler == nil {
+ fileBytes, err := os.ReadFile("testdata/local-index.yaml")
+ if err != nil {
+ return nil, err
+ }
+ handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write(fileBytes)
+ })
+ }
+
+ return httptest.NewServer(handler), nil
+}
+
+// startLocalTLSServerForTests Start the local helm server with TLS
+func startLocalTLSServerForTests(handler http.Handler) (*httptest.Server, error) {
+ if handler == nil {
+ fileBytes, err := os.ReadFile("testdata/local-index.yaml")
+ if err != nil {
+ return nil, err
+ }
+ handler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write(fileBytes)
+ })
+ }
+
+ return httptest.NewTLSServer(handler), nil
+}
+
+func TestFindChartInAuthAndTLSAndPassRepoURL(t *testing.T) {
+ srv, err := startLocalTLSServerForTests(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ chartURL, err := FindChartInRepoURL(
+ srv.URL,
+ "nginx",
+ getter.All(&cli.EnvSettings{}),
+ WithInsecureSkipTLSVerify(true),
+ )
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ if chartURL != "https://charts.helm.sh/stable/nginx-0.2.0.tgz" {
+ t.Errorf("%s is not the valid URL", chartURL)
+ }
+
+ // If the insecureSkipTLSVerify is false, it will return an error that contains "x509: certificate signed by unknown authority".
+ _, err = FindChartInRepoURL(srv.URL, "nginx", getter.All(&cli.EnvSettings{}), WithChartVersion("0.1.0"))
+ // Go communicates with the platform and different platforms return different messages. Go itself tests darwin
+ // differently for its message. On newer versions of Darwin the message includes the "Acme Co" portion while older
+ // versions of Darwin do not. As there are people developing Helm using both old and new versions of Darwin we test
+ // for both messages.
+ if runtime.GOOS == "darwin" {
+ if !strings.Contains(err.Error(), "x509: “Acme Co” certificate is not trusted") && !strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
+ t.Errorf("Expected TLS error for function FindChartInAuthAndTLSAndPassRepoURL not found, but got a different error (%v)", err)
+ }
+ } else if !strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
+ t.Errorf("Expected TLS error for function FindChartInAuthAndTLSAndPassRepoURL not found, but got a different error (%v)", err)
+ }
+}
+
+func TestFindChartInRepoURL(t *testing.T) {
+ srv, err := startLocalServerForTests(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ chartURL, err := FindChartInRepoURL(srv.URL, "nginx", getter.All(&cli.EnvSettings{}))
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ if chartURL != "https://charts.helm.sh/stable/nginx-0.2.0.tgz" {
+ t.Errorf("%s is not the valid URL", chartURL)
+ }
+
+ chartURL, err = FindChartInRepoURL(srv.URL, "nginx", getter.All(&cli.EnvSettings{}), WithChartVersion("0.1.0"))
+ if err != nil {
+ t.Errorf("%s", err)
+ }
+ if chartURL != "https://charts.helm.sh/stable/nginx-0.1.0.tgz" {
+ t.Errorf("%s is not the valid URL", chartURL)
+ }
+}
+
+func TestErrorFindChartInRepoURL(t *testing.T) {
+
+ g := getter.All(&cli.EnvSettings{
+ RepositoryCache: t.TempDir(),
+ })
+
+ if _, err := FindChartInRepoURL("http://someserver/something", "nginx", g); err == nil {
+ t.Errorf("Expected error for bad chart URL, but did not get any errors")
+ } else if !strings.Contains(err.Error(), `looks like "http://someserver/something" is not a valid chart repository or cannot be reached`) {
+ t.Errorf("Expected error for bad chart URL, but got a different error (%v)", err)
+ }
+
+ srv, err := startLocalServerForTests(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ if _, err = FindChartInRepoURL(srv.URL, "nginx1", g); err == nil {
+ t.Errorf("Expected error for chart not found, but did not get any errors")
+ } else if err.Error() != `chart "nginx1" not found in `+srv.URL+` repository` {
+ t.Errorf("Expected error for chart not found, but got a different error (%v)", err)
+ }
+ if !errors.Is(err, ChartNotFoundError{}) {
+ t.Errorf("error is not of correct error type structure")
+ }
+
+ if _, err = FindChartInRepoURL(srv.URL, "nginx1", g, WithChartVersion("0.1.0")); err == nil {
+ t.Errorf("Expected error for chart not found, but did not get any errors")
+ } else if err.Error() != `chart "nginx1" version "0.1.0" not found in `+srv.URL+` repository` {
+ t.Errorf("Expected error for chart not found, but got a different error (%v)", err)
+ }
+
+ if _, err = FindChartInRepoURL(srv.URL, "chartWithNoURL", g); err == nil {
+ t.Errorf("Expected error for no chart URLs available, but did not get any errors")
+ } else if err.Error() != `chart "chartWithNoURL" has no downloadable URLs` {
+ t.Errorf("Expected error for chart not found, but got a different error (%v)", err)
+ }
+}
+
+func TestResolveReferenceURL(t *testing.T) {
+ for _, tt := range []struct {
+ baseURL, refURL, chartURL string
+ }{
+ {"http://localhost:8123/", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts/", "nginx-0.2.0.tgz", "http://localhost:8123/charts/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts/", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts-with-no-trailing-slash", "nginx-0.2.0.tgz", "http://localhost:8123/charts-with-no-trailing-slash/nginx-0.2.0.tgz"},
+ {"http://localhost:8123", "https://charts.helm.sh/stable/nginx-0.2.0.tgz", "https://charts.helm.sh/stable/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts%2fwith%2fescaped%2fslash", "nginx-0.2.0.tgz", "http://localhost:8123/charts%2fwith%2fescaped%2fslash/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts%2fwith%2fescaped%2fslash", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz"},
+ {"http://localhost:8123/charts?with=queryparameter", "nginx-0.2.0.tgz", "http://localhost:8123/charts/nginx-0.2.0.tgz?with=queryparameter"},
+ {"http://localhost:8123/charts?with=queryparameter", "/nginx-0.2.0.tgz", "http://localhost:8123/nginx-0.2.0.tgz?with=queryparameter"},
+ } {
+ chartURL, err := ResolveReferenceURL(tt.baseURL, tt.refURL)
+ if err != nil {
+ t.Errorf("unexpected error in ResolveReferenceURL(%q, %q): %s", tt.baseURL, tt.refURL, err)
+ }
+ if chartURL != tt.chartURL {
+ t.Errorf("expected ResolveReferenceURL(%q, %q) to equal %q, got %q", tt.baseURL, tt.refURL, tt.chartURL, chartURL)
+ }
+ }
+}
diff --git a/helm/pkg/repo/v1/doc.go b/helm/pkg/repo/v1/doc.go
new file mode 100644
index 000000000..fc54bbf7a
--- /dev/null
+++ b/helm/pkg/repo/v1/doc.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package repo implements the Helm Chart Repository.
+
+A chart repository is an HTTP server that provides information on charts. A local
+repository cache is an on-disk representation of a chart repository.
+
+There are two important file formats for chart repositories.
+
+The first is the 'index.yaml' format, which is expressed like this:
+
+ apiVersion: v1
+ entries:
+ frobnitz:
+ - created: 2016-09-29T12:14:34.830161306-06:00
+ description: This is a frobnitz.
+ digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4
+ home: http://example.com
+ keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+ maintainers:
+ - email: helm@example.com
+ name: The Helm Team
+ - email: nobody@example.com
+ name: Someone Else
+ name: frobnitz
+ urls:
+ - http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz
+ version: 1.2.3
+ sprocket:
+ - created: 2016-09-29T12:14:34.830507606-06:00
+ description: This is a sprocket"
+ digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6
+ home: http://example.com
+ keywords:
+ - frobnitz
+ - sprocket
+ - dodad
+ maintainers:
+ - email: helm@example.com
+ name: The Helm Team
+ - email: nobody@example.com
+ name: Someone Else
+ name: sprocket
+ urls:
+ - http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz
+ version: 1.2.0
+ generated: 2016-09-29T12:14:34.829721375-06:00
+
+An index.yaml file contains the necessary descriptive information about what
+charts are available in a repository, and how to get them.
+
+The second file format is the repositories.yaml file format. This file is for
+facilitating local cached copies of one or more chart repositories.
+
+The format of a repository.yaml file is:
+
+ apiVersion: v1
+ generated: TIMESTAMP
+ repositories:
+ - name: stable
+ url: http://example.com/charts
+ cache: stable-index.yaml
+ - name: incubator
+ url: http://example.com/incubator
+ cache: incubator-index.yaml
+
+This file maps three bits of information about a repository:
+
+ - The name the user uses to refer to it
+ - The fully qualified URL to the repository (index.yaml will be appended)
+ - The name of the local cachefile
+
+The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not
+backwards compatible with those earlier versions.
+*/
+package repo
diff --git a/helm/pkg/repo/v1/error.go b/helm/pkg/repo/v1/error.go
new file mode 100644
index 000000000..16264ed26
--- /dev/null
+++ b/helm/pkg/repo/v1/error.go
@@ -0,0 +1,35 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "fmt"
+)
+
+type ChartNotFoundError struct {
+ RepoURL string
+ Chart string
+}
+
+func (e ChartNotFoundError) Error() string {
+ return fmt.Sprintf("%s not found in %s repository", e.Chart, e.RepoURL)
+}
+
+func (e ChartNotFoundError) Is(err error) bool {
+ _, ok := err.(ChartNotFoundError)
+ return ok
+}
diff --git a/helm/pkg/repo/v1/index.go b/helm/pkg/repo/v1/index.go
new file mode 100644
index 000000000..7969d64e9
--- /dev/null
+++ b/helm/pkg/repo/v1/index.go
@@ -0,0 +1,419 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/Masterminds/semver/v3"
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/internal/fileutil"
+ "helm.sh/helm/v4/internal/urlutil"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ "helm.sh/helm/v4/pkg/provenance"
+)
+
+// APIVersionV1 is the v1 API version for index and repository files.
+const APIVersionV1 = "v1"
+
+var (
+ // ErrNoAPIVersion indicates that an API version was not specified.
+ ErrNoAPIVersion = errors.New("no API version specified")
+ // ErrNoChartVersion indicates that a chart with the given version is not found.
+ ErrNoChartVersion = errors.New("no chart version found")
+ // ErrNoChartName indicates that a chart with the given name is not found.
+ ErrNoChartName = errors.New("no chart name found")
+ // ErrEmptyIndexYaml indicates that the content of index.yaml is empty.
+ ErrEmptyIndexYaml = errors.New("empty index.yaml file")
+)
+
+// ChartVersions is a list of versioned chart references.
+// Implements a sorter on Version.
+type ChartVersions []*ChartVersion
+
+// Len returns the length.
+func (c ChartVersions) Len() int { return len(c) }
+
+// Swap swaps the position of two items in the versions slice.
+func (c ChartVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+
+// Less returns true if the version of entry a is less than the version of entry b.
+func (c ChartVersions) Less(a, b int) bool {
+ // Failed parse pushes to the back.
+ i, err := semver.NewVersion(c[a].Version)
+ if err != nil {
+ return true
+ }
+ j, err := semver.NewVersion(c[b].Version)
+ if err != nil {
+ return false
+ }
+ return i.LessThan(j)
+}
+
+// IndexFile represents the index file in a chart repository
+type IndexFile struct {
+ // This is used ONLY for validation against chartmuseum's index files and is discarded after validation.
+ ServerInfo map[string]interface{} `json:"serverInfo,omitempty"`
+ APIVersion string `json:"apiVersion"`
+ Generated time.Time `json:"generated"`
+ Entries map[string]ChartVersions `json:"entries"`
+ PublicKeys []string `json:"publicKeys,omitempty"`
+
+ // Annotations are additional mappings uninterpreted by Helm. They are made available for
+ // other applications to add information to the index file.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// NewIndexFile initializes an index.
+func NewIndexFile() *IndexFile {
+ return &IndexFile{
+ APIVersion: APIVersionV1,
+ Generated: time.Now(),
+ Entries: map[string]ChartVersions{},
+ PublicKeys: []string{},
+ }
+}
+
+// LoadIndexFile takes a file at the given path and returns an IndexFile object
+func LoadIndexFile(path string) (*IndexFile, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ i, err := loadIndex(b, path)
+ if err != nil {
+ return nil, fmt.Errorf("error loading %s: %w", path, err)
+ }
+ return i, nil
+}
+
+// MustAdd adds a file to the index
+// This can leave the index in an unsorted state
+func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string) error {
+ if i.Entries == nil {
+ return errors.New("entries not initialized")
+ }
+
+ if md.APIVersion == "" {
+ md.APIVersion = chart.APIVersionV1
+ }
+ if err := md.Validate(); err != nil {
+ return fmt.Errorf("validate failed for %s: %w", filename, err)
+ }
+
+ u := filename
+ if baseURL != "" {
+ _, file := filepath.Split(filename)
+ var err error
+ u, err = urlutil.URLJoin(baseURL, file)
+ if err != nil {
+ u = path.Join(baseURL, file)
+ }
+ }
+ cr := &ChartVersion{
+ URLs: []string{u},
+ Metadata: md,
+ Digest: digest,
+ Created: time.Now(),
+ }
+ ee := i.Entries[md.Name]
+ i.Entries[md.Name] = append(ee, cr)
+ return nil
+}
+
+// Add adds a file to the index and logs an error.
+//
+// Deprecated: Use index.MustAdd instead.
+func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) {
+ if err := i.MustAdd(md, filename, baseURL, digest); err != nil {
+ slog.Error("skipping loading invalid entry for chart %q %q from %s: %s", md.Name, md.Version, filename, err)
+ }
+}
+
+// Has returns true if the index has an entry for a chart with the given name and exact version.
+func (i IndexFile) Has(name, version string) bool {
+ _, err := i.Get(name, version)
+ return err == nil
+}
+
+// SortEntries sorts the entries by version in descending order.
+//
+// In canonical form, the individual version records should be sorted so that
+// the most recent release for every version is in the 0th slot in the
+// Entries.ChartVersions array. That way, tooling can predict the newest
+// version without needing to parse SemVers.
+func (i IndexFile) SortEntries() {
+ for _, versions := range i.Entries {
+ sort.Sort(sort.Reverse(versions))
+ }
+}
+
+// Get returns the ChartVersion for the given name.
+//
+// If version is empty, this will return the chart with the latest stable version,
+// prerelease versions will be skipped.
+func (i IndexFile) Get(name, version string) (*ChartVersion, error) {
+ vs, ok := i.Entries[name]
+ if !ok {
+ return nil, ErrNoChartName
+ }
+ if len(vs) == 0 {
+ return nil, ErrNoChartVersion
+ }
+
+ var constraint *semver.Constraints
+ if version == "" {
+ constraint, _ = semver.NewConstraint("*")
+ } else {
+ var err error
+ constraint, err = semver.NewConstraint(version)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // when customer inputs specific version, check whether there's an exact match first
+ if len(version) != 0 {
+ for _, ver := range vs {
+ if version == ver.Version {
+ return ver, nil
+ }
+ }
+ }
+
+ for _, ver := range vs {
+ test, err := semver.NewVersion(ver.Version)
+ if err != nil {
+ continue
+ }
+
+ if constraint.Check(test) {
+ if len(version) != 0 {
+ slog.Warn("unable to find exact version requested; falling back to closest available version", "chart", name, "requested", version, "selected", ver.Version)
+ }
+ return ver, nil
+ }
+ }
+ return nil, fmt.Errorf("no chart version found for %s-%s", name, version)
+}
+
+// WriteFile writes an index file to the given destination path.
+//
+// The mode on the file is set to 'mode'.
+func (i IndexFile) WriteFile(dest string, mode os.FileMode) error {
+ b, err := yaml.Marshal(i)
+ if err != nil {
+ return err
+ }
+ return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode)
+}
+
+// WriteJSONFile writes an index file in JSON format to the given destination
+// path.
+//
+// The mode on the file is set to 'mode'.
+func (i IndexFile) WriteJSONFile(dest string, mode os.FileMode) error {
+ b, err := json.MarshalIndent(i, "", " ")
+ if err != nil {
+ return err
+ }
+ return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode)
+}
+
+// Merge merges the given index file into this index.
+//
+// This merges by name and version.
+//
+// If one of the entries in the given index does _not_ already exist, it is added.
+// In all other cases, the existing record is preserved.
+//
+// This can leave the index in an unsorted state
+func (i *IndexFile) Merge(f *IndexFile) {
+ for _, cvs := range f.Entries {
+ for _, cv := range cvs {
+ if !i.Has(cv.Name, cv.Version) {
+ e := i.Entries[cv.Name]
+ i.Entries[cv.Name] = append(e, cv)
+ }
+ }
+ }
+}
+
+// ChartVersion represents a chart entry in the IndexFile
+type ChartVersion struct {
+ *chart.Metadata
+ URLs []string `json:"urls"`
+ Created time.Time `json:"created,omitempty"`
+ Removed bool `json:"removed,omitempty"`
+ Digest string `json:"digest,omitempty"`
+
+ // ChecksumDeprecated is deprecated in Helm 3, and therefore ignored. Helm 3 replaced
+ // this with Digest. However, with a strict YAML parser enabled, a field must be
+ // present on the struct for backwards compatibility.
+ ChecksumDeprecated string `json:"checksum,omitempty"`
+
+ // EngineDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict
+ // YAML parser enabled, this field must be present.
+ EngineDeprecated string `json:"engine,omitempty"`
+
+ // TillerVersionDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict
+ // YAML parser enabled, this field must be present.
+ TillerVersionDeprecated string `json:"tillerVersion,omitempty"`
+
+ // URLDeprecated is deprecated in Helm 3, superseded by URLs. It is ignored. However,
+ // with a strict YAML parser enabled, this must be present on the struct.
+ URLDeprecated string `json:"url,omitempty"`
+}
+
+// IndexDirectory reads a (flat) directory and generates an index.
+//
+// It indexes only charts that have been packaged (*.tgz).
+//
+// The index returned will be in an unsorted state
+func IndexDirectory(dir, baseURL string) (*IndexFile, error) {
+ archives, err := filepath.Glob(filepath.Join(dir, "*.tgz"))
+ if err != nil {
+ return nil, err
+ }
+ moreArchives, err := filepath.Glob(filepath.Join(dir, "**/*.tgz"))
+ if err != nil {
+ return nil, err
+ }
+ archives = append(archives, moreArchives...)
+
+ index := NewIndexFile()
+ for _, arch := range archives {
+ fname, err := filepath.Rel(dir, arch)
+ if err != nil {
+ return index, err
+ }
+
+ var parentDir string
+ parentDir, fname = filepath.Split(fname)
+ // filepath.Split appends an extra slash to the end of parentDir. We want to strip that out.
+ parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator))
+ parentURL, err := urlutil.URLJoin(baseURL, parentDir)
+ if err != nil {
+ parentURL = path.Join(baseURL, parentDir)
+ }
+
+ c, err := loader.Load(arch)
+ if err != nil {
+ // Assume this is not a chart.
+ continue
+ }
+ hash, err := provenance.DigestFile(arch)
+ if err != nil {
+ return index, err
+ }
+ if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil {
+ return index, fmt.Errorf("failed adding to %s to index: %w", fname, err)
+ }
+ }
+ return index, nil
+}
+
+// loadIndex loads an index file and does minimal validity checking.
+//
+// The source parameter is only used for logging.
+// This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails.
+func loadIndex(data []byte, source string) (*IndexFile, error) {
+ i := &IndexFile{}
+
+ if len(data) == 0 {
+ return i, ErrEmptyIndexYaml
+ }
+
+ if err := jsonOrYamlUnmarshal(data, i); err != nil {
+ return i, err
+ }
+
+ for name, cvs := range i.Entries {
+ for idx := len(cvs) - 1; idx >= 0; idx-- {
+ if cvs[idx] == nil {
+ slog.Warn(fmt.Sprintf("skipping loading invalid entry for chart %q from %s: empty entry", name, source))
+ cvs = append(cvs[:idx], cvs[idx+1:]...)
+ continue
+ }
+ // When metadata section missing, initialize with no data
+ if cvs[idx].Metadata == nil {
+ cvs[idx].Metadata = &chart.Metadata{}
+ }
+ if cvs[idx].APIVersion == "" {
+ cvs[idx].APIVersion = chart.APIVersionV1
+ }
+ if err := cvs[idx].Validate(); ignoreSkippableChartValidationError(err) != nil {
+ slog.Warn(fmt.Sprintf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err))
+ cvs = append(cvs[:idx], cvs[idx+1:]...)
+ }
+ }
+ // adjust slice to only contain a set of valid versions
+ i.Entries[name] = cvs
+ }
+ i.SortEntries()
+ if i.APIVersion == "" {
+ return i, ErrNoAPIVersion
+ }
+ return i, nil
+}
+
+// jsonOrYamlUnmarshal unmarshals the given byte slice containing JSON or YAML
+// into the provided interface.
+//
+// It automatically detects whether the data is in JSON or YAML format by
+// checking its validity as JSON. If the data is valid JSON, it will use the
+// `encoding/json` package to unmarshal it. Otherwise, it will use the
+// `sigs.k8s.io/yaml` package to unmarshal the YAML data.
+func jsonOrYamlUnmarshal(b []byte, i interface{}) error {
+ if json.Valid(b) {
+ return json.Unmarshal(b, i)
+ }
+ return yaml.UnmarshalStrict(b, i)
+}
+
+// ignoreSkippableChartValidationError inspect the given error and returns nil if
+// the error isn't important for index loading
+//
+// In particular, charts may introduce validations that don't impact repository indexes
+// And repository indexes may be generated by older/non-compliant software, which doesn't
+// conform to all validations.
+func ignoreSkippableChartValidationError(err error) error {
+ verr, ok := err.(chart.ValidationError)
+ if !ok {
+ return err
+ }
+
+ // https://github.com/helm/helm/issues/12748 (JFrog repository strips alias field)
+ if strings.HasPrefix(verr.Error(), "validation: more than one dependency with name or alias") {
+ return nil
+ }
+
+ return err
+}
diff --git a/helm/pkg/repo/v1/index_test.go b/helm/pkg/repo/v1/index_test.go
new file mode 100644
index 000000000..517457dc4
--- /dev/null
+++ b/helm/pkg/repo/v1/index_test.go
@@ -0,0 +1,720 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "testing"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/cli"
+ "helm.sh/helm/v4/pkg/getter"
+ "helm.sh/helm/v4/pkg/helmpath"
+)
+
+const (
+ testfile = "testdata/local-index.yaml"
+ annotationstestfile = "testdata/local-index-annotations.yaml"
+ chartmuseumtestfile = "testdata/chartmuseum-index.yaml"
+ unorderedTestfile = "testdata/local-index-unordered.yaml"
+ jsonTestfile = "testdata/local-index.json"
+ testRepo = "test-repo"
+ indexWithDuplicates = `
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+`
+ indexWithEmptyEntry = `
+apiVersion: v1
+entries:
+ grafana:
+ - apiVersion: v2
+ name: grafana
+ - null
+ foo:
+ -
+ bar:
+ - digest: "sha256:1234567890abcdef"
+ urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+`
+)
+
+func TestIndexFile(t *testing.T) {
+ i := NewIndexFile()
+ for _, x := range []struct {
+ md *chart.Metadata
+ filename string
+ baseURL string
+ digest string
+ }{
+ {&chart.Metadata{APIVersion: "v2", Name: "clipper", Version: "0.1.0"}, "clipper-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890"},
+ {&chart.Metadata{APIVersion: "v2", Name: "cutter", Version: "0.1.1"}, "cutter-0.1.1.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ {&chart.Metadata{APIVersion: "v2", Name: "cutter", Version: "0.1.0"}, "cutter-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ {&chart.Metadata{APIVersion: "v2", Name: "cutter", Version: "0.2.0"}, "cutter-0.2.0.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ {&chart.Metadata{APIVersion: "v2", Name: "setter", Version: "0.1.9+alpha"}, "setter-0.1.9+alpha.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ {&chart.Metadata{APIVersion: "v2", Name: "setter", Version: "0.1.9+beta"}, "setter-0.1.9+beta.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ {&chart.Metadata{APIVersion: "v2", Name: "setter", Version: "0.1.8"}, "setter-0.1.8.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ {&chart.Metadata{APIVersion: "v2", Name: "setter", Version: "0.1.8+beta"}, "setter-0.1.8+beta.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ } {
+ if err := i.MustAdd(x.md, x.filename, x.baseURL, x.digest); err != nil {
+ t.Errorf("unexpected error adding to index: %s", err)
+ }
+ }
+
+ i.SortEntries()
+
+ if i.APIVersion != APIVersionV1 {
+ t.Error("Expected API version v1")
+ }
+
+ if len(i.Entries) != 3 {
+ t.Errorf("Expected 3 charts. Got %d", len(i.Entries))
+ }
+
+ if i.Entries["clipper"][0].Name != "clipper" {
+ t.Errorf("Expected clipper, got %s", i.Entries["clipper"][0].Name)
+ }
+
+ if len(i.Entries["cutter"]) != 3 {
+ t.Error("Expected three cutters.")
+ }
+
+ // Test that the sort worked. 0.2 should be at the first index for Cutter.
+ if v := i.Entries["cutter"][0].Version; v != "0.2.0" {
+ t.Errorf("Unexpected first version: %s", v)
+ }
+
+ cv, err := i.Get("setter", "0.1.9")
+ if err == nil && !strings.Contains(cv.Version, "0.1.9") {
+ t.Errorf("Unexpected version: %s", cv.Version)
+ }
+
+ cv, err = i.Get("setter", "0.1.9+alpha")
+ if err != nil || cv.Version != "0.1.9+alpha" {
+ t.Errorf("Expected version: 0.1.9+alpha")
+ }
+
+ cv, err = i.Get("setter", "0.1.8")
+ if err != nil || cv.Version != "0.1.8" {
+ t.Errorf("Expected version: 0.1.8")
+ }
+}
+
+func TestLoadIndex(t *testing.T) {
+
+ tests := []struct {
+ Name string
+ Filename string
+ }{
+ {
+ Name: "regular index file",
+ Filename: testfile,
+ },
+ {
+ Name: "chartmuseum index file",
+ Filename: chartmuseumtestfile,
+ },
+ {
+ Name: "JSON index file",
+ Filename: jsonTestfile,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.Name, func(t *testing.T) {
+ t.Parallel()
+ i, err := LoadIndexFile(tc.Filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verifyLocalIndex(t, i)
+ })
+ }
+}
+
+// TestLoadIndex_Duplicates is a regression to make sure that we don't non-deterministically allow duplicate packages.
+func TestLoadIndex_Duplicates(t *testing.T) {
+ if _, err := loadIndex([]byte(indexWithDuplicates), "indexWithDuplicates"); err == nil {
+ t.Errorf("Expected an error when duplicate entries are present")
+ }
+}
+
+func TestLoadIndex_EmptyEntry(t *testing.T) {
+ if _, err := loadIndex([]byte(indexWithEmptyEntry), "indexWithEmptyEntry"); err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+}
+
+func TestLoadIndex_Empty(t *testing.T) {
+ if _, err := loadIndex([]byte(""), "indexWithEmpty"); err == nil {
+ t.Errorf("Expected an error when index.yaml is empty.")
+ }
+}
+
+func TestLoadIndexFileAnnotations(t *testing.T) {
+ i, err := LoadIndexFile(annotationstestfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verifyLocalIndex(t, i)
+
+ if len(i.Annotations) != 1 {
+ t.Fatalf("Expected 1 annotation but got %d", len(i.Annotations))
+ }
+ if i.Annotations["helm.sh/test"] != "foo bar" {
+ t.Error("Did not get expected value for helm.sh/test annotation")
+ }
+}
+
+func TestLoadUnorderedIndex(t *testing.T) {
+ i, err := LoadIndexFile(unorderedTestfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verifyLocalIndex(t, i)
+}
+
+func TestMerge(t *testing.T) {
+ ind1 := NewIndexFile()
+
+ if err := ind1.MustAdd(&chart.Metadata{APIVersion: "v2", Name: "dreadnought", Version: "0.1.0"}, "dreadnought-0.1.0.tgz", "http://example.com", "aaaa"); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ ind2 := NewIndexFile()
+
+ for _, x := range []struct {
+ md *chart.Metadata
+ filename string
+ baseURL string
+ digest string
+ }{
+ {&chart.Metadata{APIVersion: "v2", Name: "dreadnought", Version: "0.2.0"}, "dreadnought-0.2.0.tgz", "http://example.com", "aaaabbbb"},
+ {&chart.Metadata{APIVersion: "v2", Name: "doughnut", Version: "0.2.0"}, "doughnut-0.2.0.tgz", "http://example.com", "ccccbbbb"},
+ } {
+ if err := ind2.MustAdd(x.md, x.filename, x.baseURL, x.digest); err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ }
+
+ ind1.Merge(ind2)
+
+ if len(ind1.Entries) != 2 {
+ t.Errorf("Expected 2 entries, got %d", len(ind1.Entries))
+ }
+
+ vs := ind1.Entries["dreadnought"]
+ if len(vs) != 2 {
+ t.Errorf("Expected 2 versions, got %d", len(vs))
+ }
+
+ if v := vs[1]; v.Version != "0.2.0" {
+ t.Errorf("Expected %q version to be 0.2.0, got %s", v.Name, v.Version)
+ }
+
+}
+
+func TestDownloadIndexFile(t *testing.T) {
+ t.Run("should download index file", func(t *testing.T) {
+ srv, err := startLocalServerForTests(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ r, err := NewChartRepository(&Entry{
+ Name: testRepo,
+ URL: srv.URL,
+ }, getter.All(&cli.EnvSettings{}))
+ if err != nil {
+ t.Errorf("Problem creating chart repository from %s: %v", testRepo, err)
+ }
+
+ idx, err := r.DownloadIndexFile()
+ if err != nil {
+ t.Fatalf("Failed to download index file to %s: %#v", idx, err)
+ }
+
+ if _, err := os.Stat(idx); err != nil {
+ t.Fatalf("error finding created index file: %#v", err)
+ }
+
+ i, err := LoadIndexFile(idx)
+ if err != nil {
+ t.Fatalf("Index %q failed to parse: %s", testfile, err)
+ }
+ verifyLocalIndex(t, i)
+
+ // Check that charts file is also created
+ idx = filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))
+ if _, err := os.Stat(idx); err != nil {
+ t.Fatalf("error finding created charts file: %#v", err)
+ }
+
+ b, err := os.ReadFile(idx)
+ if err != nil {
+ t.Fatalf("error reading charts file: %#v", err)
+ }
+ verifyLocalChartsFile(t, b, i)
+ })
+
+ t.Run("should not decode the path in the repo url while downloading index", func(t *testing.T) {
+ chartRepoURLPath := "/some%2Fpath/test"
+ fileBytes, err := os.ReadFile("testdata/local-index.yaml")
+ if err != nil {
+ t.Fatal(err)
+ }
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.RawPath == chartRepoURLPath+"/index.yaml" {
+ w.Write(fileBytes)
+ }
+ })
+ srv, err := startLocalServerForTests(handler)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer srv.Close()
+
+ r, err := NewChartRepository(&Entry{
+ Name: testRepo,
+ URL: srv.URL + chartRepoURLPath,
+ }, getter.All(&cli.EnvSettings{}))
+ if err != nil {
+ t.Errorf("Problem creating chart repository from %s: %v", testRepo, err)
+ }
+
+ idx, err := r.DownloadIndexFile()
+ if err != nil {
+ t.Fatalf("Failed to download index file to %s: %#v", idx, err)
+ }
+
+ if _, err := os.Stat(idx); err != nil {
+ t.Fatalf("error finding created index file: %#v", err)
+ }
+
+ i, err := LoadIndexFile(idx)
+ if err != nil {
+ t.Fatalf("Index %q failed to parse: %s", testfile, err)
+ }
+ verifyLocalIndex(t, i)
+
+ // Check that charts file is also created
+ idx = filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))
+ if _, err := os.Stat(idx); err != nil {
+ t.Fatalf("error finding created charts file: %#v", err)
+ }
+
+ b, err := os.ReadFile(idx)
+ if err != nil {
+ t.Fatalf("error reading charts file: %#v", err)
+ }
+ verifyLocalChartsFile(t, b, i)
+ })
+}
+
+func verifyLocalIndex(t *testing.T, i *IndexFile) {
+ t.Helper()
+ numEntries := len(i.Entries)
+ if numEntries != 3 {
+ t.Errorf("Expected 3 entries in index file but got %d", numEntries)
+ }
+
+ alpine, ok := i.Entries["alpine"]
+ if !ok {
+ t.Fatalf("'alpine' section not found.")
+ }
+
+ if l := len(alpine); l != 1 {
+ t.Fatalf("'alpine' should have 1 chart, got %d", l)
+ }
+
+ nginx, ok := i.Entries["nginx"]
+ if !ok || len(nginx) != 2 {
+ t.Fatalf("Expected 2 nginx entries")
+ }
+
+ expects := []*ChartVersion{
+ {
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "alpine",
+ Description: "string",
+ Version: "1.0.0",
+ Keywords: []string{"linux", "alpine", "small", "sumtin"},
+ Home: "https://github.com/something",
+ },
+ URLs: []string{
+ "https://charts.helm.sh/stable/alpine-1.0.0.tgz",
+ "http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz",
+ },
+ Digest: "sha256:1234567890abcdef",
+ },
+ {
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "nginx",
+ Description: "string",
+ Version: "0.2.0",
+ Keywords: []string{"popular", "web server", "proxy"},
+ Home: "https://github.com/something/else",
+ },
+ URLs: []string{
+ "https://charts.helm.sh/stable/nginx-0.2.0.tgz",
+ },
+ Digest: "sha256:1234567890abcdef",
+ },
+ {
+ Metadata: &chart.Metadata{
+ APIVersion: "v2",
+ Name: "nginx",
+ Description: "string",
+ Version: "0.1.0",
+ Keywords: []string{"popular", "web server", "proxy"},
+ Home: "https://github.com/something",
+ },
+ URLs: []string{
+ "https://charts.helm.sh/stable/nginx-0.1.0.tgz",
+ },
+ Digest: "sha256:1234567890abcdef",
+ },
+ }
+ tests := []*ChartVersion{alpine[0], nginx[0], nginx[1]}
+
+ for i, tt := range tests {
+ expect := expects[i]
+ if tt.Name != expect.Name {
+ t.Errorf("Expected name %q, got %q", expect.Name, tt.Name)
+ }
+ if tt.Description != expect.Description {
+ t.Errorf("Expected description %q, got %q", expect.Description, tt.Description)
+ }
+ if tt.Version != expect.Version {
+ t.Errorf("Expected version %q, got %q", expect.Version, tt.Version)
+ }
+ if tt.Digest != expect.Digest {
+ t.Errorf("Expected digest %q, got %q", expect.Digest, tt.Digest)
+ }
+ if tt.Home != expect.Home {
+ t.Errorf("Expected home %q, got %q", expect.Home, tt.Home)
+ }
+
+ for i, url := range tt.URLs {
+ if url != expect.URLs[i] {
+ t.Errorf("Expected URL %q, got %q", expect.URLs[i], url)
+ }
+ }
+ for i, kw := range tt.Keywords {
+ if kw != expect.Keywords[i] {
+ t.Errorf("Expected keywords %q, got %q", expect.Keywords[i], kw)
+ }
+ }
+ }
+}
+
+func verifyLocalChartsFile(t *testing.T, chartsContent []byte, indexContent *IndexFile) {
+ t.Helper()
+ var expected, reald []string
+ for chart := range indexContent.Entries {
+ expected = append(expected, chart)
+ }
+ sort.Strings(expected)
+
+ scanner := bufio.NewScanner(bytes.NewReader(chartsContent))
+ for scanner.Scan() {
+ reald = append(reald, scanner.Text())
+ }
+ sort.Strings(reald)
+
+ if strings.Join(expected, " ") != strings.Join(reald, " ") {
+ t.Errorf("Cached charts file content unexpected. Expected:\n%s\ngot:\n%s", expected, reald)
+ }
+}
+
+func TestIndexDirectory(t *testing.T) {
+ dir := "testdata/repository"
+ index, err := IndexDirectory(dir, "http://localhost:8080")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if l := len(index.Entries); l != 3 {
+ t.Fatalf("Expected 3 entries, got %d", l)
+ }
+
+ // Other things test the entry generation more thoroughly. We just test a
+ // few fields.
+
+ corpus := []struct{ chartName, downloadLink string }{
+ {"frobnitz", "http://localhost:8080/frobnitz-1.2.3.tgz"},
+ {"zarthal", "http://localhost:8080/universe/zarthal-1.0.0.tgz"},
+ }
+
+ for _, test := range corpus {
+ cname := test.chartName
+ frobs, ok := index.Entries[cname]
+ if !ok {
+ t.Fatalf("Could not read chart %s", cname)
+ }
+
+ frob := frobs[0]
+ if frob.Digest == "" {
+ t.Errorf("Missing digest of file %s.", frob.Name)
+ }
+ if frob.URLs[0] != test.downloadLink {
+ t.Errorf("Unexpected URLs: %v", frob.URLs)
+ }
+ if frob.Name != cname {
+ t.Errorf("Expected %q, got %q", cname, frob.Name)
+ }
+ }
+}
+
+func TestIndexAdd(t *testing.T) {
+ i := NewIndexFile()
+
+ for _, x := range []struct {
+ md *chart.Metadata
+ filename string
+ baseURL string
+ digest string
+ }{
+
+ {&chart.Metadata{APIVersion: "v2", Name: "clipper", Version: "0.1.0"}, "clipper-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890"},
+ {&chart.Metadata{APIVersion: "v2", Name: "alpine", Version: "0.1.0"}, "/home/charts/alpine-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890"},
+ {&chart.Metadata{APIVersion: "v2", Name: "deis", Version: "0.1.0"}, "/home/charts/deis-0.1.0.tgz", "http://example.com/charts/", "sha256:1234567890"},
+ } {
+ if err := i.MustAdd(x.md, x.filename, x.baseURL, x.digest); err != nil {
+ t.Errorf("unexpected error adding to index: %s", err)
+ }
+ }
+
+ if i.Entries["clipper"][0].URLs[0] != "http://example.com/charts/clipper-0.1.0.tgz" {
+ t.Errorf("Expected http://example.com/charts/clipper-0.1.0.tgz, got %s", i.Entries["clipper"][0].URLs[0])
+ }
+ if i.Entries["alpine"][0].URLs[0] != "http://example.com/charts/alpine-0.1.0.tgz" {
+ t.Errorf("Expected http://example.com/charts/alpine-0.1.0.tgz, got %s", i.Entries["alpine"][0].URLs[0])
+ }
+ if i.Entries["deis"][0].URLs[0] != "http://example.com/charts/deis-0.1.0.tgz" {
+ t.Errorf("Expected http://example.com/charts/deis-0.1.0.tgz, got %s", i.Entries["deis"][0].URLs[0])
+ }
+
+ // test error condition
+ if err := i.MustAdd(&chart.Metadata{}, "error-0.1.0.tgz", "", ""); err == nil {
+ t.Fatal("expected error adding to index")
+ }
+}
+
+func TestIndexWrite(t *testing.T) {
+ i := NewIndexFile()
+ if err := i.MustAdd(&chart.Metadata{APIVersion: "v2", Name: "clipper", Version: "0.1.0"}, "clipper-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890"); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ dir := t.TempDir()
+ testpath := filepath.Join(dir, "test")
+ i.WriteFile(testpath, 0600)
+
+ got, err := os.ReadFile(testpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !strings.Contains(string(got), "clipper-0.1.0.tgz") {
+ t.Fatal("Index files doesn't contain expected content")
+ }
+}
+
+func TestIndexJSONWrite(t *testing.T) {
+ i := NewIndexFile()
+ if err := i.MustAdd(&chart.Metadata{APIVersion: "v2", Name: "clipper", Version: "0.1.0"}, "clipper-0.1.0.tgz", "http://example.com/charts", "sha256:1234567890"); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ dir := t.TempDir()
+ testpath := filepath.Join(dir, "test")
+ i.WriteJSONFile(testpath, 0600)
+
+ got, err := os.ReadFile(testpath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !json.Valid(got) {
+ t.Fatal("Index files doesn't contain valid JSON")
+ }
+ if !strings.Contains(string(got), "clipper-0.1.0.tgz") {
+ t.Fatal("Index files doesn't contain expected content")
+ }
+}
+
+func TestAddFileIndexEntriesNil(t *testing.T) {
+ i := NewIndexFile()
+ i.APIVersion = chart.APIVersionV1
+ i.Entries = nil
+ for _, x := range []struct {
+ md *chart.Metadata
+ filename string
+ baseURL string
+ digest string
+ }{
+ {&chart.Metadata{APIVersion: "v2", Name: " ", Version: "8033-5.apinie+s.r"}, "setter-0.1.9+beta.tgz", "http://example.com/charts", "sha256:1234567890abc"},
+ } {
+ if err := i.MustAdd(x.md, x.filename, x.baseURL, x.digest); err == nil {
+ t.Errorf("expected err to be non-nil when entries not initialized")
+ }
+ }
+}
+
+func TestIgnoreSkippableChartValidationError(t *testing.T) {
+ type TestCase struct {
+ Input error
+ ErrorSkipped bool
+ }
+ testCases := map[string]TestCase{
+ "nil": {
+ Input: nil,
+ },
+ "generic_error": {
+ Input: fmt.Errorf("foo"),
+ },
+ "non_skipped_validation_error": {
+ Input: chart.ValidationError("chart.metadata.type must be application or library"),
+ },
+ "skipped_validation_error": {
+ Input: chart.ValidationErrorf("more than one dependency with name or alias %q", "foo"),
+ ErrorSkipped: true,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ result := ignoreSkippableChartValidationError(tc.Input)
+
+ if tc.Input == nil {
+ if result != nil {
+ t.Error("expected nil result for nil input")
+ }
+ return
+ }
+
+ if tc.ErrorSkipped {
+ if result != nil {
+ t.Error("expected nil result for skipped error")
+ }
+ return
+ }
+
+ if tc.Input != result {
+ t.Error("expected the result equal to input")
+ }
+
+ })
+ }
+}
+
+var indexWithDuplicatesInChartDeps = `
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+`
+var indexWithDuplicatesInLastChartDeps = `
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ home: https://github.com/something
+ digest: "sha256:111"
+`
+
+func TestLoadIndex_DuplicateChartDeps(t *testing.T) {
+ tests := []struct {
+ source string
+ data string
+ }{
+ {
+ source: "indexWithDuplicatesInChartDeps",
+ data: indexWithDuplicatesInChartDeps,
+ },
+ {
+ source: "indexWithDuplicatesInLastChartDeps",
+ data: indexWithDuplicatesInLastChartDeps,
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.source, func(t *testing.T) {
+ idx, err := loadIndex([]byte(tc.data), tc.source)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ cvs := idx.Entries["nginx"]
+ if cvs == nil {
+ t.Error("expected one chart version not to be filtered out")
+ }
+ for _, v := range cvs {
+ if v.Name == "alpine" {
+ t.Error("malformed version was not filtered out")
+ }
+ }
+ })
+ }
+}
diff --git a/helm/pkg/repo/v1/repo.go b/helm/pkg/repo/v1/repo.go
new file mode 100644
index 000000000..38d2b0ca1
--- /dev/null
+++ b/helm/pkg/repo/v1/repo.go
@@ -0,0 +1,125 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo // import "helm.sh/helm/v4/pkg/repo/v1"
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "sigs.k8s.io/yaml"
+)
+
+// File represents the repositories.yaml file
+type File struct {
+ APIVersion string `json:"apiVersion"`
+ Generated time.Time `json:"generated"`
+ Repositories []*Entry `json:"repositories"`
+}
+
+// NewFile generates an empty repositories file.
+//
+// Generated and APIVersion are automatically set.
+func NewFile() *File {
+ return &File{
+ APIVersion: APIVersionV1,
+ Generated: time.Now(),
+ Repositories: []*Entry{},
+ }
+}
+
+// LoadFile takes a file at the given path and returns a File object
+func LoadFile(path string) (*File, error) {
+ r := new(File)
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return r, fmt.Errorf("couldn't load repositories file (%s): %w", path, err)
+ }
+
+ err = yaml.Unmarshal(b, r)
+ return r, err
+}
+
+// Add adds one or more repo entries to a repo file.
+func (r *File) Add(re ...*Entry) {
+ r.Repositories = append(r.Repositories, re...)
+}
+
+// Update attempts to replace one or more repo entries in a repo file. If an
+// entry with the same name doesn't exist in the repo file it will add it.
+func (r *File) Update(re ...*Entry) {
+ for _, target := range re {
+ r.update(target)
+ }
+}
+
+func (r *File) update(e *Entry) {
+ for j, repo := range r.Repositories {
+ if repo.Name == e.Name {
+ r.Repositories[j] = e
+ return
+ }
+ }
+ r.Add(e)
+}
+
+// Has returns true if the given name is already a repository name.
+func (r *File) Has(name string) bool {
+ entry := r.Get(name)
+ return entry != nil
+}
+
+// Get returns an entry with the given name if it exists, otherwise returns nil
+func (r *File) Get(name string) *Entry {
+ for _, entry := range r.Repositories {
+ if entry.Name == name {
+ return entry
+ }
+ }
+ return nil
+}
+
+// Remove removes the entry from the list of repositories.
+func (r *File) Remove(name string) bool {
+ cp := []*Entry{}
+ found := false
+ for _, rf := range r.Repositories {
+ if rf == nil {
+ continue
+ }
+ if rf.Name == name {
+ found = true
+ continue
+ }
+ cp = append(cp, rf)
+ }
+ r.Repositories = cp
+ return found
+}
+
+// WriteFile writes a repositories file to the given path.
+func (r *File) WriteFile(path string, perm os.FileMode) error {
+ data, err := yaml.Marshal(r)
+ if err != nil {
+ return err
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ return os.WriteFile(path, data, perm)
+}
diff --git a/helm/pkg/repo/v1/repo_test.go b/helm/pkg/repo/v1/repo_test.go
new file mode 100644
index 000000000..bdaa61eda
--- /dev/null
+++ b/helm/pkg/repo/v1/repo_test.go
@@ -0,0 +1,257 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repo
+
+import (
+ "os"
+ "strings"
+ "testing"
+)
+
+const testRepositoriesFile = "testdata/repositories.yaml"
+
+func TestFile(t *testing.T) {
+ rf := NewFile()
+ rf.Add(
+ &Entry{
+ Name: "stable",
+ URL: "https://example.com/stable/charts",
+ },
+ &Entry{
+ Name: "incubator",
+ URL: "https://example.com/incubator",
+ },
+ )
+
+ if len(rf.Repositories) != 2 {
+ t.Fatal("Expected 2 repositories")
+ }
+
+ if rf.Has("nosuchrepo") {
+ t.Error("Found nonexistent repo")
+ }
+ if !rf.Has("incubator") {
+ t.Error("incubator repo is missing")
+ }
+
+ stable := rf.Repositories[0]
+ if stable.Name != "stable" {
+ t.Error("stable is not named stable")
+ }
+ if stable.URL != "https://example.com/stable/charts" {
+ t.Error("Wrong URL for stable")
+ }
+}
+
+func TestNewFile(t *testing.T) {
+ expects := NewFile()
+ expects.Add(
+ &Entry{
+ Name: "stable",
+ URL: "https://example.com/stable/charts",
+ },
+ &Entry{
+ Name: "incubator",
+ URL: "https://example.com/incubator",
+ },
+ )
+
+ file, err := LoadFile(testRepositoriesFile)
+ if err != nil {
+ t.Errorf("%q could not be loaded: %s", testRepositoriesFile, err)
+ }
+
+ if len(expects.Repositories) != len(file.Repositories) {
+ t.Fatalf("Unexpected repo data: %#v", file.Repositories)
+ }
+
+ for i, expect := range expects.Repositories {
+ got := file.Repositories[i]
+ if expect.Name != got.Name {
+ t.Errorf("Expected name %q, got %q", expect.Name, got.Name)
+ }
+ if expect.URL != got.URL {
+ t.Errorf("Expected url %q, got %q", expect.URL, got.URL)
+ }
+ }
+}
+
+func TestRepoFile_Get(t *testing.T) {
+ repo := NewFile()
+ repo.Add(
+ &Entry{
+ Name: "first",
+ URL: "https://example.com/first",
+ },
+ &Entry{
+ Name: "second",
+ URL: "https://example.com/second",
+ },
+ &Entry{
+ Name: "third",
+ URL: "https://example.com/third",
+ },
+ &Entry{
+ Name: "fourth",
+ URL: "https://example.com/fourth",
+ },
+ )
+
+ name := "second"
+
+ entry := repo.Get(name)
+ if entry == nil { //nolint:staticcheck
+ t.Fatalf("Expected repo entry %q to be found", name)
+ }
+
+ if entry.URL != "https://example.com/second" { //nolint:staticcheck
+ t.Errorf("Expected repo URL to be %q but got %q", "https://example.com/second", entry.URL)
+ }
+
+ entry = repo.Get("nonexistent")
+ if entry != nil {
+ t.Errorf("Got unexpected entry %+v", entry)
+ }
+}
+
+func TestRemoveRepository(t *testing.T) {
+ sampleRepository := NewFile()
+ sampleRepository.Add(
+ &Entry{
+ Name: "stable",
+ URL: "https://example.com/stable/charts",
+ },
+ &Entry{
+ Name: "incubator",
+ URL: "https://example.com/incubator",
+ },
+ )
+
+ removeRepository := "stable"
+ found := sampleRepository.Remove(removeRepository)
+ if !found {
+ t.Errorf("expected repository %s not found", removeRepository)
+ }
+
+ found = sampleRepository.Has(removeRepository)
+ if found {
+ t.Errorf("repository %s not deleted", removeRepository)
+ }
+}
+
+func TestUpdateRepository(t *testing.T) {
+ sampleRepository := NewFile()
+ sampleRepository.Add(
+ &Entry{
+ Name: "stable",
+ URL: "https://example.com/stable/charts",
+ },
+ &Entry{
+ Name: "incubator",
+ URL: "https://example.com/incubator",
+ },
+ )
+ newRepoName := "sample"
+ sampleRepository.Update(&Entry{Name: newRepoName,
+ URL: "https://example.com/sample",
+ })
+
+ if !sampleRepository.Has(newRepoName) {
+ t.Errorf("expected repository %s not found", newRepoName)
+ }
+ repoCount := len(sampleRepository.Repositories)
+
+ sampleRepository.Update(&Entry{Name: newRepoName,
+ URL: "https://example.com/sample",
+ })
+
+ if repoCount != len(sampleRepository.Repositories) {
+ t.Errorf("invalid number of repositories found %d, expected number of repositories %d", len(sampleRepository.Repositories), repoCount)
+ }
+}
+
+func TestWriteFile(t *testing.T) {
+ sampleRepository := NewFile()
+ sampleRepository.Add(
+ &Entry{
+ Name: "stable",
+ URL: "https://example.com/stable/charts",
+ },
+ &Entry{
+ Name: "incubator",
+ URL: "https://example.com/incubator",
+ },
+ )
+
+ file, err := os.CreateTemp(t.TempDir(), "helm-repo")
+ if err != nil {
+ t.Errorf("failed to create test-file (%v)", err)
+ }
+ defer os.Remove(file.Name())
+ if err := sampleRepository.WriteFile(file.Name(), 0600); err != nil {
+ t.Errorf("failed to write file (%v)", err)
+ }
+
+ repos, err := LoadFile(file.Name())
+ if err != nil {
+ t.Errorf("failed to load file (%v)", err)
+ }
+ for _, repo := range sampleRepository.Repositories {
+ if !repos.Has(repo.Name) {
+ t.Errorf("expected repository %s not found", repo.Name)
+ }
+ }
+}
+
+func TestRepoNotExists(t *testing.T) {
+ if _, err := LoadFile("/this/path/does/not/exist.yaml"); err == nil {
+ t.Errorf("expected err to be non-nil when path does not exist")
+ } else if !strings.Contains(err.Error(), "couldn't load repositories file") {
+ t.Errorf("expected prompt `couldn't load repositories file`")
+ }
+}
+
+func TestRemoveRepositoryInvalidEntries(t *testing.T) {
+ sampleRepository := NewFile()
+ sampleRepository.Add(
+ &Entry{
+ Name: "stable",
+ URL: "https://example.com/stable/charts",
+ },
+ &Entry{
+ Name: "incubator",
+ URL: "https://example.com/incubator",
+ },
+ &Entry{},
+ nil,
+ &Entry{
+ Name: "test",
+ URL: "https://example.com/test",
+ },
+ )
+
+ removeRepository := "stable"
+ found := sampleRepository.Remove(removeRepository)
+ if !found {
+ t.Errorf("expected repository %s not found", removeRepository)
+ }
+
+ found = sampleRepository.Has(removeRepository)
+ if found {
+ t.Errorf("repository %s not deleted", removeRepository)
+ }
+}
diff --git a/helm/pkg/repo/v1/repotest/doc.go b/helm/pkg/repo/v1/repotest/doc.go
new file mode 100644
index 000000000..c01daad64
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package repotest provides utilities for testing.
+
+The server provides a testing server that can be set up and torn down quickly.
+*/
+package repotest
diff --git a/helm/pkg/repo/v1/repotest/server.go b/helm/pkg/repo/v1/repotest/server.go
new file mode 100644
index 000000000..12b96de5a
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/server.go
@@ -0,0 +1,409 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repotest
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/distribution/distribution/v3/configuration"
+ "github.com/distribution/distribution/v3/registry"
+ _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" // used for docker test registry
+ _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" // used for docker test registry
+ "golang.org/x/crypto/bcrypt"
+ "sigs.k8s.io/yaml"
+
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ ociRegistry "helm.sh/helm/v4/pkg/registry"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+func BasicAuthMiddleware(t *testing.T) http.HandlerFunc {
+ t.Helper()
+ return http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
+ username, password, ok := r.BasicAuth()
+ if !ok || username != "username" || password != "password" {
+ t.Errorf("Expected request to use basic auth and for username == 'username' and password == 'password', got '%v', '%s', '%s'", ok, username, password)
+ }
+ })
+}
+
+type ServerOption func(*testing.T, *Server)
+
+func WithTLSConfig(tlsConfig *tls.Config) ServerOption {
+ return func(_ *testing.T, server *Server) {
+ server.tlsConfig = tlsConfig
+ }
+}
+
+func WithMiddleware(middleware http.HandlerFunc) ServerOption {
+ return func(_ *testing.T, server *Server) {
+ server.middleware = middleware
+ }
+}
+
+func WithChartSourceGlob(glob string) ServerOption {
+ return func(_ *testing.T, server *Server) {
+ server.chartSourceGlob = glob
+ }
+}
+
+// Server is an implementation of a repository server for testing.
+type Server struct {
+ docroot string
+ srv *httptest.Server
+ middleware http.HandlerFunc
+ tlsConfig *tls.Config
+ chartSourceGlob string
+}
+
+// NewTempServer creates a server inside of a temp dir.
+//
+// If the passed in string is not "", it will be treated as a shell glob, and files
+// will be copied from that path to the server's docroot.
+//
+// The server is started automatically. The caller is responsible for stopping
+// the server.
+//
+// The temp dir will be removed by testing package automatically when test finished.
+func NewTempServer(t *testing.T, options ...ServerOption) *Server {
+ t.Helper()
+ docrootTempDir := t.TempDir()
+
+ srv := newServer(t, docrootTempDir, options...)
+
+ t.Cleanup(func() { os.RemoveAll(srv.docroot) })
+
+ if srv.chartSourceGlob != "" {
+ if _, err := srv.CopyCharts(srv.chartSourceGlob); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ return srv
+}
+
+// Create the server, but don't yet start it
+func newServer(t *testing.T, docroot string, options ...ServerOption) *Server {
+ t.Helper()
+ absdocroot, err := filepath.Abs(docroot)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s := &Server{
+ docroot: absdocroot,
+ }
+
+ for _, option := range options {
+ option(t, s)
+ }
+
+ s.srv = httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if s.middleware != nil {
+ s.middleware.ServeHTTP(w, r)
+ }
+ http.FileServer(http.Dir(s.Root())).ServeHTTP(w, r)
+ }))
+
+ s.start()
+
+ // Add the testing repository as the only repo. Server must be started for the server's URL to be valid
+ if err := setTestingRepository(s.URL(), filepath.Join(s.docroot, "repositories.yaml")); err != nil {
+ t.Fatal(err)
+ }
+
+ return s
+}
+
+type OCIServer struct {
+ *registry.Registry
+ RegistryURL string
+ Dir string
+ TestUsername string
+ TestPassword string
+ Client *ociRegistry.Client
+}
+
+type OCIServerRunConfig struct {
+ DependingChart *chart.Chart
+}
+
+type OCIServerOpt func(config *OCIServerRunConfig)
+
+func WithDependingChart(c *chart.Chart) OCIServerOpt {
+ return func(config *OCIServerRunConfig) {
+ config.DependingChart = c
+ }
+}
+
+func NewOCIServer(t *testing.T, dir string) (*OCIServer, error) {
+ t.Helper()
+ testHtpasswdFileBasename := "authtest.htpasswd"
+ testUsername, testPassword := "username", "password"
+
+ pwBytes, err := bcrypt.GenerateFromPassword([]byte(testPassword), bcrypt.DefaultCost)
+ if err != nil {
+ t.Fatal("error generating bcrypt password for test htpasswd file")
+ }
+ htpasswdPath := filepath.Join(dir, testHtpasswdFileBasename)
+ err = os.WriteFile(htpasswdPath, fmt.Appendf(nil, "%s:%s\n", testUsername, string(pwBytes)), 0o644)
+ if err != nil {
+ t.Fatalf("error creating test htpasswd file")
+ }
+
+ // Registry config
+ config := &configuration.Configuration{}
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("error finding free port for test registry")
+ }
+ defer ln.Close()
+
+ port := ln.Addr().(*net.TCPAddr).Port
+ config.HTTP.Addr = ln.Addr().String()
+ config.HTTP.DrainTimeout = time.Duration(10) * time.Second
+ config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
+ config.Auth = configuration.Auth{
+ "htpasswd": configuration.Parameters{
+ "realm": "localhost",
+ "path": htpasswdPath,
+ },
+ }
+
+ registryURL := fmt.Sprintf("localhost:%d", port)
+
+ r, err := registry.NewRegistry(t.Context(), config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return &OCIServer{
+ Registry: r,
+ RegistryURL: registryURL,
+ TestUsername: testUsername,
+ TestPassword: testPassword,
+ Dir: dir,
+ }, nil
+}
+
+func (srv *OCIServer) Run(t *testing.T, opts ...OCIServerOpt) {
+ t.Helper()
+ cfg := &OCIServerRunConfig{}
+ for _, fn := range opts {
+ fn(cfg)
+ }
+
+ go srv.ListenAndServe()
+
+ credentialsFile := filepath.Join(srv.Dir, "config.json")
+
+ // init test client
+ registryClient, err := ociRegistry.NewClient(
+ ociRegistry.ClientOptDebug(true),
+ ociRegistry.ClientOptEnableCache(true),
+ ociRegistry.ClientOptWriter(os.Stdout),
+ ociRegistry.ClientOptCredentialsFile(credentialsFile),
+ )
+ if err != nil {
+ t.Fatalf("error creating registry client")
+ }
+
+ err = registryClient.Login(
+ srv.RegistryURL,
+ ociRegistry.LoginOptBasicAuth(srv.TestUsername, srv.TestPassword),
+ ociRegistry.LoginOptInsecure(true),
+ ociRegistry.LoginOptPlainText(true))
+ if err != nil {
+ t.Fatalf("error logging into registry with good credentials: %v", err)
+ }
+
+ ref := fmt.Sprintf("%s/u/ocitestuser/oci-dependent-chart:0.1.0", srv.RegistryURL)
+
+ err = chartutil.ExpandFile(srv.Dir, filepath.Join(srv.Dir, "oci-dependent-chart-0.1.0.tgz"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // valid chart
+ ch, err := loader.LoadDir(filepath.Join(srv.Dir, "oci-dependent-chart"))
+ if err != nil {
+ t.Fatal("error loading chart")
+ }
+
+ err = os.RemoveAll(filepath.Join(srv.Dir, "oci-dependent-chart"))
+ if err != nil {
+ t.Fatal("error removing chart before push")
+ }
+
+ // save it back to disk..
+ absPath, err := chartutil.Save(ch, srv.Dir)
+ if err != nil {
+ t.Fatal("could not create chart archive")
+ }
+
+ // load it into memory...
+ contentBytes, err := os.ReadFile(absPath)
+ if err != nil {
+ t.Fatal("could not load chart into memory")
+ }
+
+ result, err := registryClient.Push(contentBytes, ref)
+ if err != nil {
+ t.Fatalf("error pushing dependent chart: %s", err)
+ }
+ t.Logf("Manifest.Digest: %s, Manifest.Size: %d, "+
+ "Config.Digest: %s, Config.Size: %d, "+
+ "Chart.Digest: %s, Chart.Size: %d",
+ result.Manifest.Digest, result.Manifest.Size,
+ result.Config.Digest, result.Config.Size,
+ result.Chart.Digest, result.Chart.Size)
+
+ srv.Client = registryClient
+ c := cfg.DependingChart
+ if c == nil {
+ return
+ }
+
+ dependingRef := fmt.Sprintf("%s/u/ocitestuser/%s:%s",
+ srv.RegistryURL, c.Metadata.Name, c.Metadata.Version)
+
+ // load it into memory...
+ absPath = filepath.Join(srv.Dir,
+ fmt.Sprintf("%s-%s.tgz", c.Metadata.Name, c.Metadata.Version))
+ contentBytes, err = os.ReadFile(absPath)
+ if err != nil {
+ t.Fatal("could not load chart into memory")
+ }
+
+ result, err = registryClient.Push(contentBytes, dependingRef)
+ if err != nil {
+ t.Fatalf("error pushing depending chart: %s", err)
+ }
+ t.Logf("Manifest.Digest: %s, Manifest.Size: %d, "+
+ "Config.Digest: %s, Config.Size: %d, "+
+ "Chart.Digest: %s, Chart.Size: %d",
+ result.Manifest.Digest, result.Manifest.Size,
+ result.Config.Digest, result.Config.Size,
+ result.Chart.Digest, result.Chart.Size)
+}
+
+// Root gets the docroot for the server.
+func (s *Server) Root() string {
+ return s.docroot
+}
+
+// CopyCharts takes a glob expression and copies those charts to the server root.
+func (s *Server) CopyCharts(origin string) ([]string, error) {
+ files, err := filepath.Glob(origin)
+ if err != nil {
+ return []string{}, err
+ }
+ copied := make([]string, len(files))
+ for i, f := range files {
+ base := filepath.Base(f)
+ newname := filepath.Join(s.docroot, base)
+ data, err := os.ReadFile(f)
+ if err != nil {
+ return []string{}, err
+ }
+ if err := os.WriteFile(newname, data, 0o644); err != nil {
+ return []string{}, err
+ }
+ copied[i] = newname
+ }
+
+ err = s.CreateIndex()
+ return copied, err
+}
+
+// CreateIndex will read docroot and generate an index.yaml file.
+func (s *Server) CreateIndex() error {
+ // generate the index
+ index, err := repo.IndexDirectory(s.docroot, s.URL())
+ if err != nil {
+ return err
+ }
+
+ d, err := yaml.Marshal(index)
+ if err != nil {
+ return err
+ }
+
+ ifile := filepath.Join(s.docroot, "index.yaml")
+ return os.WriteFile(ifile, d, 0o644)
+}
+
+func (s *Server) start() {
+ if s.tlsConfig != nil {
+ s.srv.TLS = s.tlsConfig
+ s.srv.StartTLS()
+ } else {
+ s.srv.Start()
+ }
+}
+
+// Stop stops the server and closes all connections.
+//
+// It should be called explicitly.
+func (s *Server) Stop() {
+ s.srv.Close()
+}
+
+// URL returns the URL of the server.
+//
+// Example:
+//
+// http://localhost:1776
+func (s *Server) URL() string {
+ return s.srv.URL
+}
+
+func (s *Server) Client() *http.Client {
+ return s.srv.Client()
+}
+
+// LinkIndices links the index created with CreateIndex and makes a symbolic link to the cache index.
+//
+// This makes it possible to simulate a local cache of a repository.
+func (s *Server) LinkIndices() error {
+ lstart := filepath.Join(s.docroot, "index.yaml")
+ ldest := filepath.Join(s.docroot, "test-index.yaml")
+ return os.Symlink(lstart, ldest)
+}
+
+// setTestingRepository sets up a testing repository.yaml with only the given URL.
+func setTestingRepository(url, fname string) error {
+ if url == "" {
+ panic("no url")
+ }
+
+ r := repo.NewFile()
+ r.Add(&repo.Entry{
+ Name: "test",
+ URL: url,
+ })
+ return r.WriteFile(fname, 0o640)
+}
diff --git a/helm/pkg/repo/v1/repotest/server_test.go b/helm/pkg/repo/v1/repotest/server_test.go
new file mode 100644
index 000000000..f0e374fc0
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/server_test.go
@@ -0,0 +1,222 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repotest
+
+import (
+ "io"
+ "net/http"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "sigs.k8s.io/yaml"
+
+ "helm.sh/helm/v4/internal/test/ensure"
+ "helm.sh/helm/v4/pkg/repo/v1"
+)
+
+// Young'n, in these here parts, we test our tests.
+
+func TestServer(t *testing.T) {
+ ensure.HelmHome(t)
+
+ rootDir := t.TempDir()
+
+ srv := newServer(t, rootDir)
+ defer srv.Stop()
+
+ c, err := srv.CopyCharts("testdata/*.tgz")
+ if err != nil {
+ // Some versions of Go don't correctly fire defer on Fatal.
+ t.Fatal(err)
+ }
+
+ if len(c) != 1 {
+ t.Errorf("Unexpected chart count: %d", len(c))
+ }
+
+ if filepath.Base(c[0]) != "examplechart-0.1.0.tgz" {
+ t.Errorf("Unexpected chart: %s", c[0])
+ }
+
+ res, err := http.Get(srv.URL() + "/examplechart-0.1.0.tgz")
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.ContentLength < 500 {
+ t.Errorf("Expected at least 500 bytes of data, got %d", res.ContentLength)
+ }
+
+ res, err = http.Get(srv.URL() + "/index.yaml")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := io.ReadAll(res.Body)
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m := repo.NewIndexFile()
+ if err := yaml.Unmarshal(data, m); err != nil {
+ t.Fatal(err)
+ }
+
+ if l := len(m.Entries); l != 1 {
+ t.Fatalf("Expected 1 entry, got %d", l)
+ }
+
+ expect := "examplechart"
+ if !m.Has(expect, "0.1.0") {
+ t.Errorf("missing %q", expect)
+ }
+
+ res, err = http.Get(srv.URL() + "/index.yaml-nosuchthing")
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != http.StatusNotFound {
+ t.Fatalf("Expected 404, got %d", res.StatusCode)
+ }
+}
+
+func TestNewTempServer(t *testing.T) {
+ ensure.HelmHome(t)
+
+ type testCase struct {
+ options []ServerOption
+ }
+
+ testCases := map[string]testCase{
+ "plainhttp": {
+ options: []ServerOption{
+ WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"),
+ },
+ },
+ "tls": {
+ options: []ServerOption{
+ WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"),
+ WithTLSConfig(MakeTestTLSConfig(t, "../../../../testdata")),
+ },
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ srv := NewTempServer(
+ t,
+ tc.options...,
+ )
+ defer srv.Stop()
+
+ if srv.srv.URL == "" {
+ t.Fatal("unstarted server")
+ }
+
+ client := srv.Client()
+
+ {
+ res, err := client.Head(srv.URL() + "/repositories.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+
+ res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ t.Errorf("Expected 200, got %d", res.StatusCode)
+ }
+
+ }
+
+ {
+ res, err := client.Head(srv.URL() + "/examplechart-0.1.0.tgz")
+ if err != nil {
+ t.Error(err)
+ }
+ res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ t.Errorf("Expected 200, got %d", res.StatusCode)
+ }
+ }
+
+ res, err := client.Get(srv.URL() + "/examplechart-0.1.0.tgz")
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.ContentLength < 500 {
+ t.Errorf("Expected at least 500 bytes of data, got %d", res.ContentLength)
+ }
+
+ res, err = client.Get(srv.URL() + "/index.yaml")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := io.ReadAll(res.Body)
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m := repo.NewIndexFile()
+ if err := yaml.Unmarshal(data, m); err != nil {
+ t.Fatal(err)
+ }
+
+ if l := len(m.Entries); l != 1 {
+ t.Fatalf("Expected 1 entry, got %d", l)
+ }
+
+ expect := "examplechart"
+ if !m.Has(expect, "0.1.0") {
+ t.Errorf("missing %q", expect)
+ }
+
+ res, err = client.Get(srv.URL() + "/index.yaml-nosuchthing")
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != http.StatusNotFound {
+ t.Fatalf("Expected 404, got %d", res.StatusCode)
+ }
+ })
+ }
+
+}
+
+func TestNewTempServer_TLS(t *testing.T) {
+ ensure.HelmHome(t)
+
+ srv := NewTempServer(
+ t,
+ WithChartSourceGlob("testdata/examplechart-0.1.0.tgz"),
+ WithTLSConfig(MakeTestTLSConfig(t, "../../../../testdata")),
+ )
+ defer srv.Stop()
+
+ if !strings.HasPrefix(srv.URL(), "https://") {
+ t.Fatal("non-TLS server")
+ }
+}
diff --git a/helm/pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz b/helm/pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz
new file mode 100644
index 000000000..c5ea741eb
Binary files /dev/null and b/helm/pkg/repo/v1/repotest/testdata/examplechart-0.1.0.tgz differ
diff --git a/helm/pkg/repo/v1/repotest/testdata/examplechart/.helmignore b/helm/pkg/repo/v1/repotest/testdata/examplechart/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/testdata/examplechart/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml b/helm/pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml
new file mode 100644
index 000000000..a7d297285
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/testdata/examplechart/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: examplechart
+version: 0.1.0
diff --git a/helm/pkg/repo/v1/repotest/testdata/examplechart/values.yaml b/helm/pkg/repo/v1/repotest/testdata/examplechart/values.yaml
new file mode 100644
index 000000000..5170c61e3
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/testdata/examplechart/values.yaml
@@ -0,0 +1,4 @@
+# Default values for examplechart.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
diff --git a/helm/pkg/repo/v1/repotest/tlsconfig.go b/helm/pkg/repo/v1/repotest/tlsconfig.go
new file mode 100644
index 000000000..3ea7338ff
--- /dev/null
+++ b/helm/pkg/repo/v1/repotest/tlsconfig.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package repotest
+
+import (
+ "crypto/tls"
+ "path/filepath"
+ "testing"
+
+ "helm.sh/helm/v4/internal/tlsutil"
+
+ "github.com/stretchr/testify/require"
+)
+
+func MakeTestTLSConfig(t *testing.T, path string) *tls.Config {
+ t.Helper()
+ ca, pub, priv := filepath.Join(path, "rootca.crt"), filepath.Join(path, "crt.pem"), filepath.Join(path, "key.pem")
+
+ insecure := false
+ tlsConf, err := tlsutil.NewTLSConfig(
+ tlsutil.WithInsecureSkipVerify(insecure),
+ tlsutil.WithCertKeyPairFiles(pub, priv),
+ tlsutil.WithCAFile(ca),
+ )
+ //require.Nil(t, err, err.Error())
+ require.Nil(t, err)
+
+ tlsConf.ServerName = "helm.sh"
+
+ return tlsConf
+}
diff --git a/helm/pkg/repo/v1/testdata/chartmuseum-index.yaml b/helm/pkg/repo/v1/testdata/chartmuseum-index.yaml
new file mode 100644
index 000000000..349a529aa
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/chartmuseum-index.yaml
@@ -0,0 +1,54 @@
+serverInfo:
+ contextPath: /v1/helm
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.1.0.tgz
+ name: nginx
+ description: string
+ version: 0.1.0
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ alpine:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - linux
+ - alpine
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
+ chartWithNoURL:
+ - name: chartWithNoURL
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
diff --git a/helm/pkg/repo/v1/testdata/local-index-annotations.yaml b/helm/pkg/repo/v1/testdata/local-index-annotations.yaml
new file mode 100644
index 000000000..833ab854b
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/local-index-annotations.yaml
@@ -0,0 +1,54 @@
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.1.0.tgz
+ name: nginx
+ description: string
+ version: 0.1.0
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ alpine:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - linux
+ - alpine
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
+ chartWithNoURL:
+ - name: chartWithNoURL
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
+annotations:
+ helm.sh/test: foo bar
diff --git a/helm/pkg/repo/v1/testdata/local-index-unordered.yaml b/helm/pkg/repo/v1/testdata/local-index-unordered.yaml
new file mode 100644
index 000000000..cdfaa7f24
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/local-index-unordered.yaml
@@ -0,0 +1,52 @@
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.1.0.tgz
+ name: nginx
+ description: string
+ version: 0.1.0
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ alpine:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - linux
+ - alpine
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
+ chartWithNoURL:
+ - name: chartWithNoURL
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
diff --git a/helm/pkg/repo/v1/testdata/local-index.json b/helm/pkg/repo/v1/testdata/local-index.json
new file mode 100644
index 000000000..25296d5ca
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/local-index.json
@@ -0,0 +1,53 @@
+{
+ "apiVersion": "v1",
+ "entries": {
+ "nginx": [
+ {
+ "urls": ["https://charts.helm.sh/stable/nginx-0.2.0.tgz"],
+ "name": "nginx",
+ "description": "string",
+ "version": "0.2.0",
+ "home": "https://github.com/something/else",
+ "digest": "sha256:1234567890abcdef",
+ "keywords": ["popular", "web server", "proxy"],
+ "apiVersion": "v2"
+ },
+ {
+ "urls": ["https://charts.helm.sh/stable/nginx-0.1.0.tgz"],
+ "name": "nginx",
+ "description": "string",
+ "version": "0.1.0",
+ "home": "https://github.com/something",
+ "digest": "sha256:1234567890abcdef",
+ "keywords": ["popular", "web server", "proxy"],
+ "apiVersion": "v2"
+ }
+ ],
+ "alpine": [
+ {
+ "urls": [
+ "https://charts.helm.sh/stable/alpine-1.0.0.tgz",
+ "http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz"
+ ],
+ "name": "alpine",
+ "description": "string",
+ "version": "1.0.0",
+ "home": "https://github.com/something",
+ "keywords": ["linux", "alpine", "small", "sumtin"],
+ "digest": "sha256:1234567890abcdef",
+ "apiVersion": "v2"
+ }
+ ],
+ "chartWithNoURL": [
+ {
+ "name": "chartWithNoURL",
+ "description": "string",
+ "version": "1.0.0",
+ "home": "https://github.com/something",
+ "keywords": ["small", "sumtin"],
+ "digest": "sha256:1234567890abcdef",
+ "apiVersion": "v2"
+ }
+ ]
+ }
+}
diff --git a/helm/pkg/repo/v1/testdata/local-index.yaml b/helm/pkg/repo/v1/testdata/local-index.yaml
new file mode 100644
index 000000000..d61f40dda
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/local-index.yaml
@@ -0,0 +1,52 @@
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.1.0.tgz
+ name: nginx
+ description: string
+ version: 0.1.0
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ apiVersion: v2
+ alpine:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - linux
+ - alpine
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
+ chartWithNoURL:
+ - name: chartWithNoURL
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
+ apiVersion: v2
diff --git a/helm/pkg/repo/v1/testdata/old-repositories.yaml b/helm/pkg/repo/v1/testdata/old-repositories.yaml
new file mode 100644
index 000000000..3fb55b060
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/old-repositories.yaml
@@ -0,0 +1,3 @@
+best-charts-ever: http://best-charts-ever.com
+okay-charts: http://okay-charts.org
+example123: http://examplecharts.net/charts/123
diff --git a/helm/pkg/repo/v1/testdata/repositories.yaml b/helm/pkg/repo/v1/testdata/repositories.yaml
new file mode 100644
index 000000000..a28c48eab
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/repositories.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+repositories:
+ - name: stable
+ url: https://example.com/stable/charts
+ cache: stable-index.yaml
+ - name: incubator
+ url: https://example.com/incubator
+ cache: incubator-index.yaml
diff --git a/helm/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz b/helm/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz
new file mode 100644
index 000000000..8731dce02
Binary files /dev/null and b/helm/pkg/repo/v1/testdata/repository/frobnitz-1.2.3.tgz differ
diff --git a/helm/pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz b/helm/pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz
new file mode 100644
index 000000000..48d65f491
Binary files /dev/null and b/helm/pkg/repo/v1/testdata/repository/sprocket-1.1.0.tgz differ
diff --git a/helm/pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz b/helm/pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz
new file mode 100644
index 000000000..6fdc73c2b
Binary files /dev/null and b/helm/pkg/repo/v1/testdata/repository/sprocket-1.2.0.tgz differ
diff --git a/helm/pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz b/helm/pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz
new file mode 100644
index 000000000..6f1e8564c
Binary files /dev/null and b/helm/pkg/repo/v1/testdata/repository/universe/zarthal-1.0.0.tgz differ
diff --git a/helm/pkg/repo/v1/testdata/server/index.yaml b/helm/pkg/repo/v1/testdata/server/index.yaml
new file mode 100644
index 000000000..d627928b2
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/server/index.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+entries:
+ nginx:
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.1.0.tgz
+ name: nginx
+ description: string
+ version: 0.1.0
+ home: https://github.com/something
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ - urls:
+ - https://charts.helm.sh/stable/nginx-0.2.0.tgz
+ name: nginx
+ description: string
+ version: 0.2.0
+ home: https://github.com/something/else
+ digest: "sha256:1234567890abcdef"
+ keywords:
+ - popular
+ - web server
+ - proxy
+ alpine:
+ - urls:
+ - https://charts.helm.sh/stable/alpine-1.0.0.tgz
+ - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz
+ name: alpine
+ description: string
+ version: 1.0.0
+ home: https://github.com/something
+ keywords:
+ - linux
+ - alpine
+ - small
+ - sumtin
+ digest: "sha256:1234567890abcdef"
diff --git a/helm/pkg/repo/v1/testdata/server/test.txt b/helm/pkg/repo/v1/testdata/server/test.txt
new file mode 100644
index 000000000..557db03de
--- /dev/null
+++ b/helm/pkg/repo/v1/testdata/server/test.txt
@@ -0,0 +1 @@
+Hello World
diff --git a/helm/pkg/storage/driver/cfgmaps.go b/helm/pkg/storage/driver/cfgmaps.go
new file mode 100644
index 000000000..f82ade5e9
--- /dev/null
+++ b/helm/pkg/storage/driver/cfgmaps.go
@@ -0,0 +1,288 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "strconv"
+ "strings"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kblabels "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/validation"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/release"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var _ Driver = (*ConfigMaps)(nil)
+
+// ConfigMapsDriverName is the string name of the driver.
+const ConfigMapsDriverName = "ConfigMap"
+
+// ConfigMaps is a wrapper around an implementation of a kubernetes
+// ConfigMapsInterface.
+type ConfigMaps struct {
+ impl corev1.ConfigMapInterface
+
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
+// the kubernetes ConfigMapsInterface.
+func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
+ c := &ConfigMaps{
+ impl: impl,
+ }
+ c.SetLogger(slog.Default().Handler())
+ return c
+}
+
+// Name returns the name of the driver.
+func (cfgmaps *ConfigMaps) Name() string {
+ return ConfigMapsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (cfgmaps *ConfigMaps) Get(key string) (release.Releaser, error) {
+ // fetch the configmap holding the release named by key
+ obj, err := cfgmaps.impl.Get(context.Background(), key, metav1.GetOptions{})
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil, ErrReleaseNotFound
+ }
+
+ cfgmaps.Logger().Debug("failed to get release", slog.String("key", key), slog.Any("error", err))
+ return nil, err
+ }
+ // found the configmap, decode the base64 data string
+ r, err := decodeRelease(obj.Data["release"])
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to decode data", slog.String("key", key), slog.Any("error", err))
+ return nil, err
+ }
+ r.Labels = filterSystemLabels(obj.Labels)
+ // return the release object
+ return r, nil
+}
+
+// List fetches all releases and returns the list releases such
+// that filter(release) == true. An error is returned if the
+// configmap fails to retrieve the releases.
+func (cfgmaps *ConfigMaps) List(filter func(release.Releaser) bool) ([]release.Releaser, error) {
+ lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+ opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+ list, err := cfgmaps.impl.List(context.Background(), opts)
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to list releases", slog.Any("error", err))
+ return nil, err
+ }
+
+ var results []release.Releaser
+
+ // iterate over the configmaps object list
+ // and decode each release
+ for _, item := range list.Items {
+ rls, err := decodeRelease(item.Data["release"])
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to decode release", slog.Any("item", item), slog.Any("error", err))
+ continue
+ }
+
+ rls.Labels = item.Labels
+
+ if filter(rls) {
+ results = append(results, rls)
+ }
+ }
+ return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the configmap fails to retrieve the releases.
+func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]release.Releaser, error) {
+ ls := kblabels.Set{}
+ for k, v := range labels {
+ if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+ return nil, fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+ }
+ ls[k] = v
+ }
+
+ opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()}
+
+ list, err := cfgmaps.impl.List(context.Background(), opts)
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to query with labels", slog.Any("error", err))
+ return nil, err
+ }
+
+ if len(list.Items) == 0 {
+ return nil, ErrReleaseNotFound
+ }
+
+ var results []release.Releaser
+ for _, item := range list.Items {
+ rls, err := decodeRelease(item.Data["release"])
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to decode release", slog.Any("error", err))
+ continue
+ }
+ rls.Labels = item.Labels
+ results = append(results, rls)
+ }
+ return results, nil
+}
+
+// Create creates a new ConfigMap holding the release. If the
+// ConfigMap already exists, ErrReleaseExists is returned.
+func (cfgmaps *ConfigMaps) Create(key string, rls release.Releaser) error {
+ // set labels for configmaps object meta data
+ var lbs labels
+
+ rac, err := release.NewAccessor(rls)
+ if err != nil {
+ return err
+ }
+
+ lbs.init()
+ lbs.fromMap(rac.Labels())
+ lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix()))
+
+ rel, err := releaserToV1Release(rls)
+ if err != nil {
+ return err
+ }
+
+ // create a new configmap to hold the release
+ obj, err := newConfigMapsObject(key, rel, lbs)
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to encode release", slog.String("name", rac.Name()), slog.Any("error", err))
+ return err
+ }
+ // push the configmap object out into the kubiverse
+ if _, err := cfgmaps.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
+ if apierrors.IsAlreadyExists(err) {
+ return ErrReleaseExists
+ }
+
+ cfgmaps.Logger().Debug("failed to create release", slog.Any("error", err))
+ return err
+ }
+ return nil
+}
+
+// Update updates the ConfigMap holding the release. If not found
+// the ConfigMap is created to hold the release.
+func (cfgmaps *ConfigMaps) Update(key string, rel release.Releaser) error {
+ // set labels for configmaps object meta data
+ var lbs labels
+
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+
+ lbs.init()
+ lbs.fromMap(rls.Labels)
+ lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix()))
+
+ // create a new configmap object to hold the release
+ obj, err := newConfigMapsObject(key, rls, lbs)
+ if err != nil {
+ cfgmaps.Logger().Debug(
+ "failed to encode release",
+ slog.String("name", rls.Name),
+ slog.Any("error", err),
+ )
+ return err
+ }
+ // push the configmap object out into the kubiverse
+ _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
+ if err != nil {
+ cfgmaps.Logger().Debug("failed to update release", slog.Any("error", err))
+ return err
+ }
+ return nil
+}
+
+// Delete deletes the ConfigMap holding the release named by key.
+func (cfgmaps *ConfigMaps) Delete(key string) (rls release.Releaser, err error) {
+ // fetch the release to check existence
+ if rls, err = cfgmaps.Get(key); err != nil {
+ return nil, err
+ }
+ // delete the release
+ if err = cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil {
+ return rls, err
+ }
+ return rls, nil
+}
+
+// newConfigMapsObject constructs a kubernetes ConfigMap object
+// to store a release. Each configmap data entry is the base64
+// encoded gzipped string of a release.
+//
+// The following labels are used within each configmap:
+//
+// "modifiedAt" - timestamp indicating when this configmap was last modified. (set in Update)
+// "createdAt" - timestamp indicating when this configmap was created. (set in Create)
+// "version" - version of the release.
+// "status" - status of the release (see pkg/release/status.go for variants)
+// "owner" - owner of the configmap, currently "helm".
+// "name" - name of the release.
+func newConfigMapsObject(key string, rls *rspb.Release, lbs labels) (*v1.ConfigMap, error) {
+ const owner = "helm"
+
+ // encode the release
+ s, err := encodeRelease(rls)
+ if err != nil {
+ return nil, err
+ }
+
+ if lbs == nil {
+ lbs.init()
+ }
+
+ // apply custom labels
+ lbs.fromMap(rls.Labels)
+
+ // apply labels
+ lbs.set("name", rls.Name)
+ lbs.set("owner", owner)
+ lbs.set("status", rls.Info.Status.String())
+ lbs.set("version", strconv.Itoa(rls.Version))
+
+ // create and return configmap object
+ return &v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: key,
+ Labels: lbs.toMap(),
+ },
+ Data: map[string]string{"release": s},
+ }, nil
+}
diff --git a/helm/pkg/storage/driver/cfgmaps_test.go b/helm/pkg/storage/driver/cfgmaps_test.go
new file mode 100644
index 000000000..8beb45547
--- /dev/null
+++ b/helm/pkg/storage/driver/cfgmaps_test.go
@@ -0,0 +1,271 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "reflect"
+ "testing"
+
+ v1 "k8s.io/api/core/v1"
+
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestConfigMapName(t *testing.T) {
+ c := newTestFixtureCfgMaps(t)
+ if c.Name() != ConfigMapsDriverName {
+ t.Errorf("Expected name to be %q, got %q", ConfigMapsDriverName, c.Name())
+ }
+}
+
+func TestConfigMapGet(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ cfgmaps := newTestFixtureCfgMaps(t, []*rspb.Release{rel}...)
+
+ // get release with key
+ got, err := cfgmaps.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release: %s", err)
+ }
+ // compare fetched release with original
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected {%v}, got {%v}", rel, got)
+ }
+}
+
+func TestUncompressedConfigMapGet(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ // Create a test fixture which contains an uncompressed release
+ cfgmap, err := newConfigMapsObject(key, rel, nil)
+ if err != nil {
+ t.Fatalf("Failed to create configmap: %s", err)
+ }
+ b, err := json.Marshal(rel)
+ if err != nil {
+ t.Fatalf("Failed to marshal release: %s", err)
+ }
+ cfgmap.Data["release"] = base64.StdEncoding.EncodeToString(b)
+ var mock MockConfigMapsInterface
+ mock.objects = map[string]*v1.ConfigMap{key: cfgmap}
+ cfgmaps := NewConfigMaps(&mock)
+
+ // get release with key
+ got, err := cfgmaps.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release: %s", err)
+ }
+ // compare fetched release with original
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected {%v}, got {%v}", rel, got)
+ }
+}
+
+func convertReleaserToV1(t *testing.T, rel release.Releaser) *rspb.Release {
+ t.Helper()
+ switch r := rel.(type) {
+ case rspb.Release:
+ return &r
+ case *rspb.Release:
+ return r
+ case nil:
+ return nil
+ }
+
+ t.Fatalf("Unsupported release type: %T", rel)
+ return nil
+}
+
+func TestConfigMapList(t *testing.T) {
+ cfgmaps := newTestFixtureCfgMaps(t, []*rspb.Release{
+ releaseStub("key-1", 1, "default", common.StatusUninstalled),
+ releaseStub("key-2", 1, "default", common.StatusUninstalled),
+ releaseStub("key-3", 1, "default", common.StatusDeployed),
+ releaseStub("key-4", 1, "default", common.StatusDeployed),
+ releaseStub("key-5", 1, "default", common.StatusSuperseded),
+ releaseStub("key-6", 1, "default", common.StatusSuperseded),
+ }...)
+
+ // list all deleted releases
+ del, err := cfgmaps.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusUninstalled
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deleted: %s", err)
+ }
+ if len(del) != 2 {
+ t.Errorf("Expected 2 deleted, got %d:\n%v\n", len(del), del)
+ }
+
+ // list all deployed releases
+ dpl, err := cfgmaps.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusDeployed
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deployed: %s", err)
+ }
+ if len(dpl) != 2 {
+ t.Errorf("Expected 2 deployed, got %d", len(dpl))
+ }
+
+ // list all superseded releases
+ ssd, err := cfgmaps.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusSuperseded
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list superseded: %s", err)
+ }
+ if len(ssd) != 2 {
+ t.Errorf("Expected 2 superseded, got %d", len(ssd))
+ }
+ // Check if release having both system and custom labels, this is needed to ensure that selector filtering would work.
+ rls := convertReleaserToV1(t, ssd[0])
+ _, ok := rls.Labels["name"]
+ if !ok {
+ t.Fatalf("Expected 'name' label in results, actual %v", rls.Labels)
+ }
+ _, ok = rls.Labels["key1"]
+ if !ok {
+ t.Fatalf("Expected 'key1' label in results, actual %v", rls.Labels)
+ }
+}
+
+func TestConfigMapQuery(t *testing.T) {
+ cfgmaps := newTestFixtureCfgMaps(t, []*rspb.Release{
+ releaseStub("key-1", 1, "default", common.StatusUninstalled),
+ releaseStub("key-2", 1, "default", common.StatusUninstalled),
+ releaseStub("key-3", 1, "default", common.StatusDeployed),
+ releaseStub("key-4", 1, "default", common.StatusDeployed),
+ releaseStub("key-5", 1, "default", common.StatusSuperseded),
+ releaseStub("key-6", 1, "default", common.StatusSuperseded),
+ }...)
+
+ rls, err := cfgmaps.Query(map[string]string{"status": "deployed"})
+ if err != nil {
+ t.Errorf("Failed to query: %s", err)
+ }
+ if len(rls) != 2 {
+ t.Errorf("Expected 2 results, got %d", len(rls))
+ }
+
+ _, err = cfgmaps.Query(map[string]string{"name": "notExist"})
+ if err != ErrReleaseNotFound {
+ t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err)
+ }
+}
+
+func TestConfigMapCreate(t *testing.T) {
+ cfgmaps := newTestFixtureCfgMaps(t)
+
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ // store the release in a configmap
+ if err := cfgmaps.Create(key, rel); err != nil {
+ t.Fatalf("Failed to create release with key %q: %s", key, err)
+ }
+
+ // get the release back
+ got, err := cfgmaps.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release with key %q: %s", key, err)
+ }
+
+ // compare created release with original
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected {%v}, got {%v}", rel, got)
+ }
+}
+
+func TestConfigMapUpdate(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ cfgmaps := newTestFixtureCfgMaps(t, []*rspb.Release{rel}...)
+
+ // modify release status code
+ rel.Info.Status = common.StatusSuperseded
+
+ // perform the update
+ if err := cfgmaps.Update(key, rel); err != nil {
+ t.Fatalf("Failed to update release: %s", err)
+ }
+
+ // fetch the updated release
+ goti, err := cfgmaps.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release with key %q: %s", key, err)
+ }
+ got := convertReleaserToV1(t, goti)
+
+ // check release has actually been updated by comparing modified fields
+ if rel.Info.Status != got.Info.Status {
+ t.Errorf("Expected status %s, got status %s", rel.Info.Status.String(), got.Info.Status.String())
+ }
+}
+
+func TestConfigMapDelete(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ cfgmaps := newTestFixtureCfgMaps(t, []*rspb.Release{rel}...)
+
+ // perform the delete on a non-existent release
+ _, err := cfgmaps.Delete("nonexistent")
+ if err != ErrReleaseNotFound {
+ t.Fatalf("Expected ErrReleaseNotFound: got {%v}", err)
+ }
+
+ // perform the delete
+ rls, err := cfgmaps.Delete(key)
+ if err != nil {
+ t.Fatalf("Failed to delete release with key %q: %s", key, err)
+ }
+ if !reflect.DeepEqual(rel, rls) {
+ t.Errorf("Expected {%v}, got {%v}", rel, rls)
+ }
+ _, err = cfgmaps.Get(key)
+ if !errors.Is(err, ErrReleaseNotFound) {
+ t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err)
+ }
+}
diff --git a/helm/pkg/storage/driver/driver.go b/helm/pkg/storage/driver/driver.go
new file mode 100644
index 000000000..6efd1dbaa
--- /dev/null
+++ b/helm/pkg/storage/driver/driver.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "errors"
+ "fmt"
+
+ "helm.sh/helm/v4/pkg/release"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var (
+ // ErrReleaseNotFound indicates that a release is not found.
+ ErrReleaseNotFound = errors.New("release: not found")
+ // ErrReleaseExists indicates that a release already exists.
+ ErrReleaseExists = errors.New("release: already exists")
+ // ErrInvalidKey indicates that a release key could not be parsed.
+ ErrInvalidKey = errors.New("release: invalid key")
+ // ErrNoDeployedReleases indicates that there are no releases with the given key in the deployed state
+ ErrNoDeployedReleases = errors.New("has no deployed releases")
+)
+
+// StorageDriverError records an error and the release name that caused it
+type StorageDriverError struct {
+ ReleaseName string
+ Err error
+}
+
+func (e *StorageDriverError) Error() string {
+ return fmt.Sprintf("%q %s", e.ReleaseName, e.Err.Error())
+}
+
+func (e *StorageDriverError) Unwrap() error { return e.Err }
+
+func NewErrNoDeployedReleases(releaseName string) error {
+ return &StorageDriverError{
+ ReleaseName: releaseName,
+ Err: ErrNoDeployedReleases,
+ }
+}
+
+// Creator is the interface that wraps the Create method.
+//
+// Create stores the release or returns ErrReleaseExists
+// if an identical release already exists.
+type Creator interface {
+ Create(key string, rls release.Releaser) error
+}
+
+// Updator is the interface that wraps the Update method.
+//
+// Update updates an existing release or returns
+// ErrReleaseNotFound if the release does not exist.
+type Updator interface {
+ Update(key string, rls release.Releaser) error
+}
+
+// Deletor is the interface that wraps the Delete method.
+//
+// Delete deletes the release named by key or returns
+// ErrReleaseNotFound if the release does not exist.
+type Deletor interface {
+ Delete(key string) (release.Releaser, error)
+}
+
+// Queryor is the interface that wraps the Get and List methods.
+//
+// Get returns the release named by key or returns ErrReleaseNotFound
+// if the release does not exist.
+//
+// List returns the set of all releases that satisfy the filter predicate.
+//
+// Query returns the set of all releases that match the provided label set.
+type Queryor interface {
+ Get(key string) (release.Releaser, error)
+ List(filter func(release.Releaser) bool) ([]release.Releaser, error)
+ Query(labels map[string]string) ([]release.Releaser, error)
+}
+
+// Driver is the interface composed of Creator, Updator, Deletor, and Queryor
+// interfaces. It defines the behavior for storing, updating, deleted,
+// and retrieving Helm releases from some underlying storage mechanism,
+// e.g. memory, configmaps.
+type Driver interface {
+ Creator
+ Updator
+ Deletor
+ Queryor
+ Name() string
+}
+
+// releaserToV1Release is a helper function to convert a v1 release passed by interface
+// into the type object.
+func releaserToV1Release(rel release.Releaser) (*rspb.Release, error) {
+ switch r := rel.(type) {
+ case rspb.Release:
+ return &r, nil
+ case *rspb.Release:
+ return r, nil
+ case nil:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unsupported release type: %T", rel)
+ }
+}
diff --git a/helm/pkg/storage/driver/labels.go b/helm/pkg/storage/driver/labels.go
new file mode 100644
index 000000000..eb7118fe5
--- /dev/null
+++ b/helm/pkg/storage/driver/labels.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+// labels is a map of key value pairs to be included as metadata in a configmap object.
+type labels map[string]string
+
+func (lbs *labels) init() { *lbs = labels(make(map[string]string)) }
+func (lbs labels) get(key string) string { return lbs[key] }
+func (lbs labels) set(key, val string) { lbs[key] = val }
+
+func (lbs labels) keys() (ls []string) {
+ for key := range lbs {
+ ls = append(ls, key)
+ }
+ return
+}
+
+func (lbs labels) match(set labels) bool {
+ for _, key := range set.keys() {
+ if lbs.get(key) != set.get(key) {
+ return false
+ }
+ }
+ return true
+}
+
+func (lbs labels) toMap() map[string]string { return lbs }
+
+func (lbs *labels) fromMap(kvs map[string]string) {
+ for k, v := range kvs {
+ lbs.set(k, v)
+ }
+}
diff --git a/helm/pkg/storage/driver/labels_test.go b/helm/pkg/storage/driver/labels_test.go
new file mode 100644
index 000000000..81e561c15
--- /dev/null
+++ b/helm/pkg/storage/driver/labels_test.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "testing"
+)
+
+func TestLabelsMatch(t *testing.T) {
+ var tests = []struct {
+ desc string
+ set1 labels
+ set2 labels
+ expect bool
+ }{
+ {
+ "equal labels sets",
+ labels(map[string]string{"KEY_A": "VAL_A", "KEY_B": "VAL_B"}),
+ labels(map[string]string{"KEY_A": "VAL_A", "KEY_B": "VAL_B"}),
+ true,
+ },
+ {
+ "disjoint label sets",
+ labels(map[string]string{"KEY_C": "VAL_C", "KEY_D": "VAL_D"}),
+ labels(map[string]string{"KEY_A": "VAL_A", "KEY_B": "VAL_B"}),
+ false,
+ },
+ }
+
+ for _, tt := range tests {
+ if !tt.set1.match(tt.set2) && tt.expect {
+ t.Fatalf("Expected match '%s'\n", tt.desc)
+ }
+ }
+}
diff --git a/helm/pkg/storage/driver/memory.go b/helm/pkg/storage/driver/memory.go
new file mode 100644
index 000000000..7ea4a014a
--- /dev/null
+++ b/helm/pkg/storage/driver/memory.go
@@ -0,0 +1,255 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "log/slog"
+ "strconv"
+ "strings"
+ "sync"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/release"
+)
+
+var _ Driver = (*Memory)(nil)
+
+const (
+ // MemoryDriverName is the string name of this driver.
+ MemoryDriverName = "Memory"
+
+ defaultNamespace = "default"
+)
+
+// A map of release names to list of release records
+type memReleases map[string]records
+
+// Memory is the in-memory storage driver implementation.
+type Memory struct {
+ sync.RWMutex
+ namespace string
+ // A map of namespaces to releases
+ cache map[string]memReleases
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+// NewMemory initializes a new memory driver.
+func NewMemory() *Memory {
+ m := &Memory{cache: map[string]memReleases{}, namespace: "default"}
+ m.SetLogger(slog.Default().Handler())
+ return m
+}
+
+// SetNamespace sets a specific namespace in which releases will be accessed.
+// An empty string indicates all namespaces (for the list operation)
+func (mem *Memory) SetNamespace(ns string) {
+ mem.namespace = ns
+}
+
+// Name returns the name of the driver.
+func (mem *Memory) Name() string {
+ return MemoryDriverName
+}
+
+// Get returns the release named by key or returns ErrReleaseNotFound.
+func (mem *Memory) Get(key string) (release.Releaser, error) {
+ defer unlock(mem.rlock())
+
+ keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.")
+ switch elems := strings.Split(keyWithoutPrefix, ".v"); len(elems) {
+ case 2:
+ name, ver := elems[0], elems[1]
+ if _, err := strconv.Atoi(ver); err != nil {
+ return nil, ErrInvalidKey
+ }
+ if recs, ok := mem.cache[mem.namespace][name]; ok {
+ if r := recs.Get(key); r != nil {
+ return r.rls, nil
+ }
+ }
+ return nil, ErrReleaseNotFound
+ default:
+ return nil, ErrInvalidKey
+ }
+}
+
+// List returns the list of all releases such that filter(release) == true
+func (mem *Memory) List(filter func(release.Releaser) bool) ([]release.Releaser, error) {
+ defer unlock(mem.rlock())
+
+ var ls []release.Releaser
+ for namespace := range mem.cache {
+ if mem.namespace != "" {
+ // Should only list releases of this namespace
+ namespace = mem.namespace
+ }
+ for _, recs := range mem.cache[namespace] {
+ recs.Iter(func(_ int, rec *record) bool {
+ if filter(rec.rls) {
+ ls = append(ls, rec.rls)
+ }
+ return true
+ })
+ }
+ if mem.namespace != "" {
+ // Should only list releases of this namespace
+ break
+ }
+ }
+ return ls, nil
+}
+
+// Query returns the set of releases that match the provided set of labels
+func (mem *Memory) Query(keyvals map[string]string) ([]release.Releaser, error) {
+ defer unlock(mem.rlock())
+
+ var lbs labels
+
+ lbs.init()
+ lbs.fromMap(keyvals)
+
+ var ls []release.Releaser
+ for namespace := range mem.cache {
+ if mem.namespace != "" {
+ // Should only query releases of this namespace
+ namespace = mem.namespace
+ }
+ for _, recs := range mem.cache[namespace] {
+ recs.Iter(func(_ int, rec *record) bool {
+ // A query for a release name that doesn't exist (has been deleted)
+ // can cause rec to be nil.
+ if rec == nil {
+ return false
+ }
+ if rec.lbs.match(lbs) {
+ ls = append(ls, rec.rls)
+ }
+ return true
+ })
+ }
+ if mem.namespace != "" {
+ // Should only query releases of this namespace
+ break
+ }
+ }
+
+ if len(ls) == 0 {
+ return nil, ErrReleaseNotFound
+ }
+
+ return ls, nil
+}
+
+// Create creates a new release or returns ErrReleaseExists.
+func (mem *Memory) Create(key string, rel release.Releaser) error {
+ defer unlock(mem.wlock())
+
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+ // For backwards compatibility, we protect against an unset namespace
+ namespace := rls.Namespace
+ if namespace == "" {
+ namespace = defaultNamespace
+ }
+ mem.SetNamespace(namespace)
+
+ if _, ok := mem.cache[namespace]; !ok {
+ mem.cache[namespace] = memReleases{}
+ }
+
+ if recs, ok := mem.cache[namespace][rls.Name]; ok {
+ if err := recs.Add(newRecord(key, rls)); err != nil {
+ return err
+ }
+ mem.cache[namespace][rls.Name] = recs
+ return nil
+ }
+ mem.cache[namespace][rls.Name] = records{newRecord(key, rls)}
+ return nil
+}
+
+// Update updates a release or returns ErrReleaseNotFound.
+func (mem *Memory) Update(key string, rel release.Releaser) error {
+ defer unlock(mem.wlock())
+
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+
+ // For backwards compatibility, we protect against an unset namespace
+ namespace := rls.Namespace
+ if namespace == "" {
+ namespace = defaultNamespace
+ }
+ mem.SetNamespace(namespace)
+
+ if _, ok := mem.cache[namespace]; ok {
+ if rs, ok := mem.cache[namespace][rls.Name]; ok && rs.Exists(key) {
+ rs.Replace(key, newRecord(key, rls))
+ return nil
+ }
+ }
+ return ErrReleaseNotFound
+}
+
+// Delete deletes a release or returns ErrReleaseNotFound.
+func (mem *Memory) Delete(key string) (release.Releaser, error) {
+ defer unlock(mem.wlock())
+
+ keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.")
+ elems := strings.Split(keyWithoutPrefix, ".v")
+
+ if len(elems) != 2 {
+ return nil, ErrInvalidKey
+ }
+
+ name, ver := elems[0], elems[1]
+ if _, err := strconv.Atoi(ver); err != nil {
+ return nil, ErrInvalidKey
+ }
+ if _, ok := mem.cache[mem.namespace]; ok {
+ if recs, ok := mem.cache[mem.namespace][name]; ok {
+ if r := recs.Remove(key); r != nil {
+ // recs.Remove changes the slice reference, so we have to re-assign it.
+ mem.cache[mem.namespace][name] = recs
+ return r.rls, nil
+ }
+ }
+ }
+ return nil, ErrReleaseNotFound
+}
+
+// wlock locks mem for writing
+func (mem *Memory) wlock() func() {
+ mem.Lock()
+ return func() { mem.Unlock() }
+}
+
+// rlock locks mem for reading
+func (mem *Memory) rlock() func() {
+ mem.RLock()
+ return func() { mem.RUnlock() }
+}
+
+// unlock calls fn which reverses a mem.rlock or mem.wlock. e.g:
+// ```defer unlock(mem.rlock())```, locks mem for reading at the
+// call point of defer and unlocks upon exiting the block.
+func unlock(fn func()) { fn() }
diff --git a/helm/pkg/storage/driver/memory_test.go b/helm/pkg/storage/driver/memory_test.go
new file mode 100644
index 000000000..329b82b2f
--- /dev/null
+++ b/helm/pkg/storage/driver/memory_test.go
@@ -0,0 +1,304 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestMemoryName(t *testing.T) {
+ if mem := NewMemory(); mem.Name() != MemoryDriverName {
+ t.Errorf("Expected name to be %q, got %q", MemoryDriverName, mem.Name())
+ }
+}
+
+func TestMemoryCreate(t *testing.T) {
+ var tests = []struct {
+ desc string
+ rls *rspb.Release
+ err bool
+ }{
+ {
+ "create should succeed",
+ releaseStub("rls-c", 1, "default", common.StatusDeployed),
+ false,
+ },
+ {
+ "create should fail (release already exists)",
+ releaseStub("rls-a", 1, "default", common.StatusDeployed),
+ true,
+ },
+ {
+ "create in namespace should succeed",
+ releaseStub("rls-a", 1, "mynamespace", common.StatusDeployed),
+ false,
+ },
+ {
+ "create in other namespace should fail (release already exists)",
+ releaseStub("rls-c", 1, "mynamespace", common.StatusDeployed),
+ true,
+ },
+ }
+
+ ts := tsFixtureMemory(t)
+ for _, tt := range tests {
+ key := testKey(tt.rls.Name, tt.rls.Version)
+ rls := tt.rls
+
+ if err := ts.Create(key, rls); err != nil {
+ if !tt.err {
+ t.Fatalf("failed to create %q: %s", tt.desc, err)
+ }
+ } else if tt.err {
+ t.Fatalf("Did not get expected error for %q\n", tt.desc)
+ }
+ }
+}
+
+func TestMemoryGet(t *testing.T) {
+ var tests = []struct {
+ desc string
+ key string
+ namespace string
+ err bool
+ }{
+ {"release key should exist", "rls-a.v1", "default", false},
+ {"release key should not exist", "rls-a.v5", "default", true},
+ {"release key in namespace should exist", "rls-c.v1", "mynamespace", false},
+ {"release key in namespace should not exist", "rls-a.v1", "mynamespace", true},
+ }
+
+ ts := tsFixtureMemory(t)
+ for _, tt := range tests {
+ ts.SetNamespace(tt.namespace)
+ if _, err := ts.Get(tt.key); err != nil {
+ if !tt.err {
+ t.Fatalf("Failed %q to get '%s': %q\n", tt.desc, tt.key, err)
+ }
+ } else if tt.err {
+ t.Fatalf("Did not get expected error for %q '%s'\n", tt.desc, tt.key)
+ }
+ }
+}
+
+func TestMemoryList(t *testing.T) {
+ ts := tsFixtureMemory(t)
+ ts.SetNamespace("default")
+
+ // list all deployed releases
+ dpl, err := ts.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusDeployed
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deployed releases: %s", err)
+ }
+ if len(dpl) != 2 {
+ t.Errorf("Expected 2 deployed, got %d", len(dpl))
+ }
+
+ // list all superseded releases
+ ssd, err := ts.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusSuperseded
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list superseded releases: %s", err)
+ }
+ if len(ssd) != 6 {
+ t.Errorf("Expected 6 superseded, got %d", len(ssd))
+ }
+
+ // list all deleted releases
+ del, err := ts.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusUninstalled
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deleted releases: %s", err)
+ }
+ if len(del) != 0 {
+ t.Errorf("Expected 0 deleted, got %d", len(del))
+ }
+}
+
+func TestMemoryQuery(t *testing.T) {
+ var tests = []struct {
+ desc string
+ xlen int
+ namespace string
+ lbs map[string]string
+ }{
+ {
+ "should be 2 query results",
+ 2,
+ "default",
+ map[string]string{"status": "deployed"},
+ },
+ {
+ "should be 1 query result",
+ 1,
+ "mynamespace",
+ map[string]string{"status": "deployed"},
+ },
+ }
+
+ ts := tsFixtureMemory(t)
+ for _, tt := range tests {
+ ts.SetNamespace(tt.namespace)
+ l, err := ts.Query(tt.lbs)
+ if err != nil {
+ t.Fatalf("Failed to query: %s\n", err)
+ }
+
+ if tt.xlen != len(l) {
+ t.Fatalf("Expected %d results, actual %d\n", tt.xlen, len(l))
+ }
+ }
+}
+
+func TestMemoryUpdate(t *testing.T) {
+ var tests = []struct {
+ desc string
+ key string
+ rls *rspb.Release
+ err bool
+ }{
+ {
+ "update release status",
+ "rls-a.v4",
+ releaseStub("rls-a", 4, "default", common.StatusSuperseded),
+ false,
+ },
+ {
+ "update release does not exist",
+ "rls-c.v1",
+ releaseStub("rls-c", 1, "default", common.StatusUninstalled),
+ true,
+ },
+ {
+ "update release status in namespace",
+ "rls-c.v4",
+ releaseStub("rls-c", 4, "mynamespace", common.StatusSuperseded),
+ false,
+ },
+ {
+ "update release in namespace does not exist",
+ "rls-a.v1",
+ releaseStub("rls-a", 1, "mynamespace", common.StatusUninstalled),
+ true,
+ },
+ }
+
+ ts := tsFixtureMemory(t)
+ for _, tt := range tests {
+ if err := ts.Update(tt.key, tt.rls); err != nil {
+ if !tt.err {
+ t.Fatalf("Failed %q: %s\n", tt.desc, err)
+ }
+ continue
+ } else if tt.err {
+ t.Fatalf("Did not get expected error for %q '%s'\n", tt.desc, tt.key)
+ }
+
+ ts.SetNamespace(tt.rls.Namespace)
+ r, err := ts.Get(tt.key)
+ if err != nil {
+ t.Fatalf("Failed to get: %s\n", err)
+ }
+
+ if !reflect.DeepEqual(r, tt.rls) {
+ t.Fatalf("Expected %v, actual %v\n", tt.rls, r)
+ }
+ }
+}
+
+func TestMemoryDelete(t *testing.T) {
+ var tests = []struct {
+ desc string
+ key string
+ namespace string
+ err bool
+ }{
+ {"release key should exist", "rls-a.v4", "default", false},
+ {"release key should not exist", "rls-a.v5", "default", true},
+ {"release key from other namespace should not exist", "rls-c.v4", "default", true},
+ {"release key from namespace should exist", "rls-c.v4", "mynamespace", false},
+ {"release key from namespace should not exist", "rls-c.v5", "mynamespace", true},
+ {"release key from namespace2 should not exist", "rls-a.v4", "mynamespace", true},
+ }
+
+ ts := tsFixtureMemory(t)
+ ts.SetNamespace("")
+ start, err := ts.Query(map[string]string{"status": "deployed"})
+ if err != nil {
+ t.Errorf("Query failed: %s", err)
+ }
+ startLen := len(start)
+ for _, tt := range tests {
+ ts.SetNamespace(tt.namespace)
+
+ rel, err := ts.Delete(tt.key)
+ var rls *rspb.Release
+ if err == nil {
+ rls = convertReleaserToV1(t, rel)
+ }
+ if err != nil {
+ if !tt.err {
+ t.Fatalf("Failed %q to get '%s': %q\n", tt.desc, tt.key, err)
+ }
+ continue
+ } else if tt.err {
+ t.Fatalf("Did not get expected error for %q '%s'\n", tt.desc, tt.key)
+ } else if fmt.Sprintf("%s.v%d", rls.Name, rls.Version) != tt.key {
+ t.Fatalf("Asked for delete on %s, but deleted %d", tt.key, rls.Version)
+ }
+ _, err = ts.Get(tt.key)
+ if err == nil {
+ t.Errorf("Expected an error when asking for a deleted key")
+ }
+ }
+
+ // Make sure that the deleted records are gone.
+ ts.SetNamespace("")
+ end, err := ts.Query(map[string]string{"status": "deployed"})
+ if err != nil {
+ t.Errorf("Query failed: %s", err)
+ }
+ endLen := len(end)
+
+ if startLen-2 != endLen {
+ t.Errorf("expected end to be %d instead of %d", startLen-2, endLen)
+ for _, ee := range end {
+ rac, err := release.NewAccessor(ee)
+ assert.NoError(t, err, "unable to get release accessor")
+ t.Logf("Name: %s, Version: %d", rac.Name(), rac.Version())
+ }
+ }
+
+}
diff --git a/helm/pkg/storage/driver/mock_test.go b/helm/pkg/storage/driver/mock_test.go
new file mode 100644
index 000000000..e62b02f43
--- /dev/null
+++ b/helm/pkg/storage/driver/mock_test.go
@@ -0,0 +1,275 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ sqlmock "github.com/DATA-DOG/go-sqlmock"
+ sq "github.com/Masterminds/squirrel"
+ "github.com/jmoiron/sqlx"
+
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kblabels "k8s.io/apimachinery/pkg/labels"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func releaseStub(name string, vers int, namespace string, status common.Status) *rspb.Release {
+ return &rspb.Release{
+ Name: name,
+ Version: vers,
+ Namespace: namespace,
+ Info: &rspb.Info{Status: status},
+ Labels: map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ },
+ }
+}
+
+func testKey(name string, vers int) string {
+ return fmt.Sprintf("%s.v%d", name, vers)
+}
+
+func tsFixtureMemory(t *testing.T) *Memory {
+ t.Helper()
+ hs := []*rspb.Release{
+ // rls-a
+ releaseStub("rls-a", 4, "default", common.StatusDeployed),
+ releaseStub("rls-a", 1, "default", common.StatusSuperseded),
+ releaseStub("rls-a", 3, "default", common.StatusSuperseded),
+ releaseStub("rls-a", 2, "default", common.StatusSuperseded),
+ // rls-b
+ releaseStub("rls-b", 4, "default", common.StatusDeployed),
+ releaseStub("rls-b", 1, "default", common.StatusSuperseded),
+ releaseStub("rls-b", 3, "default", common.StatusSuperseded),
+ releaseStub("rls-b", 2, "default", common.StatusSuperseded),
+ // rls-c in other namespace
+ releaseStub("rls-c", 4, "mynamespace", common.StatusDeployed),
+ releaseStub("rls-c", 1, "mynamespace", common.StatusSuperseded),
+ releaseStub("rls-c", 3, "mynamespace", common.StatusSuperseded),
+ releaseStub("rls-c", 2, "mynamespace", common.StatusSuperseded),
+ }
+
+ mem := NewMemory()
+ for _, tt := range hs {
+ err := mem.Create(testKey(tt.Name, tt.Version), tt)
+ if err != nil {
+ t.Fatalf("Test setup failed to create: %s\n", err)
+ }
+ }
+ return mem
+}
+
+// newTestFixtureCfgMaps initializes a MockConfigMapsInterface.
+// ConfigMaps are created for each release provided.
+func newTestFixtureCfgMaps(t *testing.T, releases ...*rspb.Release) *ConfigMaps {
+ t.Helper()
+ var mock MockConfigMapsInterface
+ mock.Init(t, releases...)
+
+ return NewConfigMaps(&mock)
+}
+
+// MockConfigMapsInterface mocks a kubernetes ConfigMapsInterface
+type MockConfigMapsInterface struct {
+ corev1.ConfigMapInterface
+
+ objects map[string]*v1.ConfigMap
+}
+
+// Init initializes the MockConfigMapsInterface with the set of releases.
+func (mock *MockConfigMapsInterface) Init(t *testing.T, releases ...*rspb.Release) {
+ t.Helper()
+ mock.objects = map[string]*v1.ConfigMap{}
+
+ for _, rls := range releases {
+ objkey := testKey(rls.Name, rls.Version)
+
+ cfgmap, err := newConfigMapsObject(objkey, rls, nil)
+ if err != nil {
+ t.Fatalf("Failed to create configmap: %s", err)
+ }
+ mock.objects[objkey] = cfgmap
+ }
+}
+
+// Get returns the ConfigMap by name.
+func (mock *MockConfigMapsInterface) Get(_ context.Context, name string, _ metav1.GetOptions) (*v1.ConfigMap, error) {
+ object, ok := mock.objects[name]
+ if !ok {
+ return nil, apierrors.NewNotFound(v1.Resource("tests"), name)
+ }
+ return object, nil
+}
+
+// List returns all ConfigMaps.
+func (mock *MockConfigMapsInterface) List(_ context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) {
+ var list v1.ConfigMapList
+
+ labelSelector, err := kblabels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, cfgmap := range mock.objects {
+ if labelSelector.Matches(kblabels.Set(cfgmap.Labels)) {
+ list.Items = append(list.Items, *cfgmap)
+ }
+ }
+ return &list, nil
+}
+
+// Create creates a new ConfigMap.
+func (mock *MockConfigMapsInterface) Create(_ context.Context, cfgmap *v1.ConfigMap, _ metav1.CreateOptions) (*v1.ConfigMap, error) {
+ name := cfgmap.Name
+ if object, ok := mock.objects[name]; ok {
+ return object, apierrors.NewAlreadyExists(v1.Resource("tests"), name)
+ }
+ mock.objects[name] = cfgmap
+ return cfgmap, nil
+}
+
+// Update updates a ConfigMap.
+func (mock *MockConfigMapsInterface) Update(_ context.Context, cfgmap *v1.ConfigMap, _ metav1.UpdateOptions) (*v1.ConfigMap, error) {
+ name := cfgmap.Name
+ if _, ok := mock.objects[name]; !ok {
+ return nil, apierrors.NewNotFound(v1.Resource("tests"), name)
+ }
+ mock.objects[name] = cfgmap
+ return cfgmap, nil
+}
+
+// Delete deletes a ConfigMap by name.
+func (mock *MockConfigMapsInterface) Delete(_ context.Context, name string, _ metav1.DeleteOptions) error {
+ if _, ok := mock.objects[name]; !ok {
+ return apierrors.NewNotFound(v1.Resource("tests"), name)
+ }
+ delete(mock.objects, name)
+ return nil
+}
+
+// newTestFixtureSecrets initializes a MockSecretsInterface.
+// Secrets are created for each release provided.
+func newTestFixtureSecrets(t *testing.T, releases ...*rspb.Release) *Secrets {
+ t.Helper()
+ var mock MockSecretsInterface
+ mock.Init(t, releases...)
+
+ return NewSecrets(&mock)
+}
+
+// MockSecretsInterface mocks a kubernetes SecretsInterface
+type MockSecretsInterface struct {
+ corev1.SecretInterface
+
+ objects map[string]*v1.Secret
+}
+
+// Init initializes the MockSecretsInterface with the set of releases.
+func (mock *MockSecretsInterface) Init(t *testing.T, releases ...*rspb.Release) {
+ t.Helper()
+ mock.objects = map[string]*v1.Secret{}
+
+ for _, rls := range releases {
+ objkey := testKey(rls.Name, rls.Version)
+
+ secret, err := newSecretsObject(objkey, rls, nil)
+ if err != nil {
+ t.Fatalf("Failed to create secret: %s", err)
+ }
+ mock.objects[objkey] = secret
+ }
+}
+
+// Get returns the Secret by name.
+func (mock *MockSecretsInterface) Get(_ context.Context, name string, _ metav1.GetOptions) (*v1.Secret, error) {
+ object, ok := mock.objects[name]
+ if !ok {
+ return nil, apierrors.NewNotFound(v1.Resource("tests"), name)
+ }
+ return object, nil
+}
+
+// List returns all Secrets.
+func (mock *MockSecretsInterface) List(_ context.Context, opts metav1.ListOptions) (*v1.SecretList, error) {
+ var list v1.SecretList
+
+ labelSelector, err := kblabels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, secret := range mock.objects {
+ if labelSelector.Matches(kblabels.Set(secret.Labels)) {
+ list.Items = append(list.Items, *secret)
+ }
+ }
+ return &list, nil
+}
+
+// Create creates a new Secret.
+func (mock *MockSecretsInterface) Create(_ context.Context, secret *v1.Secret, _ metav1.CreateOptions) (*v1.Secret, error) {
+ name := secret.Name
+ if object, ok := mock.objects[name]; ok {
+ return object, apierrors.NewAlreadyExists(v1.Resource("tests"), name)
+ }
+ mock.objects[name] = secret
+ return secret, nil
+}
+
+// Update updates a Secret.
+func (mock *MockSecretsInterface) Update(_ context.Context, secret *v1.Secret, _ metav1.UpdateOptions) (*v1.Secret, error) {
+ name := secret.Name
+ if _, ok := mock.objects[name]; !ok {
+ return nil, apierrors.NewNotFound(v1.Resource("tests"), name)
+ }
+ mock.objects[name] = secret
+ return secret, nil
+}
+
+// Delete deletes a Secret by name.
+func (mock *MockSecretsInterface) Delete(_ context.Context, name string, _ metav1.DeleteOptions) error {
+ if _, ok := mock.objects[name]; !ok {
+ return apierrors.NewNotFound(v1.Resource("tests"), name)
+ }
+ delete(mock.objects, name)
+ return nil
+}
+
+// newTestFixtureSQL mocks the SQL database (for testing purposes)
+func newTestFixtureSQL(t *testing.T, _ ...*rspb.Release) (*SQL, sqlmock.Sqlmock) {
+ t.Helper()
+ sqlDB, mock, err := sqlmock.New()
+ if err != nil {
+ t.Fatalf("error when opening stub database connection: %v", err)
+ }
+
+ sqlxDB := sqlx.NewDb(sqlDB, "sqlmock")
+ return &SQL{
+ db: sqlxDB,
+ namespace: "default",
+ statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
+ }, mock
+}
diff --git a/helm/pkg/storage/driver/records.go b/helm/pkg/storage/driver/records.go
new file mode 100644
index 000000000..6b4efef3a
--- /dev/null
+++ b/helm/pkg/storage/driver/records.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "sort"
+ "strconv"
+
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+// records holds a list of in-memory release records
+type records []*record
+
+func (rs records) Len() int { return len(rs) }
+func (rs records) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
+func (rs records) Less(i, j int) bool { return rs[i].rls.Version < rs[j].rls.Version }
+
+func (rs *records) Add(r *record) error {
+ if r == nil {
+ return nil
+ }
+
+ if rs.Exists(r.key) {
+ return ErrReleaseExists
+ }
+
+ *rs = append(*rs, r)
+ sort.Sort(*rs)
+
+ return nil
+}
+
+func (rs records) Get(key string) *record {
+ if i, ok := rs.Index(key); ok {
+ return rs[i]
+ }
+ return nil
+}
+
+func (rs *records) Iter(fn func(int, *record) bool) {
+ cp := make([]*record, len(*rs))
+ copy(cp, *rs)
+
+ for i, r := range cp {
+ if !fn(i, r) {
+ return
+ }
+ }
+}
+
+func (rs *records) Index(key string) (int, bool) {
+ for i, r := range *rs {
+ if r.key == key {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+func (rs records) Exists(key string) bool {
+ _, ok := rs.Index(key)
+ return ok
+}
+
+func (rs *records) Remove(key string) (r *record) {
+ if i, ok := rs.Index(key); ok {
+ return rs.removeAt(i)
+ }
+ return nil
+}
+
+func (rs *records) Replace(key string, rec *record) *record {
+ if i, ok := rs.Index(key); ok {
+ old := (*rs)[i]
+ (*rs)[i] = rec
+ return old
+ }
+ return nil
+}
+
+func (rs *records) removeAt(index int) *record {
+ r := (*rs)[index]
+ (*rs)[index] = nil
+ copy((*rs)[index:], (*rs)[index+1:])
+ *rs = (*rs)[:len(*rs)-1]
+ return r
+}
+
+// record is the data structure used to cache releases
+// for the in-memory storage driver
+type record struct {
+ key string
+ lbs labels
+ rls *rspb.Release
+}
+
+// newRecord creates a new in-memory release record
+func newRecord(key string, rls *rspb.Release) *record {
+ var lbs labels
+
+ lbs.init()
+ lbs.set("name", rls.Name)
+ lbs.set("owner", "helm")
+ lbs.set("status", rls.Info.Status.String())
+ lbs.set("version", strconv.Itoa(rls.Version))
+
+ // return &record{key: key, lbs: lbs, rls: proto.Clone(rls).(*rspb.Release)}
+ return &record{key: key, lbs: lbs, rls: rls}
+}
diff --git a/helm/pkg/storage/driver/records_test.go b/helm/pkg/storage/driver/records_test.go
new file mode 100644
index 000000000..24e4ccb4e
--- /dev/null
+++ b/helm/pkg/storage/driver/records_test.go
@@ -0,0 +1,240 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "reflect"
+ "testing"
+
+ "helm.sh/helm/v4/pkg/release/common"
+)
+
+func TestRecordsAdd(t *testing.T) {
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ var tests = []struct {
+ desc string
+ key string
+ ok bool
+ rec *record
+ }{
+ {
+ "add valid key",
+ "rls-a.v3",
+ false,
+ newRecord("rls-a.v3", releaseStub("rls-a", 3, "default", common.StatusSuperseded)),
+ },
+ {
+ "add already existing key",
+ "rls-a.v1",
+ true,
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusDeployed)),
+ },
+ }
+
+ for _, tt := range tests {
+ if err := rs.Add(tt.rec); err != nil {
+ if !tt.ok {
+ t.Fatalf("failed: %q: %s\n", tt.desc, err)
+ }
+ }
+ }
+}
+
+func TestRecordsRemove(t *testing.T) {
+ var tests = []struct {
+ desc string
+ key string
+ ok bool
+ }{
+ {"remove valid key", "rls-a.v1", false},
+ {"remove invalid key", "rls-a.v", true},
+ {"remove non-existent key", "rls-z.v1", true},
+ }
+
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ startLen := rs.Len()
+
+ for _, tt := range tests {
+ if r := rs.Remove(tt.key); r == nil {
+ if !tt.ok {
+ t.Fatalf("Failed to %q (key = %s). Expected nil, got %v",
+ tt.desc,
+ tt.key,
+ r,
+ )
+ }
+ }
+ }
+
+ // We expect the total number of records will be less now than there were
+ // when we started.
+ endLen := rs.Len()
+ if endLen >= startLen {
+ t.Errorf("expected ending length %d to be less than starting length %d", endLen, startLen)
+ }
+}
+
+func TestRecordsRemoveAt(t *testing.T) {
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ if len(rs) != 2 {
+ t.Fatal("Expected len=2 for mock")
+ }
+
+ rs.Remove("rls-a.v1")
+ if len(rs) != 1 {
+ t.Fatalf("Expected length of rs to be 1, got %d", len(rs))
+ }
+}
+
+func TestRecordsGet(t *testing.T) {
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ var tests = []struct {
+ desc string
+ key string
+ rec *record
+ }{
+ {
+ "get valid key",
+ "rls-a.v1",
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ },
+ {
+ "get invalid key",
+ "rls-a.v3",
+ nil,
+ },
+ }
+
+ for _, tt := range tests {
+ got := rs.Get(tt.key)
+ if !reflect.DeepEqual(tt.rec, got) {
+ t.Fatalf("Expected %v, got %v", tt.rec, got)
+ }
+ }
+}
+
+func TestRecordsIndex(t *testing.T) {
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ var tests = []struct {
+ desc string
+ key string
+ sort int
+ }{
+ {
+ "get valid key",
+ "rls-a.v1",
+ 0,
+ },
+ {
+ "get invalid key",
+ "rls-a.v3",
+ -1,
+ },
+ }
+
+ for _, tt := range tests {
+ got, _ := rs.Index(tt.key)
+ if got != tt.sort {
+ t.Fatalf("Expected %d, got %d", tt.sort, got)
+ }
+ }
+}
+
+func TestRecordsExists(t *testing.T) {
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ var tests = []struct {
+ desc string
+ key string
+ ok bool
+ }{
+ {
+ "get valid key",
+ "rls-a.v1",
+ true,
+ },
+ {
+ "get invalid key",
+ "rls-a.v3",
+ false,
+ },
+ }
+
+ for _, tt := range tests {
+ got := rs.Exists(tt.key)
+ if got != tt.ok {
+ t.Fatalf("Expected %t, got %t", tt.ok, got)
+ }
+ }
+}
+
+func TestRecordsReplace(t *testing.T) {
+ rs := records([]*record{
+ newRecord("rls-a.v1", releaseStub("rls-a", 1, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ })
+
+ var tests = []struct {
+ desc string
+ key string
+ rec *record
+ expected *record
+ }{
+ {
+ "replace with existing key",
+ "rls-a.v2",
+ newRecord("rls-a.v3", releaseStub("rls-a", 3, "default", common.StatusSuperseded)),
+ newRecord("rls-a.v2", releaseStub("rls-a", 2, "default", common.StatusDeployed)),
+ },
+ {
+ "replace with non existing key",
+ "rls-a.v4",
+ newRecord("rls-a.v4", releaseStub("rls-a", 4, "default", common.StatusDeployed)),
+ nil,
+ },
+ }
+
+ for _, tt := range tests {
+ got := rs.Replace(tt.key, tt.rec)
+ if !reflect.DeepEqual(tt.expected, got) {
+ t.Fatalf("Expected %v, got %v", tt.expected, got)
+ }
+ }
+}
diff --git a/helm/pkg/storage/driver/secrets.go b/helm/pkg/storage/driver/secrets.go
new file mode 100644
index 000000000..a73f3cf05
--- /dev/null
+++ b/helm/pkg/storage/driver/secrets.go
@@ -0,0 +1,287 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "strconv"
+ "strings"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kblabels "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/validation"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/release"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var _ Driver = (*Secrets)(nil)
+
+// SecretsDriverName is the string name of the driver.
+const SecretsDriverName = "Secret"
+
+// Secrets is a wrapper around an implementation of a kubernetes
+// SecretsInterface.
+type Secrets struct {
+ impl corev1.SecretInterface
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+// NewSecrets initializes a new Secrets wrapping an implementation of
+// the kubernetes SecretsInterface.
+func NewSecrets(impl corev1.SecretInterface) *Secrets {
+ s := &Secrets{
+ impl: impl,
+ }
+ s.SetLogger(slog.Default().Handler())
+ return s
+}
+
+// Name returns the name of the driver.
+func (secrets *Secrets) Name() string {
+ return SecretsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (secrets *Secrets) Get(key string) (release.Releaser, error) {
+ // fetch the secret holding the release named by key
+ obj, err := secrets.impl.Get(context.Background(), key, metav1.GetOptions{})
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil, ErrReleaseNotFound
+ }
+ return nil, fmt.Errorf("get: failed to get %q: %w", key, err)
+ }
+ // found the secret, decode the base64 data string
+ r, err := decodeRelease(string(obj.Data["release"]))
+ if err != nil {
+ return r, fmt.Errorf("get: failed to decode data %q: %w", key, err)
+ }
+ r.Labels = filterSystemLabels(obj.Labels)
+ return r, nil
+}
+
+// List fetches all releases and returns the list releases such
+// that filter(release) == true. An error is returned if the
+// secret fails to retrieve the releases.
+func (secrets *Secrets) List(filter func(release.Releaser) bool) ([]release.Releaser, error) {
+ lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+ opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+ list, err := secrets.impl.List(context.Background(), opts)
+ if err != nil {
+ return nil, fmt.Errorf("list: failed to list: %w", err)
+ }
+
+ var results []release.Releaser
+
+ // iterate over the secrets object list
+ // and decode each release
+ for _, item := range list.Items {
+ rls, err := decodeRelease(string(item.Data["release"]))
+ if err != nil {
+ secrets.Logger().Debug(
+ "list failed to decode release", slog.String("key", item.Name),
+ slog.Any("error", err),
+ )
+ continue
+ }
+
+ rls.Labels = item.Labels
+
+ if filter(rls) {
+ results = append(results, rls)
+ }
+ }
+ return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the secret fails to retrieve the releases.
+func (secrets *Secrets) Query(labels map[string]string) ([]release.Releaser, error) {
+ ls := kblabels.Set{}
+ for k, v := range labels {
+ if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+ return nil, fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+ }
+ ls[k] = v
+ }
+
+ opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()}
+
+ list, err := secrets.impl.List(context.Background(), opts)
+ if err != nil {
+ return nil, fmt.Errorf("query: failed to query with labels: %w", err)
+ }
+
+ if len(list.Items) == 0 {
+ return nil, ErrReleaseNotFound
+ }
+
+ var results []release.Releaser
+ for _, item := range list.Items {
+ rls, err := decodeRelease(string(item.Data["release"]))
+ if err != nil {
+ secrets.Logger().Debug(
+ "failed to decode release",
+ slog.String("key", item.Name),
+ slog.Any("error", err),
+ )
+ continue
+ }
+ rls.Labels = item.Labels
+ results = append(results, rls)
+ }
+ return results, nil
+}
+
+// Create creates a new Secret holding the release. If the
+// Secret already exists, ErrReleaseExists is returned.
+func (secrets *Secrets) Create(key string, rel release.Releaser) error {
+ // set labels for secrets object meta data
+ var lbs labels
+
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+
+ lbs.init()
+ lbs.fromMap(rls.Labels)
+ lbs.set("createdAt", fmt.Sprintf("%v", time.Now().Unix()))
+
+ // create a new secret to hold the release
+ obj, err := newSecretsObject(key, rls, lbs)
+ if err != nil {
+ return fmt.Errorf("create: failed to encode release %q: %w", rls.Name, err)
+ }
+ // push the secret object out into the kubiverse
+ if _, err := secrets.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
+ if apierrors.IsAlreadyExists(err) {
+ return ErrReleaseExists
+ }
+
+ return fmt.Errorf("create: failed to create: %w", err)
+ }
+ return nil
+}
+
+// Update updates the Secret holding the release. If not found
+// the Secret is created to hold the release.
+func (secrets *Secrets) Update(key string, rel release.Releaser) error {
+ // set labels for secrets object meta data
+ var lbs labels
+
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+
+ lbs.init()
+ lbs.fromMap(rls.Labels)
+ lbs.set("modifiedAt", fmt.Sprintf("%v", time.Now().Unix()))
+
+ // create a new secret object to hold the release
+ obj, err := newSecretsObject(key, rls, lbs)
+ if err != nil {
+ return fmt.Errorf("update: failed to encode release %q: %w", rls.Name, err)
+ }
+ // push the secret object out into the kubiverse
+ _, err = secrets.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
+ if err != nil {
+ return fmt.Errorf("update: failed to update: %w", err)
+ }
+ return nil
+}
+
+// Delete deletes the Secret holding the release named by key.
+func (secrets *Secrets) Delete(key string) (rls release.Releaser, err error) {
+ // fetch the release to check existence
+ if rls, err = secrets.Get(key); err != nil {
+ return nil, err
+ }
+ // delete the release
+ err = secrets.impl.Delete(context.Background(), key, metav1.DeleteOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return rls, nil
+}
+
+// newSecretsObject constructs a kubernetes Secret object
+// to store a release. Each secret data entry is the base64
+// encoded gzipped string of a release.
+//
+// The following labels are used within each secret:
+//
+// "modifiedAt" - timestamp indicating when this secret was last modified. (set in Update)
+// "createdAt" - timestamp indicating when this secret was created. (set in Create)
+// "version" - version of the release.
+// "status" - status of the release (see pkg/release/status.go for variants)
+// "owner" - owner of the secret, currently "helm".
+// "name" - name of the release.
+func newSecretsObject(key string, rls *rspb.Release, lbs labels) (*v1.Secret, error) {
+ const owner = "helm"
+
+ // encode the release
+ s, err := encodeRelease(rls)
+ if err != nil {
+ return nil, err
+ }
+
+ if lbs == nil {
+ lbs.init()
+ }
+
+ // apply custom labels
+ lbs.fromMap(rls.Labels)
+
+ // apply labels
+ lbs.set("name", rls.Name)
+ lbs.set("owner", owner)
+ lbs.set("status", rls.Info.Status.String())
+ lbs.set("version", strconv.Itoa(rls.Version))
+
+ // create and return secret object.
+ // Helm 3 introduced setting the 'Type' field
+ // in the Kubernetes storage object.
+ // Helm defines the field content as follows:
+ // /.v
+ // Type field for Helm 3: helm.sh/release.v1
+ // Note: Version starts at 'v1' for Helm 3 and
+ // should be incremented if the release object
+ // metadata is modified.
+ // This would potentially be a breaking change
+ // and should only happen between major versions.
+ return &v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: key,
+ Labels: lbs.toMap(),
+ },
+ Type: "helm.sh/release.v1",
+ Data: map[string][]byte{"release": []byte(s)},
+ }, nil
+}
diff --git a/helm/pkg/storage/driver/secrets_test.go b/helm/pkg/storage/driver/secrets_test.go
new file mode 100644
index 000000000..f4aa1176c
--- /dev/null
+++ b/helm/pkg/storage/driver/secrets_test.go
@@ -0,0 +1,256 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "reflect"
+ "testing"
+
+ v1 "k8s.io/api/core/v1"
+
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+func TestSecretName(t *testing.T) {
+ c := newTestFixtureSecrets(t)
+ if c.Name() != SecretsDriverName {
+ t.Errorf("Expected name to be %q, got %q", SecretsDriverName, c.Name())
+ }
+}
+
+func TestSecretGet(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ secrets := newTestFixtureSecrets(t, []*rspb.Release{rel}...)
+
+ // get release with key
+ got, err := secrets.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release: %s", err)
+ }
+ // compare fetched release with original
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected {%v}, got {%v}", rel, got)
+ }
+}
+
+func TestUNcompressedSecretGet(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ // Create a test fixture which contains an uncompressed release
+ secret, err := newSecretsObject(key, rel, nil)
+ if err != nil {
+ t.Fatalf("Failed to create secret: %s", err)
+ }
+ b, err := json.Marshal(rel)
+ if err != nil {
+ t.Fatalf("Failed to marshal release: %s", err)
+ }
+ secret.Data["release"] = []byte(base64.StdEncoding.EncodeToString(b))
+ var mock MockSecretsInterface
+ mock.objects = map[string]*v1.Secret{key: secret}
+ secrets := NewSecrets(&mock)
+
+ // get release with key
+ got, err := secrets.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release: %s", err)
+ }
+ // compare fetched release with original
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected {%v}, got {%v}", rel, got)
+ }
+}
+
+func TestSecretList(t *testing.T) {
+ secrets := newTestFixtureSecrets(t, []*rspb.Release{
+ releaseStub("key-1", 1, "default", common.StatusUninstalled),
+ releaseStub("key-2", 1, "default", common.StatusUninstalled),
+ releaseStub("key-3", 1, "default", common.StatusDeployed),
+ releaseStub("key-4", 1, "default", common.StatusDeployed),
+ releaseStub("key-5", 1, "default", common.StatusSuperseded),
+ releaseStub("key-6", 1, "default", common.StatusSuperseded),
+ }...)
+
+ // list all deleted releases
+ del, err := secrets.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusUninstalled
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deleted: %s", err)
+ }
+ if len(del) != 2 {
+ t.Errorf("Expected 2 deleted, got %d:\n%v\n", len(del), del)
+ }
+
+ // list all deployed releases
+ dpl, err := secrets.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusDeployed
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deployed: %s", err)
+ }
+ if len(dpl) != 2 {
+ t.Errorf("Expected 2 deployed, got %d", len(dpl))
+ }
+
+ // list all superseded releases
+ ssd, err := secrets.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusSuperseded
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list superseded: %s", err)
+ }
+ if len(ssd) != 2 {
+ t.Errorf("Expected 2 superseded, got %d", len(ssd))
+ }
+ // Check if release having both system and custom labels, this is needed to ensure that selector filtering would work.
+ rls := convertReleaserToV1(t, ssd[0])
+ _, ok := rls.Labels["name"]
+ if !ok {
+ t.Fatalf("Expected 'name' label in results, actual %v", rls.Labels)
+ }
+ _, ok = rls.Labels["key1"]
+ if !ok {
+ t.Fatalf("Expected 'key1' label in results, actual %v", rls.Labels)
+ }
+}
+
+func TestSecretQuery(t *testing.T) {
+ secrets := newTestFixtureSecrets(t, []*rspb.Release{
+ releaseStub("key-1", 1, "default", common.StatusUninstalled),
+ releaseStub("key-2", 1, "default", common.StatusUninstalled),
+ releaseStub("key-3", 1, "default", common.StatusDeployed),
+ releaseStub("key-4", 1, "default", common.StatusDeployed),
+ releaseStub("key-5", 1, "default", common.StatusSuperseded),
+ releaseStub("key-6", 1, "default", common.StatusSuperseded),
+ }...)
+
+ rls, err := secrets.Query(map[string]string{"status": "deployed"})
+ if err != nil {
+ t.Fatalf("Failed to query: %s", err)
+ }
+ if len(rls) != 2 {
+ t.Fatalf("Expected 2 results, actual %d", len(rls))
+ }
+
+ _, err = secrets.Query(map[string]string{"name": "notExist"})
+ if err != ErrReleaseNotFound {
+ t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err)
+ }
+}
+
+func TestSecretCreate(t *testing.T) {
+ secrets := newTestFixtureSecrets(t)
+
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ // store the release in a secret
+ if err := secrets.Create(key, rel); err != nil {
+ t.Fatalf("Failed to create release with key %q: %s", key, err)
+ }
+
+ // get the release back
+ got, err := secrets.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release with key %q: %s", key, err)
+ }
+
+ // compare created release with original
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected {%v}, got {%v}", rel, got)
+ }
+}
+
+func TestSecretUpdate(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ secrets := newTestFixtureSecrets(t, []*rspb.Release{rel}...)
+
+ // modify release status code
+ rel.Info.Status = common.StatusSuperseded
+
+ // perform the update
+ if err := secrets.Update(key, rel); err != nil {
+ t.Fatalf("Failed to update release: %s", err)
+ }
+
+ // fetch the updated release
+ goti, err := secrets.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release with key %q: %s", key, err)
+ }
+ got := convertReleaserToV1(t, goti)
+
+ // check release has actually been updated by comparing modified fields
+ if rel.Info.Status != got.Info.Status {
+ t.Errorf("Expected status %s, got status %s", rel.Info.Status.String(), got.Info.Status.String())
+ }
+}
+
+func TestSecretDelete(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ secrets := newTestFixtureSecrets(t, []*rspb.Release{rel}...)
+
+ // perform the delete on a non-existing release
+ _, err := secrets.Delete("nonexistent")
+ if err != ErrReleaseNotFound {
+ t.Fatalf("Expected ErrReleaseNotFound, got: {%v}", err)
+ }
+
+ // perform the delete
+ rls, err := secrets.Delete(key)
+ if err != nil {
+ t.Fatalf("Failed to delete release with key %q: %s", key, err)
+ }
+ if !reflect.DeepEqual(rel, rls) {
+ t.Errorf("Expected {%v}, got {%v}", rel, rls)
+ }
+ _, err = secrets.Get(key)
+ if !errors.Is(err, ErrReleaseNotFound) {
+ t.Errorf("Expected {%v}, got {%v}", ErrReleaseNotFound, err)
+ }
+}
diff --git a/helm/pkg/storage/driver/sql.go b/helm/pkg/storage/driver/sql.go
new file mode 100644
index 000000000..21d9f6679
--- /dev/null
+++ b/helm/pkg/storage/driver/sql.go
@@ -0,0 +1,727 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "fmt"
+ "log/slog"
+ "maps"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+ migrate "github.com/rubenv/sql-migrate"
+
+ sq "github.com/Masterminds/squirrel"
+
+ // Import pq for postgres dialect
+ _ "github.com/lib/pq"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/release"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var _ Driver = (*SQL)(nil)
+
+var labelMap = map[string]struct{}{
+ "modifiedAt": {},
+ "createdAt": {},
+ "version": {},
+ "status": {},
+ "owner": {},
+ "name": {},
+}
+
+const postgreSQLDialect = "postgres"
+
+// SQLDriverName is the string name of this driver.
+const SQLDriverName = "SQL"
+
+const sqlReleaseTableName = "releases_v1"
+const sqlCustomLabelsTableName = "custom_labels_v1"
+
+const (
+ sqlReleaseTableKeyColumn = "key"
+ sqlReleaseTableTypeColumn = "type"
+ sqlReleaseTableBodyColumn = "body"
+ sqlReleaseTableNameColumn = "name"
+ sqlReleaseTableNamespaceColumn = "namespace"
+ sqlReleaseTableVersionColumn = "version"
+ sqlReleaseTableStatusColumn = "status"
+ sqlReleaseTableOwnerColumn = "owner"
+ sqlReleaseTableCreatedAtColumn = "createdAt"
+ sqlReleaseTableModifiedAtColumn = "modifiedAt"
+
+ sqlCustomLabelsTableReleaseKeyColumn = "releaseKey"
+ sqlCustomLabelsTableReleaseNamespaceColumn = "releaseNamespace"
+ sqlCustomLabelsTableKeyColumn = "key"
+ sqlCustomLabelsTableValueColumn = "value"
+)
+
+// Following limits based on k8s labels limits - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
+const (
+ sqlCustomLabelsTableKeyMaxLength = 253 + 1 + 63
+ sqlCustomLabelsTableValueMaxLength = 63
+)
+
+const (
+ sqlReleaseDefaultOwner = "helm"
+ sqlReleaseDefaultType = "helm.sh/release.v1"
+)
+
+// SQL is the sql storage driver implementation.
+type SQL struct {
+ db *sqlx.DB
+ namespace string
+ statementBuilder sq.StatementBuilderType
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+// Name returns the name of the driver.
+func (s *SQL) Name() string {
+ return SQLDriverName
+}
+
+// Check if all migrations al
+func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
+ // make map (set) of ids for fast search
+ migrationsIDs := make(map[string]struct{})
+ for _, migration := range migrations {
+ migrationsIDs[migration.Id] = struct{}{}
+ }
+
+ // get list of applied migrations
+ migrate.SetDisableCreateTable(true)
+ records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
+ migrate.SetDisableCreateTable(false)
+ if err != nil {
+ s.Logger().Debug("failed to get migration records", slog.Any("error", err))
+ return false
+ }
+
+ for _, record := range records {
+ if _, ok := migrationsIDs[record.Id]; ok {
+ s.Logger().Debug("found previous migration", "id", record.Id, "appliedAt", record.AppliedAt)
+ delete(migrationsIDs, record.Id)
+ }
+ }
+
+ // check if all migrations applied
+ if len(migrationsIDs) != 0 {
+ for id := range migrationsIDs {
+ s.Logger().Debug("find unapplied migration", "id", id)
+ }
+ return false
+ }
+ return true
+}
+
+func (s *SQL) ensureDBSetup() error {
+
+ migrations := &migrate.MemoryMigrationSource{
+ Migrations: []*migrate.Migration{
+ {
+ Id: "init",
+ Up: []string{
+ fmt.Sprintf(`
+ CREATE TABLE %s (
+ %s VARCHAR(90),
+ %s VARCHAR(64) NOT NULL,
+ %s TEXT NOT NULL,
+ %s VARCHAR(64) NOT NULL,
+ %s VARCHAR(64) NOT NULL,
+ %s INTEGER NOT NULL,
+ %s TEXT NOT NULL,
+ %s TEXT NOT NULL,
+ %s INTEGER NOT NULL,
+ %s INTEGER NOT NULL DEFAULT 0,
+ PRIMARY KEY(%s, %s)
+ );
+ CREATE INDEX ON %s (%s, %s);
+ CREATE INDEX ON %s (%s);
+ CREATE INDEX ON %s (%s);
+ CREATE INDEX ON %s (%s);
+ CREATE INDEX ON %s (%s);
+ CREATE INDEX ON %s (%s);
+
+ GRANT ALL ON %s TO PUBLIC;
+
+ ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+ `,
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableTypeColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableVersionColumn,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableCreatedAtColumn,
+ sqlReleaseTableModifiedAtColumn,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableVersionColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableCreatedAtColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableModifiedAtColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableName,
+ ),
+ },
+ Down: []string{
+ fmt.Sprintf(`
+ DROP TABLE %s;
+ `, sqlReleaseTableName),
+ },
+ },
+ {
+ Id: "custom_labels",
+ Up: []string{
+ fmt.Sprintf(`
+ CREATE TABLE %s (
+ %s VARCHAR(64),
+ %s VARCHAR(67),
+ %s VARCHAR(%d),
+ %s VARCHAR(%d)
+ );
+ CREATE INDEX ON %s (%s, %s);
+
+ GRANT ALL ON %s TO PUBLIC;
+ ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+ `,
+ sqlCustomLabelsTableName,
+ sqlCustomLabelsTableReleaseKeyColumn,
+ sqlCustomLabelsTableReleaseNamespaceColumn,
+ sqlCustomLabelsTableKeyColumn,
+ sqlCustomLabelsTableKeyMaxLength,
+ sqlCustomLabelsTableValueColumn,
+ sqlCustomLabelsTableValueMaxLength,
+ sqlCustomLabelsTableName,
+ sqlCustomLabelsTableReleaseKeyColumn,
+ sqlCustomLabelsTableReleaseNamespaceColumn,
+ sqlCustomLabelsTableName,
+ sqlCustomLabelsTableName,
+ ),
+ },
+ Down: []string{
+ fmt.Sprintf(`
+ DELETE TABLE %s;
+ `, sqlCustomLabelsTableName),
+ },
+ },
+ },
+ }
+
+ // Check that init migration already applied
+ if s.checkAlreadyApplied(migrations.Migrations) {
+ return nil
+ }
+
+ // Populate the database with the relations we need if they don't exist yet
+ _, err := migrate.Exec(s.db.DB, postgreSQLDialect, migrations, migrate.Up)
+ return err
+}
+
+// SQLReleaseWrapper describes how Helm releases are stored in an SQL database
+type SQLReleaseWrapper struct {
+ // The primary key, made of {release-name}.{release-version}
+ Key string `db:"key"`
+
+ // See https://github.com/helm/helm/blob/c9fe3d118caec699eb2565df9838673af379ce12/pkg/storage/driver/secrets.go#L231
+ Type string `db:"type"`
+
+ // The rspb.Release body, as a base64-encoded string
+ Body string `db:"body"`
+
+ // Release "labels" that can be used as filters in the storage.Query(labels map[string]string)
+ // we implemented. Note that allowing Helm users to filter against new dimensions will require a
+ // new migration to be added, and the Create and/or update functions to be updated accordingly.
+ Name string `db:"name"`
+ Namespace string `db:"namespace"`
+ Version int `db:"version"`
+ Status string `db:"status"`
+ Owner string `db:"owner"`
+ CreatedAt int `db:"createdAt"`
+ ModifiedAt int `db:"modifiedAt"`
+}
+
+type SQLReleaseCustomLabelWrapper struct {
+ ReleaseKey string `db:"release_key"`
+ ReleaseNamespace string `db:"release_namespace"`
+ Key string `db:"key"`
+ Value string `db:"value"`
+}
+
+// NewSQL initializes a new sql driver.
+func NewSQL(connectionString string, namespace string) (*SQL, error) {
+ db, err := sqlx.Connect(postgreSQLDialect, connectionString)
+ if err != nil {
+ return nil, err
+ }
+
+ driver := &SQL{
+ db: db,
+ statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
+ }
+
+ if err := driver.ensureDBSetup(); err != nil {
+ return nil, err
+ }
+
+ driver.namespace = namespace
+ driver.SetLogger(slog.Default().Handler())
+
+ return driver, nil
+}
+
+// Get returns the release named by key.
+func (s *SQL) Get(key string) (release.Releaser, error) {
+ var record SQLReleaseWrapper
+
+ qb := s.statementBuilder.
+ Select(sqlReleaseTableBodyColumn).
+ From(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
+
+ query, args, err := qb.ToSql()
+ if err != nil {
+ s.Logger().Debug("failed to build query", slog.Any("error", err))
+ return nil, err
+ }
+
+ // Get will return an error if the result is empty
+ if err := s.db.Get(&record, query, args...); err != nil {
+ s.Logger().Debug("got SQL error when getting release", slog.String("key", key), slog.Any("error", err))
+ return nil, ErrReleaseNotFound
+ }
+
+ release, err := decodeRelease(record.Body)
+ if err != nil {
+ s.Logger().Debug("failed to decode data", slog.String("key", key), slog.Any("error", err))
+ return nil, err
+ }
+
+ if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
+ s.Logger().Debug(
+ "failed to get release custom labels",
+ slog.String("namespace", s.namespace),
+ slog.String("key", key),
+ slog.Any("error", err),
+ )
+ return nil, err
+ }
+
+ return release, nil
+}
+
+// List returns the list of all releases such that filter(release) == true
+func (s *SQL) List(filter func(release.Releaser) bool) ([]release.Releaser, error) {
+ sb := s.statementBuilder.
+ Select(sqlReleaseTableKeyColumn, sqlReleaseTableNamespaceColumn, sqlReleaseTableBodyColumn).
+ From(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableOwnerColumn: sqlReleaseDefaultOwner})
+
+ // If a namespace was specified, we only list releases from that namespace
+ if s.namespace != "" {
+ sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
+ }
+
+ query, args, err := sb.ToSql()
+ if err != nil {
+ s.Logger().Debug("failed to build query", slog.Any("error", err))
+ return nil, err
+ }
+
+ var records = []SQLReleaseWrapper{}
+ if err := s.db.Select(&records, query, args...); err != nil {
+ s.Logger().Debug("failed to list", slog.Any("error", err))
+ return nil, err
+ }
+
+ var releases []release.Releaser
+ for _, record := range records {
+ release, err := decodeRelease(record.Body)
+ if err != nil {
+ s.Logger().Debug("failed to decode release", slog.Any("record", record), slog.Any("error", err))
+ continue
+ }
+
+ if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
+ s.Logger().Debug(
+ "failed to get release custom labels",
+ slog.String("namespace", record.Namespace),
+ slog.String("key", record.Key),
+ slog.Any("error", err),
+ )
+ return nil, err
+ }
+ maps.Copy(release.Labels, getReleaseSystemLabels(release))
+
+ if filter(release) {
+ releases = append(releases, release)
+ }
+ }
+
+ return releases, nil
+}
+
+// Query returns the set of releases that match the provided set of labels.
+func (s *SQL) Query(labels map[string]string) ([]release.Releaser, error) {
+ sb := s.statementBuilder.
+ Select(sqlReleaseTableKeyColumn, sqlReleaseTableNamespaceColumn, sqlReleaseTableBodyColumn).
+ From(sqlReleaseTableName)
+
+ keys := make([]string, 0, len(labels))
+ for key := range labels {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ if _, ok := labelMap[key]; ok {
+ sb = sb.Where(sq.Eq{key: labels[key]})
+ } else {
+ s.Logger().Debug("unknown label", "key", key)
+ return nil, fmt.Errorf("unknown label %s", key)
+ }
+ }
+
+ // If a namespace was specified, we only list releases from that namespace
+ if s.namespace != "" {
+ sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
+ }
+
+ // Build our query
+ query, args, err := sb.ToSql()
+ if err != nil {
+ s.Logger().Debug("failed to build query", slog.Any("error", err))
+ return nil, err
+ }
+
+ var records = []SQLReleaseWrapper{}
+ if err := s.db.Select(&records, query, args...); err != nil {
+ s.Logger().Debug("failed to query with labels", slog.Any("error", err))
+ return nil, err
+ }
+
+ if len(records) == 0 {
+ return nil, ErrReleaseNotFound
+ }
+
+ var releases []release.Releaser
+ for _, record := range records {
+ release, err := decodeRelease(record.Body)
+ if err != nil {
+ s.Logger().Debug("failed to decode release", slog.Any("record", record), slog.Any("error", err))
+ continue
+ }
+
+ if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
+ s.Logger().Debug(
+ "failed to get release custom labels",
+ slog.String("namespace", record.Namespace),
+ slog.String("key", record.Key),
+ slog.Any("error", err),
+ )
+ return nil, err
+ }
+
+ releases = append(releases, release)
+ }
+
+ if len(releases) == 0 {
+ return nil, ErrReleaseNotFound
+ }
+
+ return releases, nil
+}
+
+// Create creates a new release.
+func (s *SQL) Create(key string, rel release.Releaser) error {
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+
+ namespace := rls.Namespace
+ if namespace == "" {
+ namespace = defaultNamespace
+ }
+ s.namespace = namespace
+
+ body, err := encodeRelease(rls)
+ if err != nil {
+ s.Logger().Debug("failed to encode release", slog.Any("error", err))
+ return err
+ }
+
+ transaction, err := s.db.Beginx()
+ if err != nil {
+ s.Logger().Debug("failed to start SQL transaction", slog.Any("error", err))
+ return fmt.Errorf("error beginning transaction: %v", err)
+ }
+
+ insertQuery, args, err := s.statementBuilder.
+ Insert(sqlReleaseTableName).
+ Columns(
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableTypeColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableVersionColumn,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableCreatedAtColumn,
+ ).
+ Values(
+ key,
+ sqlReleaseDefaultType,
+ body,
+ rls.Name,
+ namespace,
+ int(rls.Version),
+ rls.Info.Status.String(),
+ sqlReleaseDefaultOwner,
+ int(time.Now().Unix()),
+ ).ToSql()
+ if err != nil {
+ s.Logger().Debug("failed to build insert query", slog.Any("error", err))
+ return err
+ }
+
+ if _, err := transaction.Exec(insertQuery, args...); err != nil {
+ defer transaction.Rollback()
+
+ selectQuery, args, buildErr := s.statementBuilder.
+ Select(sqlReleaseTableKeyColumn).
+ From(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+ ToSql()
+ if buildErr != nil {
+ s.Logger().Debug("failed to build select query", "error", buildErr)
+ return err
+ }
+
+ var record SQLReleaseWrapper
+ if err := transaction.Get(&record, selectQuery, args...); err == nil {
+ s.Logger().Debug("release already exists", slog.String("key", key))
+ return ErrReleaseExists
+ }
+
+ s.Logger().Debug("failed to store release in SQL database", slog.String("key", key), slog.Any("error", err))
+ return err
+ }
+
+ // Filtering labels before insert cause in SQL storage driver system releases are stored in separate columns of release table
+ for k, v := range filterSystemLabels(rls.Labels) {
+ insertLabelsQuery, args, err := s.statementBuilder.
+ Insert(sqlCustomLabelsTableName).
+ Columns(
+ sqlCustomLabelsTableReleaseKeyColumn,
+ sqlCustomLabelsTableReleaseNamespaceColumn,
+ sqlCustomLabelsTableKeyColumn,
+ sqlCustomLabelsTableValueColumn,
+ ).
+ Values(
+ key,
+ namespace,
+ k,
+ v,
+ ).ToSql()
+
+ if err != nil {
+ defer transaction.Rollback()
+ s.Logger().Debug("failed to build insert query", slog.Any("error", err))
+ return err
+ }
+
+ if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
+ defer transaction.Rollback()
+ s.Logger().Debug("failed to write Labels", slog.Any("error", err))
+ return err
+ }
+ }
+ defer transaction.Commit()
+
+ return nil
+}
+
+// Update updates a release.
+func (s *SQL) Update(key string, rel release.Releaser) error {
+ rls, err := releaserToV1Release(rel)
+ if err != nil {
+ return err
+ }
+ namespace := rls.Namespace
+ if namespace == "" {
+ namespace = defaultNamespace
+ }
+ s.namespace = namespace
+
+ body, err := encodeRelease(rls)
+ if err != nil {
+ s.Logger().Debug("failed to encode release", slog.Any("error", err))
+ return err
+ }
+
+ query, args, err := s.statementBuilder.
+ Update(sqlReleaseTableName).
+ Set(sqlReleaseTableBodyColumn, body).
+ Set(sqlReleaseTableNameColumn, rls.Name).
+ Set(sqlReleaseTableVersionColumn, int(rls.Version)).
+ Set(sqlReleaseTableStatusColumn, rls.Info.Status.String()).
+ Set(sqlReleaseTableOwnerColumn, sqlReleaseDefaultOwner).
+ Set(sqlReleaseTableModifiedAtColumn, int(time.Now().Unix())).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: namespace}).
+ ToSql()
+
+ if err != nil {
+ s.Logger().Debug("failed to build update query", slog.Any("error", err))
+ return err
+ }
+
+ if _, err := s.db.Exec(query, args...); err != nil {
+ s.Logger().Debug("failed to update release in SQL database", slog.String("key", key), slog.Any("error", err))
+ return err
+ }
+
+ return nil
+}
+
+// Delete deletes a release or returns ErrReleaseNotFound.
+func (s *SQL) Delete(key string) (release.Releaser, error) {
+ transaction, err := s.db.Beginx()
+ if err != nil {
+ s.Logger().Debug("failed to start SQL transaction", slog.Any("error", err))
+ return nil, fmt.Errorf("error beginning transaction: %v", err)
+ }
+
+ selectQuery, args, err := s.statementBuilder.
+ Select(sqlReleaseTableBodyColumn).
+ From(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+ ToSql()
+ if err != nil {
+ s.Logger().Debug("failed to build select query", slog.Any("error", err))
+ return nil, err
+ }
+
+ var record SQLReleaseWrapper
+ err = transaction.Get(&record, selectQuery, args...)
+ if err != nil {
+ s.Logger().Debug("release not found", slog.String("key", key), slog.Any("error", err))
+ return nil, ErrReleaseNotFound
+ }
+
+ release, err := decodeRelease(record.Body)
+ if err != nil {
+ s.Logger().Debug("failed to decode release", slog.String("key", key), slog.Any("error", err))
+ transaction.Rollback()
+ return nil, err
+ }
+ defer transaction.Commit()
+
+ deleteQuery, args, err := s.statementBuilder.
+ Delete(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+ ToSql()
+ if err != nil {
+ s.Logger().Debug("failed to build delete query", slog.Any("error", err))
+ return nil, err
+ }
+
+ _, err = transaction.Exec(deleteQuery, args...)
+ if err != nil {
+ s.Logger().Debug("failed perform delete query", slog.Any("error", err))
+ return release, err
+ }
+
+ if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
+ s.Logger().Debug(
+ "failed to get release custom labels",
+ slog.String("namespace", s.namespace),
+ slog.String("key", key),
+ slog.Any("error", err))
+ return nil, err
+ }
+
+ deleteCustomLabelsQuery, args, err := s.statementBuilder.
+ Delete(sqlCustomLabelsTableName).
+ Where(sq.Eq{sqlCustomLabelsTableReleaseKeyColumn: key}).
+ Where(sq.Eq{sqlCustomLabelsTableReleaseNamespaceColumn: s.namespace}).
+ ToSql()
+
+ if err != nil {
+ s.Logger().Debug("failed to build delete Labels query", slog.Any("error", err))
+ return nil, err
+ }
+ _, err = transaction.Exec(deleteCustomLabelsQuery, args...)
+ return release, err
+}
+
+// Get release custom labels from database
+func (s *SQL) getReleaseCustomLabels(key string, _ string) (map[string]string, error) {
+ query, args, err := s.statementBuilder.
+ Select(sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableValueColumn).
+ From(sqlCustomLabelsTableName).
+ Where(sq.Eq{sqlCustomLabelsTableReleaseKeyColumn: key,
+ sqlCustomLabelsTableReleaseNamespaceColumn: s.namespace}).
+ ToSql()
+ if err != nil {
+ return nil, err
+ }
+
+ var labelsList = []SQLReleaseCustomLabelWrapper{}
+ if err := s.db.Select(&labelsList, query, args...); err != nil {
+ return nil, err
+ }
+
+ labelsMap := make(map[string]string)
+ for _, i := range labelsList {
+ labelsMap[i.Key] = i.Value
+ }
+
+ return filterSystemLabels(labelsMap), nil
+}
+
+// Rebuild system labels from release object
+func getReleaseSystemLabels(rls *rspb.Release) map[string]string {
+ return map[string]string{
+ "name": rls.Name,
+ "owner": sqlReleaseDefaultOwner,
+ "status": rls.Info.Status.String(),
+ "version": strconv.Itoa(rls.Version),
+ }
+}
diff --git a/helm/pkg/storage/driver/sql_test.go b/helm/pkg/storage/driver/sql_test.go
new file mode 100644
index 000000000..f7c29033c
--- /dev/null
+++ b/helm/pkg/storage/driver/sql_test.go
@@ -0,0 +1,625 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "regexp"
+ "testing"
+ "time"
+
+ sqlmock "github.com/DATA-DOG/go-sqlmock"
+ migrate "github.com/rubenv/sql-migrate"
+
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+const recentTimestampTolerance = time.Second
+
+func recentUnixTimestamp() sqlmock.Argument {
+ return recentUnixTimestampArgument{}
+}
+
+type recentUnixTimestampArgument struct{}
+
+func (recentUnixTimestampArgument) Match(value driver.Value) bool {
+ var ts int64
+ switch v := value.(type) {
+ case int:
+ ts = int64(v)
+ case int64:
+ ts = v
+ default:
+ return false
+ }
+
+ diff := time.Since(time.Unix(ts, 0))
+ if diff < 0 {
+ diff = -diff
+ }
+
+ return diff <= recentTimestampTolerance
+}
+
+func TestSQLName(t *testing.T) {
+ sqlDriver, _ := newTestFixtureSQL(t)
+ if sqlDriver.Name() != SQLDriverName {
+ t.Errorf("Expected name to be %s, got %s", SQLDriverName, sqlDriver.Name())
+ }
+}
+
+func TestSQLGet(t *testing.T) {
+ vers := int(1)
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ body, _ := encodeRelease(rel)
+
+ sqlDriver, mock := newTestFixtureSQL(t)
+
+ query := fmt.Sprintf(
+ regexp.QuoteMeta("SELECT %s FROM %s WHERE %s = $1 AND %s = $2"),
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ mock.
+ ExpectQuery(query).
+ WithArgs(key, namespace).
+ WillReturnRows(
+ mock.NewRows([]string{
+ sqlReleaseTableBodyColumn,
+ }).AddRow(
+ body,
+ ),
+ ).RowsWillBeClosed()
+
+ mockGetReleaseCustomLabels(mock, key, namespace, rel.Labels)
+
+ got, err := sqlDriver.Get(key)
+ if err != nil {
+ t.Fatalf("Failed to get release: %v", err)
+ }
+
+ if !reflect.DeepEqual(rel, got) {
+ t.Errorf("Expected release {%v}, got {%v}", rel, got)
+ }
+
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+}
+
+func TestSQLList(t *testing.T) {
+ releases := []*rspb.Release{}
+ releases = append(releases, releaseStub("key-1", 1, "default", common.StatusUninstalled))
+ releases = append(releases, releaseStub("key-2", 1, "default", common.StatusUninstalled))
+ releases = append(releases, releaseStub("key-3", 1, "default", common.StatusDeployed))
+ releases = append(releases, releaseStub("key-4", 1, "default", common.StatusDeployed))
+ releases = append(releases, releaseStub("key-5", 1, "default", common.StatusSuperseded))
+ releases = append(releases, releaseStub("key-6", 1, "default", common.StatusSuperseded))
+
+ sqlDriver, mock := newTestFixtureSQL(t)
+
+ for range 3 {
+ query := fmt.Sprintf(
+ "SELECT %s, %s, %s FROM %s WHERE %s = $1 AND %s = $2",
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ rows := mock.NewRows([]string{
+ sqlReleaseTableBodyColumn,
+ })
+ for _, r := range releases {
+ body, _ := encodeRelease(r)
+ rows.AddRow(body)
+ }
+ mock.
+ ExpectQuery(regexp.QuoteMeta(query)).
+ WithArgs(sqlReleaseDefaultOwner, sqlDriver.namespace).
+ WillReturnRows(rows).RowsWillBeClosed()
+
+ for _, r := range releases {
+ mockGetReleaseCustomLabels(mock, "", r.Namespace, r.Labels)
+ }
+ }
+
+ // list all deleted releases
+ del, err := sqlDriver.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusUninstalled
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deleted: %v", err)
+ }
+ if len(del) != 2 {
+ t.Errorf("Expected 2 deleted, got %d:\n%v\n", len(del), del)
+ }
+
+ // list all deployed releases
+ dpl, err := sqlDriver.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusDeployed
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list deployed: %v", err)
+ }
+ if len(dpl) != 2 {
+ t.Errorf("Expected 2 deployed, got %d:\n%v\n", len(dpl), dpl)
+ }
+
+ // list all superseded releases
+ ssd, err := sqlDriver.List(func(rel release.Releaser) bool {
+ rls := convertReleaserToV1(t, rel)
+ return rls.Info.Status == common.StatusSuperseded
+ })
+ // check
+ if err != nil {
+ t.Errorf("Failed to list superseded: %v", err)
+ }
+ if len(ssd) != 2 {
+ t.Errorf("Expected 2 superseded, got %d:\n%v\n", len(ssd), ssd)
+ }
+
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+
+ // Check if release having both system and custom labels, this is needed to ensure that selector filtering would work.
+ rls := convertReleaserToV1(t, ssd[0])
+ _, ok := rls.Labels["name"]
+ if !ok {
+ t.Fatalf("Expected 'name' label in results, actual %v", rls.Labels)
+ }
+ _, ok = rls.Labels["key1"]
+ if !ok {
+ t.Fatalf("Expected 'key1' label in results, actual %v", rls.Labels)
+ }
+}
+
+func TestSqlCreate(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ sqlDriver, mock := newTestFixtureSQL(t)
+ body, _ := encodeRelease(rel)
+
+ query := fmt.Sprintf(
+ "INSERT INTO %s (%s,%s,%s,%s,%s,%s,%s,%s,%s) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9)",
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableTypeColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableVersionColumn,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableCreatedAtColumn,
+ )
+
+ mock.ExpectBegin()
+ mock.
+ ExpectExec(regexp.QuoteMeta(query)).
+ WithArgs(key, sqlReleaseDefaultType, body, rel.Name, rel.Namespace, int(rel.Version), rel.Info.Status.String(), sqlReleaseDefaultOwner, recentUnixTimestamp()).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+
+ labelsQuery := fmt.Sprintf(
+ "INSERT INTO %s (%s,%s,%s,%s) VALUES ($1,$2,$3,$4)",
+ sqlCustomLabelsTableName,
+ sqlCustomLabelsTableReleaseKeyColumn,
+ sqlCustomLabelsTableReleaseNamespaceColumn,
+ sqlCustomLabelsTableKeyColumn,
+ sqlCustomLabelsTableValueColumn,
+ )
+
+ mock.MatchExpectationsInOrder(false)
+ for k, v := range filterSystemLabels(rel.Labels) {
+ mock.
+ ExpectExec(regexp.QuoteMeta(labelsQuery)).
+ WithArgs(key, rel.Namespace, k, v).
+ WillReturnResult(sqlmock.NewResult(1, 1))
+ }
+ mock.ExpectCommit()
+
+ if err := sqlDriver.Create(key, rel); err != nil {
+ t.Fatalf("failed to create release with key %s: %v", key, err)
+ }
+
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+}
+
+func TestSqlCreateAlreadyExists(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ sqlDriver, mock := newTestFixtureSQL(t)
+ body, _ := encodeRelease(rel)
+
+ insertQuery := fmt.Sprintf(
+ "INSERT INTO %s (%s,%s,%s,%s,%s,%s,%s,%s,%s) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9)",
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableTypeColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableVersionColumn,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableCreatedAtColumn,
+ )
+
+ // Insert fails (primary key already exists)
+ mock.ExpectBegin()
+ mock.
+ ExpectExec(regexp.QuoteMeta(insertQuery)).
+ WithArgs(key, sqlReleaseDefaultType, body, rel.Name, rel.Namespace, int(rel.Version), rel.Info.Status.String(), sqlReleaseDefaultOwner, recentUnixTimestamp()).
+ WillReturnError(fmt.Errorf("dialect dependent SQL error"))
+
+ selectQuery := fmt.Sprintf(
+ regexp.QuoteMeta("SELECT %s FROM %s WHERE %s = $1 AND %s = $2"),
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ // Let's check that we do make sure the error is due to a release already existing
+ mock.
+ ExpectQuery(selectQuery).
+ WithArgs(key, namespace).
+ WillReturnRows(
+ mock.NewRows([]string{
+ sqlReleaseTableKeyColumn,
+ }).AddRow(
+ key,
+ ),
+ ).RowsWillBeClosed()
+ mock.ExpectRollback()
+
+ if err := sqlDriver.Create(key, rel); err == nil {
+ t.Fatalf("failed to create release with key %s: %v", key, err)
+ }
+
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+}
+
+func TestSqlUpdate(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ sqlDriver, mock := newTestFixtureSQL(t)
+ body, _ := encodeRelease(rel)
+
+ query := fmt.Sprintf(
+ "UPDATE %s SET %s = $1, %s = $2, %s = $3, %s = $4, %s = $5, %s = $6 WHERE %s = $7 AND %s = $8",
+ sqlReleaseTableName,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableVersionColumn,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableModifiedAtColumn,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ mock.
+ ExpectExec(regexp.QuoteMeta(query)).
+ WithArgs(body, rel.Name, int(rel.Version), rel.Info.Status.String(), sqlReleaseDefaultOwner, recentUnixTimestamp(), key, namespace).
+ WillReturnResult(sqlmock.NewResult(0, 1))
+
+ if err := sqlDriver.Update(key, rel); err != nil {
+ t.Fatalf("failed to update release with key %s: %v", key, err)
+ }
+
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+}
+
+func TestSqlQuery(t *testing.T) {
+ // Reflect actual use cases in ../storage.go
+ labelSetUnknown := map[string]string{
+ "name": "smug-pigeon",
+ "owner": sqlReleaseDefaultOwner,
+ "status": "unknown",
+ }
+ labelSetDeployed := map[string]string{
+ "name": "smug-pigeon",
+ "owner": sqlReleaseDefaultOwner,
+ "status": "deployed",
+ }
+ labelSetAll := map[string]string{
+ "name": "smug-pigeon",
+ "owner": sqlReleaseDefaultOwner,
+ }
+
+ supersededRelease := releaseStub("smug-pigeon", 1, "default", common.StatusSuperseded)
+ supersededReleaseBody, _ := encodeRelease(supersededRelease)
+ deployedRelease := releaseStub("smug-pigeon", 2, "default", common.StatusDeployed)
+ deployedReleaseBody, _ := encodeRelease(deployedRelease)
+
+ // Let's actually start our test
+ sqlDriver, mock := newTestFixtureSQL(t)
+
+ query := fmt.Sprintf(
+ "SELECT %s, %s, %s FROM %s WHERE %s = $1 AND %s = $2 AND %s = $3 AND %s = $4",
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableStatusColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ mock.
+ ExpectQuery(regexp.QuoteMeta(query)).
+ WithArgs("smug-pigeon", sqlReleaseDefaultOwner, "unknown", "default").
+ WillReturnRows(
+ mock.NewRows([]string{
+ sqlReleaseTableBodyColumn,
+ }),
+ ).RowsWillBeClosed()
+
+ mock.
+ ExpectQuery(regexp.QuoteMeta(query)).
+ WithArgs("smug-pigeon", sqlReleaseDefaultOwner, "deployed", "default").
+ WillReturnRows(
+ mock.NewRows([]string{
+ sqlReleaseTableBodyColumn,
+ }).AddRow(
+ deployedReleaseBody,
+ ),
+ ).RowsWillBeClosed()
+
+ mockGetReleaseCustomLabels(mock, "", deployedRelease.Namespace, deployedRelease.Labels)
+
+ query = fmt.Sprintf(
+ "SELECT %s, %s, %s FROM %s WHERE %s = $1 AND %s = $2 AND %s = $3",
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableNameColumn,
+ sqlReleaseTableOwnerColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ mock.
+ ExpectQuery(regexp.QuoteMeta(query)).
+ WithArgs("smug-pigeon", sqlReleaseDefaultOwner, "default").
+ WillReturnRows(
+ mock.NewRows([]string{
+ sqlReleaseTableBodyColumn,
+ }).AddRow(
+ supersededReleaseBody,
+ ).AddRow(
+ deployedReleaseBody,
+ ),
+ ).RowsWillBeClosed()
+
+ mockGetReleaseCustomLabels(mock, "", supersededRelease.Namespace, supersededRelease.Labels)
+ mockGetReleaseCustomLabels(mock, "", deployedRelease.Namespace, deployedRelease.Labels)
+
+ _, err := sqlDriver.Query(labelSetUnknown)
+ if err == nil {
+ t.Errorf("Expected error {%v}, got nil", ErrReleaseNotFound)
+ } else if err != ErrReleaseNotFound {
+ t.Fatalf("failed to query for unknown smug-pigeon release: %v", err)
+ }
+
+ results, err := sqlDriver.Query(labelSetDeployed)
+ if err != nil {
+ t.Fatalf("failed to query for deployed smug-pigeon release: %v", err)
+ }
+
+ for _, res := range results {
+ if !reflect.DeepEqual(res, deployedRelease) {
+ t.Errorf("Expected release {%v}, got {%v}", deployedRelease, res)
+ }
+ }
+
+ results, err = sqlDriver.Query(labelSetAll)
+ if err != nil {
+ t.Fatalf("failed to query release history for smug-pigeon: %v", err)
+ }
+
+ if len(results) != 2 {
+ t.Errorf("expected a resultset of size 2, got %d", len(results))
+ }
+
+ for _, res := range results {
+ if !reflect.DeepEqual(res, deployedRelease) && !reflect.DeepEqual(res, supersededRelease) {
+ t.Errorf("Expected release {%v} or {%v}, got {%v}", deployedRelease, supersededRelease, res)
+ }
+ }
+
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+}
+
+func TestSqlDelete(t *testing.T) {
+ vers := 1
+ name := "smug-pigeon"
+ namespace := "default"
+ key := testKey(name, vers)
+ rel := releaseStub(name, vers, namespace, common.StatusDeployed)
+
+ body, _ := encodeRelease(rel)
+
+ sqlDriver, mock := newTestFixtureSQL(t)
+
+ selectQuery := fmt.Sprintf(
+ "SELECT %s FROM %s WHERE %s = $1 AND %s = $2",
+ sqlReleaseTableBodyColumn,
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ mock.ExpectBegin()
+ mock.
+ ExpectQuery(regexp.QuoteMeta(selectQuery)).
+ WithArgs(key, namespace).
+ WillReturnRows(
+ mock.NewRows([]string{
+ sqlReleaseTableBodyColumn,
+ }).AddRow(
+ body,
+ ),
+ ).RowsWillBeClosed()
+
+ deleteQuery := fmt.Sprintf(
+ "DELETE FROM %s WHERE %s = $1 AND %s = $2",
+ sqlReleaseTableName,
+ sqlReleaseTableKeyColumn,
+ sqlReleaseTableNamespaceColumn,
+ )
+
+ mock.
+ ExpectExec(regexp.QuoteMeta(deleteQuery)).
+ WithArgs(key, namespace).
+ WillReturnResult(sqlmock.NewResult(0, 1))
+
+ mockGetReleaseCustomLabels(mock, key, namespace, rel.Labels)
+
+ deleteLabelsQuery := fmt.Sprintf(
+ "DELETE FROM %s WHERE %s = $1 AND %s = $2",
+ sqlCustomLabelsTableName,
+ sqlCustomLabelsTableReleaseKeyColumn,
+ sqlCustomLabelsTableReleaseNamespaceColumn,
+ )
+ mock.
+ ExpectExec(regexp.QuoteMeta(deleteLabelsQuery)).
+ WithArgs(key, namespace).
+ WillReturnResult(sqlmock.NewResult(0, 1))
+
+ mock.ExpectCommit()
+
+ deletedRelease, err := sqlDriver.Delete(key)
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("sql expectations weren't met: %v", err)
+ }
+ if err != nil {
+ t.Fatalf("failed to delete release with key %q: %v", key, err)
+ }
+
+ if !reflect.DeepEqual(rel, deletedRelease) {
+ t.Errorf("Expected release {%v}, got {%v}", rel, deletedRelease)
+ }
+}
+
+func mockGetReleaseCustomLabels(mock sqlmock.Sqlmock, key string, namespace string, labels map[string]string) {
+ query := fmt.Sprintf(
+ regexp.QuoteMeta("SELECT %s, %s FROM %s WHERE %s = $1 AND %s = $2"),
+ sqlCustomLabelsTableKeyColumn,
+ sqlCustomLabelsTableValueColumn,
+ sqlCustomLabelsTableName,
+ sqlCustomLabelsTableReleaseKeyColumn,
+ sqlCustomLabelsTableReleaseNamespaceColumn,
+ )
+
+ eq := mock.ExpectQuery(query).
+ WithArgs(key, namespace)
+
+ returnRows := mock.NewRows([]string{
+ sqlCustomLabelsTableKeyColumn,
+ sqlCustomLabelsTableValueColumn,
+ })
+ for k, v := range labels {
+ returnRows.AddRow(k, v)
+ }
+ eq.WillReturnRows(returnRows).RowsWillBeClosed()
+}
+
+func TestSqlCheckAppliedMigrations(t *testing.T) {
+ cases := []struct {
+ migrationsToApply []*migrate.Migration
+ appliedMigrationsIDs []string
+ expectedResult bool
+ errorExplanation string
+ }{
+ {
+ migrationsToApply: []*migrate.Migration{{Id: "init1"}, {Id: "init2"}, {Id: "init3"}},
+ appliedMigrationsIDs: []string{"1", "2", "init1", "3", "init2", "4", "5"},
+ expectedResult: false,
+ errorExplanation: "Has found one migration id \"init3\" as applied, that was not applied",
+ },
+ {
+ migrationsToApply: []*migrate.Migration{{Id: "init1"}, {Id: "init2"}, {Id: "init3"}},
+ appliedMigrationsIDs: []string{"1", "2", "init1", "3", "init2", "4", "init3", "5"},
+ expectedResult: true,
+ errorExplanation: "Has not found one or more migration ids, that was applied",
+ },
+ {
+ migrationsToApply: []*migrate.Migration{{Id: "init"}},
+ appliedMigrationsIDs: []string{"1", "2", "3", "inits", "4", "tinit", "5"},
+ expectedResult: false,
+ errorExplanation: "Has found single \"init\", that was not applied",
+ },
+ {
+ migrationsToApply: []*migrate.Migration{{Id: "init"}},
+ appliedMigrationsIDs: []string{"1", "2", "init", "3", "init2", "4", "init3", "5"},
+ expectedResult: true,
+ errorExplanation: "Has not found single migration id \"init\", that was applied",
+ },
+ }
+ for i, c := range cases {
+ sqlDriver, mock := newTestFixtureSQL(t)
+ rows := sqlmock.NewRows([]string{"id", "applied_at"})
+ for _, id := range c.appliedMigrationsIDs {
+ rows.AddRow(id, time.Time{})
+ }
+ mock.
+ ExpectQuery("").
+ WillReturnRows(rows)
+ mock.ExpectCommit()
+ if sqlDriver.checkAlreadyApplied(c.migrationsToApply) != c.expectedResult {
+ t.Errorf("Test case: %v, Expected: %v, Have: %v, Explanation: %v", i, c.expectedResult, !c.expectedResult, c.errorExplanation)
+ }
+ }
+}
diff --git a/helm/pkg/storage/driver/util.go b/helm/pkg/storage/driver/util.go
new file mode 100644
index 000000000..ca8e23cc2
--- /dev/null
+++ b/helm/pkg/storage/driver/util.go
@@ -0,0 +1,118 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v4/pkg/storage/driver"
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "slices"
+
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+)
+
+var b64 = base64.StdEncoding
+
+var magicGzip = []byte{0x1f, 0x8b, 0x08}
+
+var systemLabels = []string{"name", "owner", "status", "version", "createdAt", "modifiedAt"}
+
+// encodeRelease encodes a release returning a base64 encoded
+// gzipped string representation, or error.
+func encodeRelease(rls *rspb.Release) (string, error) {
+ b, err := json.Marshal(rls)
+ if err != nil {
+ return "", err
+ }
+ var buf bytes.Buffer
+ w, err := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+ if err != nil {
+ return "", err
+ }
+ if _, err = w.Write(b); err != nil {
+ return "", err
+ }
+ w.Close()
+
+ return b64.EncodeToString(buf.Bytes()), nil
+}
+
+// decodeRelease decodes the bytes of data into a release
+// type. Data must contain a base64 encoded gzipped string of a
+// valid release, otherwise an error is returned.
+func decodeRelease(data string) (*rspb.Release, error) {
+ // base64 decode string
+ b, err := b64.DecodeString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // For backwards compatibility with releases that were stored before
+ // compression was introduced we skip decompression if the
+ // gzip magic header is not found
+ if len(b) > 3 && bytes.Equal(b[0:3], magicGzip) {
+ r, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ b2, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ b = b2
+ }
+
+ var rls rspb.Release
+ // unmarshal release object bytes
+ if err := json.Unmarshal(b, &rls); err != nil {
+ return nil, err
+ }
+ return &rls, nil
+}
+
+// Checks if label is system
+func isSystemLabel(key string) bool {
+ return slices.Contains(GetSystemLabels(), key)
+}
+
+// Removes system labels from labels map
+func filterSystemLabels(lbs map[string]string) map[string]string {
+ result := make(map[string]string)
+ for k, v := range lbs {
+ if !isSystemLabel(k) {
+ result[k] = v
+ }
+ }
+ return result
+}
+
+// Checks if labels array contains system labels
+func ContainsSystemLabels(lbs map[string]string) bool {
+ for k := range lbs {
+ if isSystemLabel(k) {
+ return true
+ }
+ }
+ return false
+}
+
+func GetSystemLabels() []string {
+ return systemLabels
+}
diff --git a/helm/pkg/storage/driver/util_test.go b/helm/pkg/storage/driver/util_test.go
new file mode 100644
index 000000000..d16043924
--- /dev/null
+++ b/helm/pkg/storage/driver/util_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestGetSystemLabel(t *testing.T) {
+ if output := GetSystemLabels(); !reflect.DeepEqual(systemLabels, output) {
+ t.Errorf("Expected {%v}, got {%v}", systemLabels, output)
+ }
+}
+
+func TestIsSystemLabel(t *testing.T) {
+ tests := map[string]bool{
+ "name": true,
+ "owner": true,
+ "test": false,
+ "NaMe": false,
+ }
+ for label, result := range tests {
+ if output := isSystemLabel(label); output != result {
+ t.Errorf("Output %t not equal to expected %t", output, result)
+ }
+ }
+}
+
+func TestFilterSystemLabels(t *testing.T) {
+ var tests = [][2]map[string]string{
+ {nil, map[string]string{}},
+ {map[string]string{}, map[string]string{}},
+ {map[string]string{
+ "name": "name",
+ "owner": "owner",
+ "status": "status",
+ "version": "version",
+ "createdAt": "createdAt",
+ "modifiedAt": "modifiedAt",
+ }, map[string]string{}},
+ {map[string]string{
+ "StaTus": "status",
+ "name": "name",
+ "owner": "owner",
+ "key": "value",
+ }, map[string]string{
+ "StaTus": "status",
+ "key": "value",
+ }},
+ {map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ }, map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ }},
+ }
+ for _, test := range tests {
+ if output := filterSystemLabels(test[0]); !reflect.DeepEqual(test[1], output) {
+ t.Errorf("Expected {%v}, got {%v}", test[1], output)
+ }
+ }
+}
+
+func TestContainsSystemLabels(t *testing.T) {
+ var tests = []struct {
+ input map[string]string
+ output bool
+ }{
+ {nil, false},
+ {map[string]string{}, false},
+ {map[string]string{
+ "name": "name",
+ "owner": "owner",
+ "status": "status",
+ "version": "version",
+ "createdAt": "createdAt",
+ "modifiedAt": "modifiedAt",
+ }, true},
+ {map[string]string{
+ "StaTus": "status",
+ "name": "name",
+ "owner": "owner",
+ "key": "value",
+ }, true},
+ {map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ }, false},
+ }
+ for _, test := range tests {
+ if output := ContainsSystemLabels(test.input); !reflect.DeepEqual(test.output, output) {
+ t.Errorf("Expected {%v}, got {%v}", test.output, output)
+ }
+ }
+}
diff --git a/helm/pkg/storage/storage.go b/helm/pkg/storage/storage.go
new file mode 100644
index 000000000..d6c41635b
--- /dev/null
+++ b/helm/pkg/storage/storage.go
@@ -0,0 +1,350 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage // import "helm.sh/helm/v4/pkg/storage"
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "strings"
+
+ "helm.sh/helm/v4/internal/logging"
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+ relutil "helm.sh/helm/v4/pkg/release/v1/util"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+// HelmStorageType is the type field of the Kubernetes storage object which stores the Helm release
+// version. It is modified slightly replacing the '/': sh.helm/release.v1
+// Note: The version 'v1' is incremented if the release object metadata is
+// modified between major releases.
+// This constant is used as a prefix for the Kubernetes storage object name.
+const HelmStorageType = "sh.helm.release.v1"
+
+// Storage represents a storage engine for a Release.
+type Storage struct {
+ driver.Driver
+
+ // MaxHistory specifies the maximum number of historical releases that will
+ // be retained, including the most recent release. Values of 0 or less are
+ // ignored (meaning no limits are imposed).
+ MaxHistory int
+
+ // Embed a LogHolder to provide logger functionality
+ logging.LogHolder
+}
+
+// Get retrieves the release from storage. An error is returned
+// if the storage driver failed to fetch the release, or the
+// release identified by the key, version pair does not exist.
+func (s *Storage) Get(name string, version int) (release.Releaser, error) {
+ s.Logger().Debug("getting release", "key", makeKey(name, version))
+ return s.Driver.Get(makeKey(name, version))
+}
+
+// Create creates a new storage entry holding the release. An
+// error is returned if the storage driver fails to store the
+// release, or a release with an identical key already exists.
+func (s *Storage) Create(rls release.Releaser) error {
+ rac, err := release.NewAccessor(rls)
+ if err != nil {
+ return err
+ }
+ s.Logger().Debug("creating release", "key", makeKey(rac.Name(), rac.Version()))
+ if s.MaxHistory > 0 {
+ // Want to make space for one more release.
+ if err := s.removeLeastRecent(rac.Name(), s.MaxHistory-1); err != nil &&
+ !errors.Is(err, driver.ErrReleaseNotFound) {
+ return err
+ }
+ }
+ return s.Driver.Create(makeKey(rac.Name(), rac.Version()), rls)
+}
+
+// Update updates the release in storage. An error is returned if the
+// storage backend fails to update the release or if the release
+// does not exist.
+func (s *Storage) Update(rls release.Releaser) error {
+ rac, err := release.NewAccessor(rls)
+ if err != nil {
+ return err
+ }
+ s.Logger().Debug("updating release", "key", makeKey(rac.Name(), rac.Version()))
+ return s.Driver.Update(makeKey(rac.Name(), rac.Version()), rls)
+}
+
+// Delete deletes the release from storage. An error is returned if
+// the storage backend fails to delete the release or if the release
+// does not exist.
+func (s *Storage) Delete(name string, version int) (release.Releaser, error) {
+ s.Logger().Debug("deleting release", "key", makeKey(name, version))
+ return s.Driver.Delete(makeKey(name, version))
+}
+
+// ListReleases returns all releases from storage. An error is returned if the
+// storage backend fails to retrieve the releases.
+func (s *Storage) ListReleases() ([]release.Releaser, error) {
+ s.Logger().Debug("listing all releases in storage")
+ return s.List(func(_ release.Releaser) bool { return true })
+}
+
+// releaserToV1Release is a helper function to convert a v1 release passed by interface
+// into the type object.
+func releaserToV1Release(rel release.Releaser) (*rspb.Release, error) {
+ switch r := rel.(type) {
+ case rspb.Release:
+ return &r, nil
+ case *rspb.Release:
+ return r, nil
+ case nil:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unsupported release type: %T", rel)
+ }
+}
+
+// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned
+// if the storage backend fails to retrieve the releases.
+func (s *Storage) ListUninstalled() ([]release.Releaser, error) {
+ s.Logger().Debug("listing uninstalled releases in storage")
+ return s.List(func(rls release.Releaser) bool {
+ rel, err := releaserToV1Release(rls)
+ if err != nil {
+ // This will only happen if calling code does not pass the proper types. This is
+ // a problem with the application and not user data.
+ s.Logger().Error("unable to convert release to typed release", slog.Any("error", err))
+ panic(fmt.Sprintf("unable to convert release to typed release: %s", err))
+ }
+ return relutil.StatusFilter(common.StatusUninstalled).Check(rel)
+ })
+}
+
+// ListDeployed returns all releases with Status == DEPLOYED. An error is returned
+// if the storage backend fails to retrieve the releases.
+func (s *Storage) ListDeployed() ([]release.Releaser, error) {
+ s.Logger().Debug("listing all deployed releases in storage")
+ return s.List(func(rls release.Releaser) bool {
+ rel, err := releaserToV1Release(rls)
+ if err != nil {
+ // This will only happen if calling code does not pass the proper types. This is
+ // a problem with the application and not user data.
+ s.Logger().Error("unable to convert release to typed release", slog.Any("error", err))
+ panic(fmt.Sprintf("unable to convert release to typed release: %s", err))
+ }
+ return relutil.StatusFilter(common.StatusDeployed).Check(rel)
+ })
+}
+
+// Deployed returns the last deployed release with the provided release name, or
+// returns driver.NewErrNoDeployedReleases if not found.
+func (s *Storage) Deployed(name string) (release.Releaser, error) {
+ ls, err := s.DeployedAll(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(ls) == 0 {
+ return nil, driver.NewErrNoDeployedReleases(name)
+ }
+
+ rls, err := releaseListToV1List(ls)
+ if err != nil {
+ return nil, err
+ }
+
+ // If executed concurrently, Helm's database gets corrupted
+ // and multiple releases are DEPLOYED. Take the latest.
+ relutil.Reverse(rls, relutil.SortByRevision)
+
+ return rls[0], nil
+}
+
+func releaseListToV1List(ls []release.Releaser) ([]*rspb.Release, error) {
+ rls := make([]*rspb.Release, 0, len(ls))
+ for _, val := range ls {
+ rel, err := releaserToV1Release(val)
+ if err != nil {
+ return nil, err
+ }
+ rls = append(rls, rel)
+ }
+
+ return rls, nil
+}
+
+// DeployedAll returns all deployed releases with the provided name, or
+// returns driver.NewErrNoDeployedReleases if not found.
+func (s *Storage) DeployedAll(name string) ([]release.Releaser, error) {
+ s.Logger().Debug("getting deployed releases", "name", name)
+
+ ls, err := s.Query(map[string]string{
+ "name": name,
+ "owner": "helm",
+ "status": "deployed",
+ })
+ if err == nil {
+ return ls, nil
+ }
+ if strings.Contains(err.Error(), "not found") {
+ return nil, driver.NewErrNoDeployedReleases(name)
+ }
+ return nil, err
+}
+
+// History returns the revision history for the release with the provided name, or
+// returns driver.ErrReleaseNotFound if no such release name exists.
+func (s *Storage) History(name string) ([]release.Releaser, error) {
+ s.Logger().Debug("getting release history", "name", name)
+
+ return s.Query(map[string]string{"name": name, "owner": "helm"})
+}
+
+// removeLeastRecent removes items from history until the length number of releases
+// does not exceed max.
+//
+// We allow max to be set explicitly so that calling functions can "make space"
+// for the new records they are going to write.
+func (s *Storage) removeLeastRecent(name string, maximum int) error {
+ if maximum < 0 {
+ return nil
+ }
+ h, err := s.History(name)
+ if err != nil {
+ return err
+ }
+ if len(h) <= maximum {
+ return nil
+ }
+ rls, err := releaseListToV1List(h)
+ if err != nil {
+ return err
+ }
+
+ // We want oldest to newest
+ relutil.SortByRevision(rls)
+
+ lastDeployed, err := s.Deployed(name)
+ if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) {
+ return err
+ }
+
+ var toDelete []release.Releaser
+ for _, rel := range rls {
+ // once we have enough releases to delete to reach the maximum, stop
+ if len(rls)-len(toDelete) == maximum {
+ break
+ }
+ if lastDeployed != nil {
+ ldac, err := release.NewAccessor(lastDeployed)
+ if err != nil {
+ return err
+ }
+ if rel.Version != ldac.Version() {
+ toDelete = append(toDelete, rel)
+ }
+ } else {
+ toDelete = append(toDelete, rel)
+ }
+ }
+
+ // Delete as many as possible. In the case of API throughput limitations,
+ // multiple invocations of this function will eventually delete them all.
+ errs := []error{}
+ for _, rel := range toDelete {
+ rac, err := release.NewAccessor(rel)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ err = s.deleteReleaseVersion(name, rac.Version())
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ s.Logger().Debug("pruned records", "count", len(toDelete), "release", name, "errors", len(errs))
+ switch c := len(errs); c {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return fmt.Errorf("encountered %d deletion errors. First is: %w", c, errs[0])
+ }
+}
+
+func (s *Storage) deleteReleaseVersion(name string, version int) error {
+ key := makeKey(name, version)
+ _, err := s.Delete(name, version)
+ if err != nil {
+ s.Logger().Debug("error pruning release", slog.String("key", key), slog.Any("error", err))
+ return err
+ }
+ return nil
+}
+
+// Last fetches the last revision of the named release.
+func (s *Storage) Last(name string) (release.Releaser, error) {
+ s.Logger().Debug("getting last revision", "name", name)
+ h, err := s.History(name)
+ if err != nil {
+ return nil, err
+ }
+ if len(h) == 0 {
+ return nil, fmt.Errorf("no revision for release %q", name)
+ }
+ rls, err := releaseListToV1List(h)
+ if err != nil {
+ return nil, err
+ }
+
+ relutil.Reverse(rls, relutil.SortByRevision)
+ return rls[0], nil
+}
+
+// makeKey concatenates the Kubernetes storage object type, a release name and version
+// into a string with format:```..v```.
+// The storage type is prepended to keep name uniqueness between different
+// release storage types. An example of clash when not using the type:
+// https://github.com/helm/helm/issues/6435.
+// This key is used to uniquely identify storage objects.
+func makeKey(rlsname string, version int) string {
+ return fmt.Sprintf("%s.%s.v%d", HelmStorageType, rlsname, version)
+}
+
+// Init initializes a new storage backend with the driver d.
+// If d is nil, the default in-memory driver is used.
+func Init(d driver.Driver) *Storage {
+ // default driver is in memory
+ if d == nil {
+ d = driver.NewMemory()
+ }
+ s := &Storage{
+ Driver: d,
+ }
+
+ // Get logger from driver if it implements the LoggerSetterGetter interface
+ if ls, ok := d.(logging.LoggerSetterGetter); ok {
+ ls.SetLogger(s.Logger().Handler())
+ } else {
+ // If the driver does not implement the LoggerSetterGetter interface, set the default logger
+ s.SetLogger(slog.Default().Handler())
+ }
+ return s
+}
diff --git a/helm/pkg/storage/storage_test.go b/helm/pkg/storage/storage_test.go
new file mode 100644
index 000000000..5b2a3bba5
--- /dev/null
+++ b/helm/pkg/storage/storage_test.go
@@ -0,0 +1,581 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage // import "helm.sh/helm/v4/pkg/storage"
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/release/common"
+ rspb "helm.sh/helm/v4/pkg/release/v1"
+ "helm.sh/helm/v4/pkg/storage/driver"
+)
+
+func TestStorageCreate(t *testing.T) {
+ // initialize storage
+ storage := Init(driver.NewMemory())
+
+ // create fake release
+ rls := ReleaseTestData{
+ Name: "angry-beaver",
+ Version: 1,
+ }.ToRelease()
+
+ assertErrNil(t.Fatal, storage.Create(rls), "StoreRelease")
+
+ // fetch the release
+ res, err := storage.Get(rls.Name, rls.Version)
+ assertErrNil(t.Fatal, err, "QueryRelease")
+
+ // verify the fetched and created release are the same
+ if !reflect.DeepEqual(rls, res) {
+ t.Fatalf("Expected %v, got %v", rls, res)
+ }
+}
+
+func TestStorageUpdate(t *testing.T) {
+ // initialize storage
+ storage := Init(driver.NewMemory())
+
+ // create fake release
+ rls := ReleaseTestData{
+ Name: "angry-beaver",
+ Version: 1,
+ Status: common.StatusDeployed,
+ }.ToRelease()
+
+ assertErrNil(t.Fatal, storage.Create(rls), "StoreRelease")
+
+ // modify the release
+ rls.Info.Status = common.StatusUninstalled
+ assertErrNil(t.Fatal, storage.Update(rls), "UpdateRelease")
+
+ // retrieve the updated release
+ res, err := storage.Get(rls.Name, rls.Version)
+ assertErrNil(t.Fatal, err, "QueryRelease")
+
+ // verify updated and fetched releases are the same.
+ if !reflect.DeepEqual(rls, res) {
+ t.Fatalf("Expected %v, got %v", rls, res)
+ }
+}
+
+func TestStorageDelete(t *testing.T) {
+ // initialize storage
+ storage := Init(driver.NewMemory())
+
+ // create fake release
+ rls := ReleaseTestData{
+ Name: "angry-beaver",
+ Version: 1,
+ }.ToRelease()
+ rls2 := ReleaseTestData{
+ Name: "angry-beaver",
+ Version: 2,
+ }.ToRelease()
+
+ assertErrNil(t.Fatal, storage.Create(rls), "StoreRelease")
+ assertErrNil(t.Fatal, storage.Create(rls2), "StoreRelease")
+
+ // delete the release
+ res, err := storage.Delete(rls.Name, rls.Version)
+ assertErrNil(t.Fatal, err, "DeleteRelease")
+
+ // verify updated and fetched releases are the same.
+ if !reflect.DeepEqual(rls, res) {
+ t.Fatalf("Expected %v, got %v", rls, res)
+ }
+
+ hist, err := storage.History(rls.Name)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ rhist, err := releaseListToV1List(hist)
+ assert.NoError(t, err)
+
+ // We have now deleted one of the two records.
+ if len(rhist) != 1 {
+ t.Errorf("expected 1 record for deleted release version, got %d", len(hist))
+ }
+
+ if rhist[0].Version != 2 {
+ t.Errorf("Expected version to be 2, got %d", rhist[0].Version)
+ }
+}
+
+func TestStorageList(t *testing.T) {
+ // initialize storage
+ storage := Init(driver.NewMemory())
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: "happy-catdog", Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: "livid-human", Status: common.StatusSuperseded}.ToRelease()
+ rls2 := ReleaseTestData{Name: "relaxed-cat", Status: common.StatusSuperseded}.ToRelease()
+ rls3 := ReleaseTestData{Name: "hungry-hippo", Status: common.StatusDeployed}.ToRelease()
+ rls4 := ReleaseTestData{Name: "angry-beaver", Status: common.StatusDeployed}.ToRelease()
+ rls5 := ReleaseTestData{Name: "opulent-frog", Status: common.StatusUninstalled}.ToRelease()
+ rls6 := ReleaseTestData{Name: "happy-liger", Status: common.StatusUninstalled}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'rls0'")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'rls1'")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'rls2'")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'rls3'")
+ assertErrNil(t.Fatal, storage.Create(rls4), "Storing release 'rls4'")
+ assertErrNil(t.Fatal, storage.Create(rls5), "Storing release 'rls5'")
+ assertErrNil(t.Fatal, storage.Create(rls6), "Storing release 'rls6'")
+ }
+
+ var listTests = []struct {
+ Description string
+ NumExpected int
+ ListFunc func() ([]release.Releaser, error)
+ }{
+ {"ListDeployed", 2, storage.ListDeployed},
+ {"ListReleases", 7, storage.ListReleases},
+ {"ListUninstalled", 2, storage.ListUninstalled},
+ }
+
+ setup()
+
+ for _, tt := range listTests {
+ list, err := tt.ListFunc()
+ assertErrNil(t.Fatal, err, tt.Description)
+ // verify the count of releases returned
+ if len(list) != tt.NumExpected {
+ t.Errorf("ListReleases(%s): expected %d, actual %d",
+ tt.Description,
+ tt.NumExpected,
+ len(list))
+ }
+ }
+}
+
+func TestStorageDeployed(t *testing.T) {
+ storage := Init(driver.NewMemory())
+
+ const name = "angry-bird"
+ const vers = 4
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusSuperseded}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusSuperseded}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusDeployed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+ }
+
+ setup()
+
+ rls, err := storage.Last(name)
+ if err != nil {
+ t.Fatalf("Failed to query for deployed release: %s\n", err)
+ }
+
+ rel, err := releaserToV1Release(rls)
+ assert.NoError(t, err)
+
+ switch {
+ case rls == nil:
+ t.Fatalf("Release is nil")
+ case rel.Name != name:
+ t.Fatalf("Expected release name %q, actual %q\n", name, rel.Name)
+ case rel.Version != vers:
+ t.Fatalf("Expected release version %d, actual %d\n", vers, rel.Version)
+ case rel.Info.Status != common.StatusDeployed:
+ t.Fatalf("Expected release status 'DEPLOYED', actual %s\n", rel.Info.Status.String())
+ }
+}
+
+func TestStorageDeployedWithCorruption(t *testing.T) {
+ storage := Init(driver.NewMemory())
+
+ const name = "angry-bird"
+ const vers = int(4)
+
+ // setup storage with test releases
+ setup := func() {
+ // release records (notice odd order and corruption)
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusDeployed}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusSuperseded}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusDeployed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+ }
+
+ setup()
+
+ rls, err := storage.Deployed(name)
+ if err != nil {
+ t.Fatalf("Failed to query for deployed release: %s\n", err)
+ }
+
+ rel, err := releaserToV1Release(rls)
+ assert.NoError(t, err)
+
+ switch {
+ case rls == nil:
+ t.Fatalf("Release is nil")
+ case rel.Name != name:
+ t.Fatalf("Expected release name %q, actual %q\n", name, rel.Name)
+ case rel.Version != vers:
+ t.Fatalf("Expected release version %d, actual %d\n", vers, rel.Version)
+ case rel.Info.Status != common.StatusDeployed:
+ t.Fatalf("Expected release status 'DEPLOYED', actual %s\n", rel.Info.Status.String())
+ }
+}
+
+func TestStorageHistory(t *testing.T) {
+ storage := Init(driver.NewMemory())
+
+ const name = "angry-bird"
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusSuperseded}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusSuperseded}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusDeployed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+ }
+
+ setup()
+
+ h, err := storage.History(name)
+ if err != nil {
+ t.Fatalf("Failed to query for release history (%q): %s\n", name, err)
+ }
+ if len(h) != 4 {
+ t.Fatalf("Release history (%q) is empty\n", name)
+ }
+}
+
+var errMaxHistoryMockDriverSomethingHappened = errors.New("something happened")
+
+type MaxHistoryMockDriver struct {
+ Driver driver.Driver
+}
+
+func NewMaxHistoryMockDriver(d driver.Driver) *MaxHistoryMockDriver {
+ return &MaxHistoryMockDriver{Driver: d}
+}
+func (d *MaxHistoryMockDriver) Create(key string, rls release.Releaser) error {
+ return d.Driver.Create(key, rls)
+}
+func (d *MaxHistoryMockDriver) Update(key string, rls release.Releaser) error {
+ return d.Driver.Update(key, rls)
+}
+func (d *MaxHistoryMockDriver) Delete(_ string) (release.Releaser, error) {
+ return nil, errMaxHistoryMockDriverSomethingHappened
+}
+func (d *MaxHistoryMockDriver) Get(key string) (release.Releaser, error) {
+ return d.Driver.Get(key)
+}
+func (d *MaxHistoryMockDriver) List(filter func(release.Releaser) bool) ([]release.Releaser, error) {
+ return d.Driver.List(filter)
+}
+func (d *MaxHistoryMockDriver) Query(labels map[string]string) ([]release.Releaser, error) {
+ return d.Driver.Query(labels)
+}
+func (d *MaxHistoryMockDriver) Name() string {
+ return d.Driver.Name()
+}
+
+func TestMaxHistoryErrorHandling(t *testing.T) {
+ //func TestStorageRemoveLeastRecentWithError(t *testing.T) {
+ storage := Init(NewMaxHistoryMockDriver(driver.NewMemory()))
+
+ storage.MaxHistory = 1
+
+ const name = "angry-bird"
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls1 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Driver.Create(makeKey(rls1.Name, rls1.Version), rls1), "Storing release 'angry-bird' (v1)")
+ }
+ setup()
+
+ rls2 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusSuperseded}.ToRelease()
+ wantErr := errMaxHistoryMockDriverSomethingHappened
+ gotErr := storage.Create(rls2)
+ if !errors.Is(gotErr, wantErr) {
+ t.Fatalf("Storing release 'angry-bird' (v2) should return the error %#v, but returned %#v", wantErr, gotErr)
+ }
+}
+
+func TestStorageRemoveLeastRecent(t *testing.T) {
+ storage := Init(driver.NewMemory())
+
+ // Make sure that specifying this at the outset doesn't cause any bugs.
+ storage.MaxHistory = 10
+
+ const name = "angry-bird"
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusSuperseded}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusSuperseded}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusDeployed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+ }
+ setup()
+
+ // Because we have not set a limit, we expect 4.
+ expect := 4
+ if hist, err := storage.History(name); err != nil {
+ t.Fatal(err)
+ } else if len(hist) != expect {
+ t.Fatalf("expected %d items in history, got %d", expect, len(hist))
+ }
+
+ storage.MaxHistory = 3
+ rls5 := ReleaseTestData{Name: name, Version: 5, Status: common.StatusDeployed}.ToRelease()
+ assertErrNil(t.Fatal, storage.Create(rls5), "Storing release 'angry-bird' (v5)")
+
+ // On inserting the 5th record, we expect two records to be pruned from history.
+ hist, err := storage.History(name)
+ assert.NoError(t, err)
+ rhist, err := releaseListToV1List(hist)
+ assert.NoError(t, err)
+ if err != nil {
+ t.Fatal(err)
+ } else if len(rhist) != storage.MaxHistory {
+ for _, item := range rhist {
+ t.Logf("%s %v", item.Name, item.Version)
+ }
+ t.Fatalf("expected %d items in history, got %d", storage.MaxHistory, len(rhist))
+ }
+
+ // We expect the existing records to be 3, 4, and 5.
+ for i, item := range rhist {
+ v := item.Version
+ if expect := i + 3; v != expect {
+ t.Errorf("Expected release %d, got %d", expect, v)
+ }
+ }
+}
+
+func TestStorageDoNotDeleteDeployed(t *testing.T) {
+ storage := Init(driver.NewMemory())
+ storage.MaxHistory = 3
+
+ const name = "angry-bird"
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusDeployed}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusFailed}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusFailed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+ }
+ setup()
+
+ rls5 := ReleaseTestData{Name: name, Version: 5, Status: common.StatusFailed}.ToRelease()
+ assertErrNil(t.Fatal, storage.Create(rls5), "Storing release 'angry-bird' (v5)")
+
+ // On inserting the 5th record, we expect a total of 3 releases, but we expect version 2
+ // (the only deployed release), to still exist
+ hist, err := storage.History(name)
+ if err != nil {
+ t.Fatal(err)
+ } else if len(hist) != storage.MaxHistory {
+ rhist, err := releaseListToV1List(hist)
+ assert.NoError(t, err)
+ for _, item := range rhist {
+ t.Logf("%s %v", item.Name, item.Version)
+ }
+ t.Fatalf("expected %d items in history, got %d", storage.MaxHistory, len(rhist))
+ }
+
+ expectedVersions := map[int]bool{
+ 2: true,
+ 4: true,
+ 5: true,
+ }
+
+ rhist, err := releaseListToV1List(hist)
+ assert.NoError(t, err)
+ for _, item := range rhist {
+ if !expectedVersions[item.Version] {
+ t.Errorf("Release version %d, found when not expected", item.Version)
+ }
+ }
+}
+
+func TestStorageLast(t *testing.T) {
+ storage := Init(driver.NewMemory())
+
+ const name = "angry-bird"
+
+ // Set up storage with test releases.
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusSuperseded}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusSuperseded}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusSuperseded}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusFailed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+ }
+
+ setup()
+
+ h, err := storage.Last(name)
+ if err != nil {
+ t.Fatalf("Failed to query for release history (%q): %s\n", name, err)
+ }
+
+ rel, err := releaserToV1Release(h)
+ assert.NoError(t, err)
+
+ if rel.Version != 4 {
+ t.Errorf("Expected revision 4, got %d", rel.Version)
+ }
+}
+
+// TestUpgradeInitiallyFailedReleaseWithHistoryLimit tests a case when there are no deployed release yet, but history limit has been
+// reached: the has-no-deployed-releases error should not occur in such case.
+func TestUpgradeInitiallyFailedReleaseWithHistoryLimit(t *testing.T) {
+ storage := Init(driver.NewMemory())
+ storage.MaxHistory = 4
+
+ const name = "angry-bird"
+
+ // setup storage with test releases
+ setup := func() {
+ // release records
+ rls0 := ReleaseTestData{Name: name, Version: 1, Status: common.StatusFailed}.ToRelease()
+ rls1 := ReleaseTestData{Name: name, Version: 2, Status: common.StatusFailed}.ToRelease()
+ rls2 := ReleaseTestData{Name: name, Version: 3, Status: common.StatusFailed}.ToRelease()
+ rls3 := ReleaseTestData{Name: name, Version: 4, Status: common.StatusFailed}.ToRelease()
+
+ // create the release records in the storage
+ assertErrNil(t.Fatal, storage.Create(rls0), "Storing release 'angry-bird' (v1)")
+ assertErrNil(t.Fatal, storage.Create(rls1), "Storing release 'angry-bird' (v2)")
+ assertErrNil(t.Fatal, storage.Create(rls2), "Storing release 'angry-bird' (v3)")
+ assertErrNil(t.Fatal, storage.Create(rls3), "Storing release 'angry-bird' (v4)")
+
+ hist, err := storage.History(name)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ wantHistoryLen := 4
+ if len(hist) != wantHistoryLen {
+ t.Fatalf("expected history of release %q to contain %d releases, got %d", name, wantHistoryLen, len(hist))
+ }
+ }
+
+ setup()
+
+ rls5 := ReleaseTestData{Name: name, Version: 5, Status: common.StatusFailed}.ToRelease()
+ err := storage.Create(rls5)
+ if err != nil {
+ t.Fatalf("Failed to create a new release version: %s", err)
+ }
+
+ hist, err := storage.History(name)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ rhist, err := releaseListToV1List(hist)
+ assert.NoError(t, err)
+ for i, rel := range rhist {
+ wantVersion := i + 2
+ if rel.Version != wantVersion {
+ t.Fatalf("Expected history release %d version to equal %d, got %d", i+1, wantVersion, rel.Version)
+ }
+
+ wantStatus := common.StatusFailed
+ if rel.Info.Status != wantStatus {
+ t.Fatalf("Expected history release %d status to equal %q, got %q", i+1, wantStatus, rel.Info.Status)
+ }
+ }
+}
+
+type ReleaseTestData struct {
+ Name string
+ Version int
+ Manifest string
+ Namespace string
+ Status common.Status
+}
+
+func (test ReleaseTestData) ToRelease() *rspb.Release {
+ return &rspb.Release{
+ Name: test.Name,
+ Version: test.Version,
+ Manifest: test.Manifest,
+ Namespace: test.Namespace,
+ Info: &rspb.Info{Status: test.Status},
+ }
+}
+
+func assertErrNil(eh func(args ...interface{}), err error, message string) {
+ if err != nil {
+ eh(fmt.Sprintf("%s: %q", message, err))
+ }
+}
diff --git a/helm/pkg/strvals/doc.go b/helm/pkg/strvals/doc.go
new file mode 100644
index 000000000..e9931300c
--- /dev/null
+++ b/helm/pkg/strvals/doc.go
@@ -0,0 +1,33 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package strvals provides tools for working with strval lines.
+
+Helm supports a compressed format for YAML settings which we call strvals.
+The format is roughly like this:
+
+ name=value,topname.subname=value
+
+The above is equivalent to the YAML document
+
+ name: value
+ topname:
+ subname: value
+
+This package provides a parser and utilities for converting the strvals format
+to other formats.
+*/
+package strvals
diff --git a/helm/pkg/strvals/fuzz_test.go b/helm/pkg/strvals/fuzz_test.go
new file mode 100644
index 000000000..68b43c8ec
--- /dev/null
+++ b/helm/pkg/strvals/fuzz_test.go
@@ -0,0 +1,26 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strvals
+
+import (
+ "testing"
+)
+
+func FuzzParse(f *testing.F) {
+ f.Fuzz(func(_ *testing.T, data string) {
+ _, _ = Parse(data)
+ })
+}
diff --git a/helm/pkg/strvals/literal_parser.go b/helm/pkg/strvals/literal_parser.go
new file mode 100644
index 000000000..d5d4c25b4
--- /dev/null
+++ b/helm/pkg/strvals/literal_parser.go
@@ -0,0 +1,243 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strvals
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// ParseLiteral parses a set line interpreting the value as a literal string.
+//
+// A set line is of the form name1=value1
+func ParseLiteral(s string) (map[string]interface{}, error) {
+ vals := map[string]interface{}{}
+ scanner := bytes.NewBufferString(s)
+ t := newLiteralParser(scanner, vals)
+ err := t.parse()
+ return vals, err
+}
+
+// ParseLiteralInto parses a strvals line and merges the result into dest.
+// The value is interpreted as a literal string.
+//
+// If the strval string has a key that exists in dest, it overwrites the
+// dest version.
+func ParseLiteralInto(s string, dest map[string]interface{}) error {
+ scanner := bytes.NewBufferString(s)
+ t := newLiteralParser(scanner, dest)
+ return t.parse()
+}
+
+// literalParser is a simple parser that takes a strvals line and parses
+// it into a map representation.
+//
+// Values are interpreted as a literal string.
+//
+// where sc is the source of the original data being parsed
+// where data is the final parsed data from the parses with correct types
+type literalParser struct {
+ sc *bytes.Buffer
+ data map[string]interface{}
+}
+
+func newLiteralParser(sc *bytes.Buffer, data map[string]interface{}) *literalParser {
+ return &literalParser{sc: sc, data: data}
+}
+
+func (t *literalParser) parse() error {
+ for {
+ err := t.key(t.data, 0)
+ if err == nil {
+ continue
+ }
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return err
+ }
+}
+
+func runesUntilLiteral(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) {
+ v := []rune{}
+ for {
+ switch r, _, e := in.ReadRune(); {
+ case e != nil:
+ return v, r, e
+ case inMap(r, stop):
+ return v, r, nil
+ default:
+ v = append(v, r)
+ }
+ }
+}
+
+func (t *literalParser) key(data map[string]interface{}, nestedNameLevel int) (reterr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ reterr = fmt.Errorf("unable to parse key: %s", r)
+ }
+ }()
+ stop := runeSet([]rune{'=', '[', '.'})
+ for {
+ switch key, lastRune, err := runesUntilLiteral(t.sc, stop); {
+ case err != nil:
+ if len(key) == 0 {
+ return err
+ }
+ return fmt.Errorf("key %q has no value", string(key))
+
+ case lastRune == '=':
+ // found end of key: swallow the '=' and get the value
+ value, err := t.val()
+ if err == nil && err != io.EOF {
+ return err
+ }
+ set(data, string(key), string(value))
+ return nil
+
+ case lastRune == '.':
+ // Check value name is within the maximum nested name level
+ nestedNameLevel++
+ if nestedNameLevel > MaxNestedNameLevel {
+ return fmt.Errorf("value name nested level is greater than maximum supported nested level of %d", MaxNestedNameLevel)
+ }
+
+ // first, create or find the target map in the given data
+ inner := map[string]interface{}{}
+ if _, ok := data[string(key)]; ok {
+ inner = data[string(key)].(map[string]interface{})
+ }
+
+ // recurse on sub-tree with remaining data
+ err := t.key(inner, nestedNameLevel)
+ if err == nil && len(inner) == 0 {
+ return fmt.Errorf("key map %q has no value", string(key))
+ }
+ if len(inner) != 0 {
+ set(data, string(key), inner)
+ }
+ return err
+
+ case lastRune == '[':
+ // We are in a list index context, so we need to set an index.
+ i, err := t.keyIndex()
+ if err != nil {
+ return fmt.Errorf("error parsing index: %w", err)
+ }
+ kk := string(key)
+
+ // find or create target list
+ list := []interface{}{}
+ if _, ok := data[kk]; ok {
+ list = data[kk].([]interface{})
+ }
+
+ // now we need to get the value after the ]
+ list, err = t.listItem(list, i, nestedNameLevel)
+ set(data, kk, list)
+ return err
+ }
+ }
+}
+
+func (t *literalParser) keyIndex() (int, error) {
+ // First, get the key.
+ stop := runeSet([]rune{']'})
+ v, _, err := runesUntilLiteral(t.sc, stop)
+ if err != nil {
+ return 0, err
+ }
+
+ // v should be the index
+ return strconv.Atoi(string(v))
+}
+
+func (t *literalParser) listItem(list []interface{}, i, nestedNameLevel int) ([]interface{}, error) {
+ if i < 0 {
+ return list, fmt.Errorf("negative %d index not allowed", i)
+ }
+ stop := runeSet([]rune{'[', '.', '='})
+
+ switch key, lastRune, err := runesUntilLiteral(t.sc, stop); {
+ case len(key) > 0:
+ return list, fmt.Errorf("unexpected data at end of array index: %q", key)
+
+ case err != nil:
+ return list, err
+
+ case lastRune == '=':
+ value, err := t.val()
+ if err != nil && !errors.Is(err, io.EOF) {
+ return list, err
+ }
+ return setIndex(list, i, string(value))
+
+ case lastRune == '.':
+ // we have a nested object. Send to t.key
+ inner := map[string]interface{}{}
+ if len(list) > i {
+ var ok bool
+ inner, ok = list[i].(map[string]interface{})
+ if !ok {
+ // We have indices out of order. Initialize empty value.
+ list[i] = map[string]interface{}{}
+ inner = list[i].(map[string]interface{})
+ }
+ }
+
+ // recurse
+ err := t.key(inner, nestedNameLevel)
+ if err != nil {
+ return list, err
+ }
+ return setIndex(list, i, inner)
+
+ case lastRune == '[':
+ // now we have a nested list. Read the index and handle.
+ nextI, err := t.keyIndex()
+ if err != nil {
+ return list, fmt.Errorf("error parsing index: %w", err)
+ }
+ var crtList []interface{}
+ if len(list) > i {
+ // If nested list already exists, take the value of list to next cycle.
+ existed := list[i]
+ if existed != nil {
+ crtList = list[i].([]interface{})
+ }
+ }
+
+ // Now we need to get the value after the ].
+ list2, err := t.listItem(crtList, nextI, nestedNameLevel)
+ if err != nil {
+ return list, err
+ }
+ return setIndex(list, i, list2)
+
+ default:
+ return nil, fmt.Errorf("parse error: unexpected token %v", lastRune)
+ }
+}
+
+func (t *literalParser) val() ([]rune, error) {
+ stop := runeSet([]rune{})
+ v, _, err := runesUntilLiteral(t.sc, stop)
+ return v, err
+}
diff --git a/helm/pkg/strvals/literal_parser_test.go b/helm/pkg/strvals/literal_parser_test.go
new file mode 100644
index 000000000..6a76458f5
--- /dev/null
+++ b/helm/pkg/strvals/literal_parser_test.go
@@ -0,0 +1,481 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strvals
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "sigs.k8s.io/yaml"
+)
+
+func TestParseLiteral(t *testing.T) {
+ cases := []struct {
+ str string
+ expect map[string]interface{}
+ err bool
+ }{
+ {
+ str: "name",
+ err: true,
+ },
+ {
+ str: "name=",
+ expect: map[string]interface{}{"name": ""},
+ },
+ {
+ str: "name=value",
+ expect: map[string]interface{}{"name": "value"},
+ err: false,
+ },
+ {
+ str: "long_int_string=1234567890",
+ expect: map[string]interface{}{"long_int_string": "1234567890"},
+ err: false,
+ },
+ {
+ str: "boolean=true",
+ expect: map[string]interface{}{"boolean": "true"},
+ err: false,
+ },
+ {
+ str: "is_null=null",
+ expect: map[string]interface{}{"is_null": "null"},
+ err: false,
+ },
+ {
+ str: "zero=0",
+ expect: map[string]interface{}{"zero": "0"},
+ err: false,
+ },
+ {
+ str: "name1=null,name2=value2",
+ expect: map[string]interface{}{"name1": "null,name2=value2"},
+ err: false,
+ },
+ {
+ str: "name1=value,,,tail",
+ expect: map[string]interface{}{"name1": "value,,,tail"},
+ err: false,
+ },
+ {
+ str: "leading_zeros=00009",
+ expect: map[string]interface{}{"leading_zeros": "00009"},
+ err: false,
+ },
+ {
+ str: "name=one two three",
+ expect: map[string]interface{}{"name": "one two three"},
+ err: false,
+ },
+ {
+ str: "outer.inner=value",
+ expect: map[string]interface{}{"outer": map[string]interface{}{"inner": "value"}},
+ err: false,
+ },
+ {
+ str: "outer.middle.inner=value",
+ expect: map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}},
+ err: false,
+ },
+ {
+ str: "name1.name2",
+ err: true,
+ },
+ {
+ str: "name1.name2=",
+ expect: map[string]interface{}{"name1": map[string]interface{}{"name2": ""}},
+ err: false,
+ },
+ {
+ str: "name1.=name2",
+ err: true,
+ },
+ {
+ str: "name1.,name2",
+ err: true,
+ },
+ {
+ str: "name1={value1,value2}",
+ expect: map[string]interface{}{"name1": "{value1,value2}"},
+ },
+
+ // List support
+ {
+ str: "list[0]=foo",
+ expect: map[string]interface{}{"list": []string{"foo"}},
+ err: false,
+ },
+ {
+ str: "list[0].foo=bar",
+ expect: map[string]interface{}{
+ "list": []interface{}{
+ map[string]interface{}{"foo": "bar"},
+ },
+ },
+ err: false,
+ },
+ {
+ str: "list[-30].hello=world",
+ err: true,
+ },
+ {
+ str: "list[3]=bar",
+ expect: map[string]interface{}{"list": []interface{}{nil, nil, nil, "bar"}},
+ err: false,
+ },
+ {
+ str: "illegal[0]name.foo=bar",
+ err: true,
+ },
+ {
+ str: "noval[0]",
+ expect: map[string]interface{}{"noval": []interface{}{}},
+ err: false,
+ },
+ {
+ str: "noval[0]=",
+ expect: map[string]interface{}{"noval": []interface{}{""}},
+ err: false,
+ },
+ {
+ str: "nested[0][0]=1",
+ expect: map[string]interface{}{"nested": []interface{}{[]interface{}{"1"}}},
+ err: false,
+ },
+ {
+ str: "nested[1][1]=1",
+ expect: map[string]interface{}{"nested": []interface{}{nil, []interface{}{nil, "1"}}},
+ err: false,
+ },
+ {
+ str: "name1.name2[0].foo=bar",
+ expect: map[string]interface{}{
+ "name1": map[string]interface{}{
+ "name2": []map[string]interface{}{{"foo": "bar"}},
+ },
+ },
+ },
+ {
+ str: "name1.name2[1].foo=bar",
+ expect: map[string]interface{}{
+ "name1": map[string]interface{}{
+ "name2": []map[string]interface{}{nil, {"foo": "bar"}},
+ },
+ },
+ },
+ {
+ str: "name1.name2[1].foo=bar",
+ expect: map[string]interface{}{
+ "name1": map[string]interface{}{
+ "name2": []map[string]interface{}{nil, {"foo": "bar"}},
+ },
+ },
+ },
+ {
+ str: "]={}].",
+ expect: map[string]interface{}{"]": "{}]."},
+ err: false,
+ },
+
+ // issue test cases: , = $ ( ) { } . \ \\
+ {
+ str: "name=val,val",
+ expect: map[string]interface{}{"name": "val,val"},
+ err: false,
+ },
+ {
+ str: "name=val.val",
+ expect: map[string]interface{}{"name": "val.val"},
+ err: false,
+ },
+ {
+ str: "name=val=val",
+ expect: map[string]interface{}{"name": "val=val"},
+ err: false,
+ },
+ {
+ str: "name=val$val",
+ expect: map[string]interface{}{"name": "val$val"},
+ err: false,
+ },
+ {
+ str: "name=(value",
+ expect: map[string]interface{}{"name": "(value"},
+ err: false,
+ },
+ {
+ str: "name=value)",
+ expect: map[string]interface{}{"name": "value)"},
+ err: false,
+ },
+ {
+ str: "name=(value)",
+ expect: map[string]interface{}{"name": "(value)"},
+ err: false,
+ },
+ {
+ str: "name={value",
+ expect: map[string]interface{}{"name": "{value"},
+ err: false,
+ },
+ {
+ str: "name=value}",
+ expect: map[string]interface{}{"name": "value}"},
+ err: false,
+ },
+ {
+ str: "name={value}",
+ expect: map[string]interface{}{"name": "{value}"},
+ err: false,
+ },
+ {
+ str: "name={value1,value2}",
+ expect: map[string]interface{}{"name": "{value1,value2}"},
+ err: false,
+ },
+ {
+ str: `name=val\val`,
+ expect: map[string]interface{}{"name": `val\val`},
+ err: false,
+ },
+ {
+ str: `name=val\\val`,
+ expect: map[string]interface{}{"name": `val\\val`},
+ err: false,
+ },
+ {
+ str: `name=val\\\val`,
+ expect: map[string]interface{}{"name": `val\\\val`},
+ err: false,
+ },
+ {
+ str: `name={val,.?*v\0a!l)some`,
+ expect: map[string]interface{}{"name": `{val,.?*v\0a!l)some`},
+ err: false,
+ },
+ {
+ str: `name=em%GT)tqUDqz,i-\h+Mbqs-!:.m\\rE=mkbM#rR}@{-k@`,
+ expect: map[string]interface{}{"name": `em%GT)tqUDqz,i-\h+Mbqs-!:.m\\rE=mkbM#rR}@{-k@`},
+ },
+ }
+
+ for _, tt := range cases {
+ got, err := ParseLiteral(tt.str)
+ if err != nil {
+ if !tt.err {
+ t.Fatalf("%s: %s", tt.str, err)
+ }
+ continue
+ }
+
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.str)
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
+ }
+ }
+}
+
+func TestParseLiteralInto(t *testing.T) {
+ tests := []struct {
+ input string
+ input2 string
+ got map[string]interface{}
+ expect map[string]interface{}
+ err bool
+ }{
+ {
+ input: "outer.inner1=value1,outer.inner3=value3,outer.inner4=4",
+ got: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "overwrite",
+ "inner2": "value2",
+ },
+ },
+ expect: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "value1,outer.inner3=value3,outer.inner4=4",
+ "inner2": "value2",
+ }},
+ err: false,
+ },
+ {
+ input: "listOuter[0][0].type=listValue",
+ input2: "listOuter[0][0].status=alive",
+ got: map[string]interface{}{},
+ expect: map[string]interface{}{
+ "listOuter": [][]interface{}{{map[string]string{
+ "type": "listValue",
+ "status": "alive",
+ }}},
+ },
+ err: false,
+ },
+ {
+ input: "listOuter[0][0].type=listValue",
+ input2: "listOuter[1][0].status=alive",
+ got: map[string]interface{}{},
+ expect: map[string]interface{}{
+ "listOuter": [][]interface{}{
+ {
+ map[string]string{"type": "listValue"},
+ },
+ {
+ map[string]string{"status": "alive"},
+ },
+ },
+ },
+ err: false,
+ },
+ {
+ input: "listOuter[0][1][0].type=listValue",
+ input2: "listOuter[0][0][1].status=alive",
+ got: map[string]interface{}{
+ "listOuter": []interface{}{
+ []interface{}{
+ []interface{}{
+ map[string]string{"exited": "old"},
+ },
+ },
+ },
+ },
+ expect: map[string]interface{}{
+ "listOuter": [][][]interface{}{
+ {
+ {
+ map[string]string{"exited": "old"},
+ map[string]string{"status": "alive"},
+ },
+ {
+ map[string]string{"type": "listValue"},
+ },
+ },
+ },
+ },
+ err: false,
+ },
+ }
+
+ for _, tt := range tests {
+ if err := ParseLiteralInto(tt.input, tt.got); err != nil {
+ t.Fatal(err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.input)
+ }
+
+ if tt.input2 != "" {
+ if err := ParseLiteralInto(tt.input2, tt.got); err != nil {
+ t.Fatal(err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.input2)
+ }
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ y2, err := yaml.Marshal(tt.got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.input, y1, y2)
+ }
+ }
+}
+
+func TestParseLiteralNestedLevels(t *testing.T) {
+ var keyMultipleNestedLevels strings.Builder
+
+ for i := 1; i <= MaxNestedNameLevel+2; i++ {
+ tmpStr := fmt.Sprintf("name%d", i)
+ if i <= MaxNestedNameLevel+1 {
+ tmpStr = tmpStr + "."
+ }
+ keyMultipleNestedLevels.WriteString(tmpStr)
+ }
+
+ tests := []struct {
+ str string
+ expect map[string]interface{}
+ err bool
+ errStr string
+ }{
+ {
+ "outer.middle.inner=value",
+ map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}},
+ false,
+ "",
+ },
+ {
+ str: keyMultipleNestedLevels.String() + "=value",
+ err: true,
+ errStr: fmt.Sprintf("value name nested level is greater than maximum supported nested level of %d", MaxNestedNameLevel),
+ },
+ }
+
+ for _, tt := range tests {
+ got, err := ParseLiteral(tt.str)
+ if err != nil {
+ if tt.err {
+ if tt.errStr != "" {
+ if err.Error() != tt.errStr {
+ t.Errorf("Expected error: %s. Got error: %s", tt.errStr, err.Error())
+ }
+ }
+ continue
+ }
+ t.Fatalf("%s: %s", tt.str, err)
+ }
+
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.str)
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
+ }
+ }
+}
diff --git a/helm/pkg/strvals/parser.go b/helm/pkg/strvals/parser.go
new file mode 100644
index 000000000..8eb761dce
--- /dev/null
+++ b/helm/pkg/strvals/parser.go
@@ -0,0 +1,560 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strvals
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "sigs.k8s.io/yaml"
+)
+
+// ErrNotList indicates that a non-list was treated as a list.
+var ErrNotList = errors.New("not a list")
+
+// MaxIndex is the maximum index that will be allowed by setIndex.
+// The default value 65536 = 1024 * 64
+var MaxIndex = 65536
+
+// MaxNestedNameLevel is the maximum level of nesting for a value name that
+// will be allowed.
+var MaxNestedNameLevel = 30
+
+// ToYAML takes a string of arguments and converts to a YAML document.
+func ToYAML(s string) (string, error) {
+ m, err := Parse(s)
+ if err != nil {
+ return "", err
+ }
+ d, err := yaml.Marshal(m)
+ return strings.TrimSuffix(string(d), "\n"), err
+}
+
+// Parse parses a set line.
+//
+// A set line is of the form name1=value1,name2=value2
+func Parse(s string) (map[string]interface{}, error) {
+ vals := map[string]interface{}{}
+ scanner := bytes.NewBufferString(s)
+ t := newParser(scanner, vals, false)
+ err := t.parse()
+ return vals, err
+}
+
+// ParseString parses a set line and forces a string value.
+//
+// A set line is of the form name1=value1,name2=value2
+func ParseString(s string) (map[string]interface{}, error) {
+ vals := map[string]interface{}{}
+ scanner := bytes.NewBufferString(s)
+ t := newParser(scanner, vals, true)
+ err := t.parse()
+ return vals, err
+}
+
+// ParseInto parses a strvals line and merges the result into dest.
+//
+// If the strval string has a key that exists in dest, it overwrites the
+// dest version.
+func ParseInto(s string, dest map[string]interface{}) error {
+ scanner := bytes.NewBufferString(s)
+ t := newParser(scanner, dest, false)
+ return t.parse()
+}
+
+// ParseFile parses a set line, but its final value is loaded from the file at the path specified by the original value.
+//
+// A set line is of the form name1=path1,name2=path2
+//
+// When the files at path1 and path2 contained "val1" and "val2" respectively, the set line is consumed as
+// name1=val1,name2=val2
+func ParseFile(s string, reader RunesValueReader) (map[string]interface{}, error) {
+ vals := map[string]interface{}{}
+ scanner := bytes.NewBufferString(s)
+ t := newFileParser(scanner, vals, reader)
+ err := t.parse()
+ return vals, err
+}
+
+// ParseIntoString parses a strvals line and merges the result into dest.
+//
+// This method always returns a string as the value.
+func ParseIntoString(s string, dest map[string]interface{}) error {
+ scanner := bytes.NewBufferString(s)
+ t := newParser(scanner, dest, true)
+ return t.parse()
+}
+
+// ParseJSON parses a string with format key1=val1, key2=val2, ...
+// where values are json strings (null, or scalars, or arrays, or objects).
+// An empty val is treated as null.
+//
+// If a key exists in dest, the new value overwrites the dest version.
+func ParseJSON(s string, dest map[string]interface{}) error {
+ scanner := bytes.NewBufferString(s)
+ t := newJSONParser(scanner, dest)
+ return t.parse()
+}
+
+// ParseIntoFile parses a filevals line and merges the result into dest.
+//
+// This method always returns a string as the value.
+func ParseIntoFile(s string, dest map[string]interface{}, reader RunesValueReader) error {
+ scanner := bytes.NewBufferString(s)
+ t := newFileParser(scanner, dest, reader)
+ return t.parse()
+}
+
+// RunesValueReader is a function that takes the given value (a slice of runes)
+// and returns the parsed value
+type RunesValueReader func([]rune) (interface{}, error)
+
+// parser is a simple parser that takes a strvals line and parses it into a
+// map representation.
+//
+// where sc is the source of the original data being parsed
+// where data is the final parsed data from the parses with correct types
+type parser struct {
+ sc *bytes.Buffer
+ data map[string]interface{}
+ reader RunesValueReader
+ isjsonval bool
+}
+
+func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser {
+ stringConverter := func(rs []rune) (interface{}, error) {
+ return typedVal(rs, stringBool), nil
+ }
+ return &parser{sc: sc, data: data, reader: stringConverter}
+}
+
+func newJSONParser(sc *bytes.Buffer, data map[string]interface{}) *parser {
+ return &parser{sc: sc, data: data, reader: nil, isjsonval: true}
+}
+
+func newFileParser(sc *bytes.Buffer, data map[string]interface{}, reader RunesValueReader) *parser {
+ return &parser{sc: sc, data: data, reader: reader}
+}
+
+func (t *parser) parse() error {
+ for {
+ err := t.key(t.data, 0)
+ if err == nil {
+ continue
+ }
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return err
+ }
+}
+
+func runeSet(r []rune) map[rune]bool {
+ s := make(map[rune]bool, len(r))
+ for _, rr := range r {
+ s[rr] = true
+ }
+ return s
+}
+
+func (t *parser) key(data map[string]interface{}, nestedNameLevel int) (reterr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ reterr = fmt.Errorf("unable to parse key: %s", r)
+ }
+ }()
+ stop := runeSet([]rune{'=', '[', ',', '.'})
+ for {
+ switch k, last, err := runesUntil(t.sc, stop); {
+ case err != nil:
+ if len(k) == 0 {
+ return err
+ }
+ return fmt.Errorf("key %q has no value", string(k))
+ //set(data, string(k), "")
+ //return err
+ case last == '[':
+ // We are in a list index context, so we need to set an index.
+ i, err := t.keyIndex()
+ if err != nil {
+ return fmt.Errorf("error parsing index: %w", err)
+ }
+ kk := string(k)
+ // Find or create target list
+ list := []interface{}{}
+ if _, ok := data[kk]; ok {
+ list = data[kk].([]interface{})
+ }
+
+ // Now we need to get the value after the ].
+ list, err = t.listItem(list, i, nestedNameLevel)
+ set(data, kk, list)
+ return err
+ case last == '=':
+ if t.isjsonval {
+ empval, err := t.emptyVal()
+ if err != nil {
+ return err
+ }
+ if empval {
+ set(data, string(k), nil)
+ return nil
+ }
+ // parse jsonvals by using Go’s JSON standard library
+ // Decode is preferred to Unmarshal in order to parse just the json parts of the list key1=jsonval1,key2=jsonval2,...
+ // Since Decode has its own buffer that consumes more characters (from underlying t.sc) than the ones actually decoded,
+ // we invoke Decode on a separate reader built with a copy of what is left in t.sc. After Decode is executed, we
+ // discard in t.sc the chars of the decoded json value (the number of those characters is returned by InputOffset).
+ var jsonval interface{}
+ dec := json.NewDecoder(strings.NewReader(t.sc.String()))
+ if err = dec.Decode(&jsonval); err != nil {
+ return err
+ }
+ set(data, string(k), jsonval)
+ if _, err = io.CopyN(io.Discard, t.sc, dec.InputOffset()); err != nil {
+ return err
+ }
+ // skip possible blanks and comma
+ _, err = t.emptyVal()
+ return err
+ }
+ // End of key. Consume =, Get value.
+ // FIXME: Get value list first
+ vl, e := t.valList()
+ switch e {
+ case nil:
+ set(data, string(k), vl)
+ return nil
+ case io.EOF:
+ set(data, string(k), "")
+ return e
+ case ErrNotList:
+ rs, e := t.val()
+ if e != nil && e != io.EOF {
+ return e
+ }
+ v, e := t.reader(rs)
+ set(data, string(k), v)
+ return e
+ default:
+ return e
+ }
+ case last == ',':
+ // No value given. Set the value to empty string. Return error.
+ set(data, string(k), "")
+ return fmt.Errorf("key %q has no value (cannot end with ,)", string(k))
+ case last == '.':
+ // Check value name is within the maximum nested name level
+ nestedNameLevel++
+ if nestedNameLevel > MaxNestedNameLevel {
+ return fmt.Errorf("value name nested level is greater than maximum supported nested level of %d", MaxNestedNameLevel)
+ }
+
+ // First, create or find the target map.
+ inner := map[string]interface{}{}
+ if _, ok := data[string(k)]; ok {
+ inner = data[string(k)].(map[string]interface{})
+ }
+
+ // Recurse
+ e := t.key(inner, nestedNameLevel)
+ if e == nil && len(inner) == 0 {
+ return fmt.Errorf("key map %q has no value", string(k))
+ }
+ if len(inner) != 0 {
+ set(data, string(k), inner)
+ }
+ return e
+ }
+ }
+}
+
+func set(data map[string]interface{}, key string, val interface{}) {
+ // If key is empty, don't set it.
+ if len(key) == 0 {
+ return
+ }
+ data[key] = val
+}
+
+func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) {
+ // There are possible index values that are out of range on a target system
+ // causing a panic. This will catch the panic and return an error instead.
+ // The value of the index that causes a panic varies from system to system.
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("error processing index %d: %s", index, r)
+ }
+ }()
+
+ if index < 0 {
+ return list, fmt.Errorf("negative %d index not allowed", index)
+ }
+ if index > MaxIndex {
+ return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex)
+ }
+ if len(list) <= index {
+ newlist := make([]interface{}, index+1)
+ copy(newlist, list)
+ list = newlist
+ }
+ list[index] = val
+ return list, nil
+}
+
+func (t *parser) keyIndex() (int, error) {
+ // First, get the key.
+ stop := runeSet([]rune{']'})
+ v, _, err := runesUntil(t.sc, stop)
+ if err != nil {
+ return 0, err
+ }
+ // v should be the index
+ return strconv.Atoi(string(v))
+
+}
+
+func (t *parser) listItem(list []interface{}, i, nestedNameLevel int) ([]interface{}, error) {
+ if i < 0 {
+ return list, fmt.Errorf("negative %d index not allowed", i)
+ }
+ stop := runeSet([]rune{'[', '.', '='})
+ switch k, last, err := runesUntil(t.sc, stop); {
+ case len(k) > 0:
+ return list, fmt.Errorf("unexpected data at end of array index: %q", k)
+ case err != nil:
+ return list, err
+ case last == '=':
+ if t.isjsonval {
+ empval, err := t.emptyVal()
+ if err != nil {
+ return list, err
+ }
+ if empval {
+ return setIndex(list, i, nil)
+ }
+ // parse jsonvals by using Go’s JSON standard library
+ // Decode is preferred to Unmarshal in order to parse just the json parts of the list key1=jsonval1,key2=jsonval2,...
+ // Since Decode has its own buffer that consumes more characters (from underlying t.sc) than the ones actually decoded,
+ // we invoke Decode on a separate reader built with a copy of what is left in t.sc. After Decode is executed, we
+ // discard in t.sc the chars of the decoded json value (the number of those characters is returned by InputOffset).
+ var jsonval interface{}
+ dec := json.NewDecoder(strings.NewReader(t.sc.String()))
+ if err = dec.Decode(&jsonval); err != nil {
+ return list, err
+ }
+ if list, err = setIndex(list, i, jsonval); err != nil {
+ return list, err
+ }
+ if _, err = io.CopyN(io.Discard, t.sc, dec.InputOffset()); err != nil {
+ return list, err
+ }
+ // skip possible blanks and comma
+ _, err = t.emptyVal()
+ return list, err
+ }
+ vl, e := t.valList()
+ switch e {
+ case nil:
+ return setIndex(list, i, vl)
+ case io.EOF:
+ return setIndex(list, i, "")
+ case ErrNotList:
+ rs, e := t.val()
+ if e != nil && e != io.EOF {
+ return list, e
+ }
+ v, e := t.reader(rs)
+ if e != nil {
+ return list, e
+ }
+ return setIndex(list, i, v)
+ default:
+ return list, e
+ }
+ case last == '[':
+ // now we have a nested list. Read the index and handle.
+ nextI, err := t.keyIndex()
+ if err != nil {
+ return list, fmt.Errorf("error parsing index: %w", err)
+ }
+ var crtList []interface{}
+ if len(list) > i {
+ // If nested list already exists, take the value of list to next cycle.
+ existed := list[i]
+ if existed != nil {
+ crtList = list[i].([]interface{})
+ }
+ }
+ // Now we need to get the value after the ].
+ list2, err := t.listItem(crtList, nextI, nestedNameLevel)
+ if err != nil {
+ return list, err
+ }
+ return setIndex(list, i, list2)
+ case last == '.':
+ // We have a nested object. Send to t.key
+ inner := map[string]interface{}{}
+ if len(list) > i {
+ var ok bool
+ inner, ok = list[i].(map[string]interface{})
+ if !ok {
+ // We have indices out of order. Initialize empty value.
+ list[i] = map[string]interface{}{}
+ inner = list[i].(map[string]interface{})
+ }
+ }
+
+ // Recurse
+ e := t.key(inner, nestedNameLevel)
+ if e != nil {
+ return list, e
+ }
+ return setIndex(list, i, inner)
+ default:
+ return nil, fmt.Errorf("parse error: unexpected token %v", last)
+ }
+}
+
+// check for an empty value
+// read and consume optional spaces until comma or EOF (empty val) or any other char (not empty val)
+// comma and spaces are consumed, while any other char is not consumed
+func (t *parser) emptyVal() (bool, error) {
+ for {
+ r, _, e := t.sc.ReadRune()
+ if e == io.EOF {
+ return true, nil
+ }
+ if e != nil {
+ return false, e
+ }
+ if r == ',' {
+ return true, nil
+ }
+ if !unicode.IsSpace(r) {
+ t.sc.UnreadRune()
+ return false, nil
+ }
+ }
+}
+
+func (t *parser) val() ([]rune, error) {
+ stop := runeSet([]rune{','})
+ v, _, err := runesUntil(t.sc, stop)
+ return v, err
+}
+
+func (t *parser) valList() ([]interface{}, error) {
+ r, _, e := t.sc.ReadRune()
+ if e != nil {
+ return []interface{}{}, e
+ }
+
+ if r != '{' {
+ t.sc.UnreadRune()
+ return []interface{}{}, ErrNotList
+ }
+
+ list := []interface{}{}
+ stop := runeSet([]rune{',', '}'})
+ for {
+ switch rs, last, err := runesUntil(t.sc, stop); {
+ case err != nil:
+ if err == io.EOF {
+ err = errors.New("list must terminate with '}'")
+ }
+ return list, err
+ case last == '}':
+ // If this is followed by ',', consume it.
+ if r, _, e := t.sc.ReadRune(); e == nil && r != ',' {
+ t.sc.UnreadRune()
+ }
+ v, e := t.reader(rs)
+ list = append(list, v)
+ return list, e
+ case last == ',':
+ v, e := t.reader(rs)
+ if e != nil {
+ return list, e
+ }
+ list = append(list, v)
+ }
+ }
+}
+
+func runesUntil(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) {
+ v := []rune{}
+ for {
+ switch r, _, e := in.ReadRune(); {
+ case e != nil:
+ return v, r, e
+ case inMap(r, stop):
+ return v, r, nil
+ case r == '\\':
+ next, _, e := in.ReadRune()
+ if e != nil {
+ return v, next, e
+ }
+ v = append(v, next)
+ default:
+ v = append(v, r)
+ }
+ }
+}
+
+func inMap(k rune, m map[rune]bool) bool {
+ _, ok := m[k]
+ return ok
+}
+
+func typedVal(v []rune, st bool) interface{} {
+ val := string(v)
+
+ if st {
+ return val
+ }
+
+ if strings.EqualFold(val, "true") {
+ return true
+ }
+
+ if strings.EqualFold(val, "false") {
+ return false
+ }
+
+ if strings.EqualFold(val, "null") {
+ return nil
+ }
+
+ if strings.EqualFold(val, "0") {
+ return int64(0)
+ }
+
+ // If this value does not start with zero, try parsing it to an int
+ if len(val) != 0 && val[0] != '0' {
+ if iv, err := strconv.ParseInt(val, 10, 64); err == nil {
+ return iv
+ }
+ }
+
+ return val
+}
diff --git a/helm/pkg/strvals/parser_test.go b/helm/pkg/strvals/parser_test.go
new file mode 100644
index 000000000..73403fc52
--- /dev/null
+++ b/helm/pkg/strvals/parser_test.go
@@ -0,0 +1,819 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strvals
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "sigs.k8s.io/yaml"
+)
+
+func TestSetIndex(t *testing.T) {
+ tests := []struct {
+ name string
+ initial []interface{}
+ expect []interface{}
+ add int
+ val int
+ err bool
+ }{
+ {
+ name: "short",
+ initial: []interface{}{0, 1},
+ expect: []interface{}{0, 1, 2},
+ add: 2,
+ val: 2,
+ err: false,
+ },
+ {
+ name: "equal",
+ initial: []interface{}{0, 1},
+ expect: []interface{}{0, 2},
+ add: 1,
+ val: 2,
+ err: false,
+ },
+ {
+ name: "long",
+ initial: []interface{}{0, 1, 2, 3, 4, 5},
+ expect: []interface{}{0, 1, 2, 4, 4, 5},
+ add: 3,
+ val: 4,
+ err: false,
+ },
+ {
+ name: "negative",
+ initial: []interface{}{0, 1, 2, 3, 4, 5},
+ expect: []interface{}{0, 1, 2, 3, 4, 5},
+ add: -1,
+ val: 4,
+ err: true,
+ },
+ {
+ name: "large",
+ initial: []interface{}{0, 1, 2, 3, 4, 5},
+ expect: []interface{}{0, 1, 2, 3, 4, 5},
+ add: MaxIndex + 1,
+ val: 4,
+ err: true,
+ },
+ }
+
+ for _, tt := range tests {
+ got, err := setIndex(tt.initial, tt.add, tt.val)
+
+ if err != nil && tt.err == false {
+ t.Fatalf("%s: Expected no error but error returned", tt.name)
+ } else if err == nil && tt.err == true {
+ t.Fatalf("%s: Expected error but no error returned", tt.name)
+ }
+
+ if len(got) != len(tt.expect) {
+ t.Fatalf("%s: Expected length %d, got %d", tt.name, len(tt.expect), len(got))
+ }
+
+ if !tt.err {
+ if gg := got[tt.add].(int); gg != tt.val {
+ t.Errorf("%s, Expected value %d, got %d", tt.name, tt.val, gg)
+ }
+ }
+
+ for k, v := range got {
+ if v != tt.expect[k] {
+ t.Errorf("%s, Expected value %d, got %d", tt.name, tt.expect[k], v)
+ }
+ }
+ }
+}
+
+func TestParseSet(t *testing.T) {
+ testsString := []struct {
+ str string
+ expect map[string]interface{}
+ err bool
+ }{
+ {
+ str: "long_int_string=1234567890",
+ expect: map[string]interface{}{"long_int_string": "1234567890"},
+ err: false,
+ },
+ {
+ str: "boolean=true",
+ expect: map[string]interface{}{"boolean": "true"},
+ err: false,
+ },
+ {
+ str: "is_null=null",
+ expect: map[string]interface{}{"is_null": "null"},
+ err: false,
+ },
+ {
+ str: "zero=0",
+ expect: map[string]interface{}{"zero": "0"},
+ err: false,
+ },
+ }
+ tests := []struct {
+ str string
+ expect map[string]interface{}
+ err bool
+ }{
+ {
+ "name1=null,f=false,t=true",
+ map[string]interface{}{"name1": nil, "f": false, "t": true},
+ false,
+ },
+ {
+ "name1=value1",
+ map[string]interface{}{"name1": "value1"},
+ false,
+ },
+ {
+ "name1=value1,name2=value2",
+ map[string]interface{}{"name1": "value1", "name2": "value2"},
+ false,
+ },
+ {
+ "name1=value1,name2=value2,",
+ map[string]interface{}{"name1": "value1", "name2": "value2"},
+ false,
+ },
+ {
+ str: "name1=value1,,,,name2=value2,",
+ err: true,
+ },
+ {
+ str: "name1=,name2=value2",
+ expect: map[string]interface{}{"name1": "", "name2": "value2"},
+ },
+ {
+ str: "leading_zeros=00009",
+ expect: map[string]interface{}{"leading_zeros": "00009"},
+ },
+ {
+ str: "zero_int=0",
+ expect: map[string]interface{}{"zero_int": 0},
+ },
+ {
+ str: "long_int=1234567890",
+ expect: map[string]interface{}{"long_int": 1234567890},
+ },
+ {
+ str: "boolean=true",
+ expect: map[string]interface{}{"boolean": true},
+ },
+ {
+ str: "is_null=null",
+ expect: map[string]interface{}{"is_null": nil},
+ err: false,
+ },
+ {
+ str: "name1,name2=",
+ err: true,
+ },
+ {
+ str: "name1,name2=value2",
+ err: true,
+ },
+ {
+ str: "name1,name2=value2\\",
+ err: true,
+ },
+ {
+ str: "name1,name2",
+ err: true,
+ },
+ {
+ "name1=one\\,two,name2=three\\,four",
+ map[string]interface{}{"name1": "one,two", "name2": "three,four"},
+ false,
+ },
+ {
+ "name1=one\\=two,name2=three\\=four",
+ map[string]interface{}{"name1": "one=two", "name2": "three=four"},
+ false,
+ },
+ {
+ "name1=one two three,name2=three two one",
+ map[string]interface{}{"name1": "one two three", "name2": "three two one"},
+ false,
+ },
+ {
+ "outer.inner=value",
+ map[string]interface{}{"outer": map[string]interface{}{"inner": "value"}},
+ false,
+ },
+ {
+ "outer.middle.inner=value",
+ map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}},
+ false,
+ },
+ {
+ "outer.inner1=value,outer.inner2=value2",
+ map[string]interface{}{"outer": map[string]interface{}{"inner1": "value", "inner2": "value2"}},
+ false,
+ },
+ {
+ "outer.inner1=value,outer.middle.inner=value",
+ map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "value",
+ "middle": map[string]interface{}{
+ "inner": "value",
+ },
+ },
+ },
+ false,
+ },
+ {
+ str: "name1.name2",
+ err: true,
+ },
+ {
+ str: "name1.name2,name1.name3",
+ err: true,
+ },
+ {
+ str: "name1.name2=",
+ expect: map[string]interface{}{"name1": map[string]interface{}{"name2": ""}},
+ },
+ {
+ str: "name1.=name2",
+ err: true,
+ },
+ {
+ str: "name1.,name2",
+ err: true,
+ },
+ {
+ "name1={value1,value2}",
+ map[string]interface{}{"name1": []string{"value1", "value2"}},
+ false,
+ },
+ {
+ "name1={value1,value2},name2={value1,value2}",
+ map[string]interface{}{
+ "name1": []string{"value1", "value2"},
+ "name2": []string{"value1", "value2"},
+ },
+ false,
+ },
+ {
+ "name1={1021,902}",
+ map[string]interface{}{"name1": []int{1021, 902}},
+ false,
+ },
+ {
+ "name1.name2={value1,value2}",
+ map[string]interface{}{"name1": map[string]interface{}{"name2": []string{"value1", "value2"}}},
+ false,
+ },
+ {
+ str: "name1={1021,902",
+ err: true,
+ },
+ // List support
+ {
+ str: "list[0]=foo",
+ expect: map[string]interface{}{"list": []string{"foo"}},
+ },
+ {
+ str: "list[0].foo=bar",
+ expect: map[string]interface{}{
+ "list": []interface{}{
+ map[string]interface{}{"foo": "bar"},
+ },
+ },
+ },
+ {
+ str: "list[0].foo=bar,list[0].hello=world",
+ expect: map[string]interface{}{
+ "list": []interface{}{
+ map[string]interface{}{"foo": "bar", "hello": "world"},
+ },
+ },
+ },
+ {
+ str: "list[0].foo=bar,list[-30].hello=world",
+ err: true,
+ },
+ {
+ str: "list[0]=foo,list[1]=bar",
+ expect: map[string]interface{}{"list": []string{"foo", "bar"}},
+ },
+ {
+ str: "list[0]=foo,list[1]=bar,",
+ expect: map[string]interface{}{"list": []string{"foo", "bar"}},
+ },
+ {
+ str: "list[0]=foo,list[3]=bar",
+ expect: map[string]interface{}{"list": []interface{}{"foo", nil, nil, "bar"}},
+ },
+ {
+ str: "list[0]=foo,list[-20]=bar",
+ err: true,
+ },
+ {
+ str: "illegal[0]name.foo=bar",
+ err: true,
+ },
+ {
+ str: "noval[0]",
+ expect: map[string]interface{}{"noval": []interface{}{}},
+ },
+ {
+ str: "noval[0]=",
+ expect: map[string]interface{}{"noval": []interface{}{""}},
+ },
+ {
+ str: "nested[0][0]=1",
+ expect: map[string]interface{}{"nested": []interface{}{[]interface{}{1}}},
+ },
+ {
+ str: "nested[1][1]=1",
+ expect: map[string]interface{}{"nested": []interface{}{nil, []interface{}{nil, 1}}},
+ },
+ {
+ str: "name1.name2[0].foo=bar,name1.name2[1].foo=bar",
+ expect: map[string]interface{}{
+ "name1": map[string]interface{}{
+ "name2": []map[string]interface{}{{"foo": "bar"}, {"foo": "bar"}},
+ },
+ },
+ },
+ {
+ str: "name1.name2[1].foo=bar,name1.name2[0].foo=bar",
+ expect: map[string]interface{}{
+ "name1": map[string]interface{}{
+ "name2": []map[string]interface{}{{"foo": "bar"}, {"foo": "bar"}},
+ },
+ },
+ },
+ {
+ str: "name1.name2[1].foo=bar",
+ expect: map[string]interface{}{
+ "name1": map[string]interface{}{
+ "name2": []map[string]interface{}{nil, {"foo": "bar"}},
+ },
+ },
+ },
+ {
+ str: "]={}].",
+ err: true,
+ },
+ }
+
+ for _, tt := range tests {
+ got, err := Parse(tt.str)
+ if err != nil {
+ if tt.err {
+ continue
+ }
+ t.Fatalf("%s: %s", tt.str, err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.str)
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
+ }
+ }
+ for _, tt := range testsString {
+ got, err := ParseString(tt.str)
+ if err != nil {
+ if tt.err {
+ continue
+ }
+ t.Fatalf("%s: %s", tt.str, err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.str)
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
+ }
+ }
+}
+
+func TestParseInto(t *testing.T) {
+ tests := []struct {
+ input string
+ input2 string
+ got map[string]interface{}
+ expect map[string]interface{}
+ err bool
+ }{
+ {
+ input: "outer.inner1=value1,outer.inner3=value3,outer.inner4=4",
+ got: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "overwrite",
+ "inner2": "value2",
+ },
+ },
+ expect: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "value1",
+ "inner2": "value2",
+ "inner3": "value3",
+ "inner4": 4,
+ }},
+ err: false,
+ },
+ {
+ input: "listOuter[0][0].type=listValue",
+ input2: "listOuter[0][0].status=alive",
+ got: map[string]interface{}{},
+ expect: map[string]interface{}{
+ "listOuter": [][]interface{}{{map[string]string{
+ "type": "listValue",
+ "status": "alive",
+ }}},
+ },
+ err: false,
+ },
+ {
+ input: "listOuter[0][0].type=listValue",
+ input2: "listOuter[1][0].status=alive",
+ got: map[string]interface{}{},
+ expect: map[string]interface{}{
+ "listOuter": [][]interface{}{
+ {
+ map[string]string{"type": "listValue"},
+ },
+ {
+ map[string]string{"status": "alive"},
+ },
+ },
+ },
+ err: false,
+ },
+ {
+ input: "listOuter[0][1][0].type=listValue",
+ input2: "listOuter[0][0][1].status=alive",
+ got: map[string]interface{}{
+ "listOuter": []interface{}{
+ []interface{}{
+ []interface{}{
+ map[string]string{"exited": "old"},
+ },
+ },
+ },
+ },
+ expect: map[string]interface{}{
+ "listOuter": [][][]interface{}{
+ {
+ {
+ map[string]string{"exited": "old"},
+ map[string]string{"status": "alive"},
+ },
+ {
+ map[string]string{"type": "listValue"},
+ },
+ },
+ },
+ },
+ err: false,
+ },
+ }
+ for _, tt := range tests {
+ if err := ParseInto(tt.input, tt.got); err != nil {
+ t.Fatal(err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.input)
+ }
+
+ if tt.input2 != "" {
+ if err := ParseInto(tt.input2, tt.got); err != nil {
+ t.Fatal(err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.input2)
+ }
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(tt.got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.input, y1, y2)
+ }
+ }
+}
+
+func TestParseIntoString(t *testing.T) {
+ got := map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "overwrite",
+ "inner2": "value2",
+ },
+ }
+ input := "outer.inner1=1,outer.inner3=3"
+ expect := map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "1",
+ "inner2": "value2",
+ "inner3": "3",
+ },
+ }
+
+ if err := ParseIntoString(input, got); err != nil {
+ t.Fatal(err)
+ }
+
+ y1, err := yaml.Marshal(expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", input, y1, y2)
+ }
+}
+
+func TestParseJSON(t *testing.T) {
+ tests := []struct {
+ input string
+ got map[string]interface{}
+ expect map[string]interface{}
+ err bool
+ }{
+ { // set json scalars values, and replace one existing key
+ input: "outer.inner1=\"1\",outer.inner3=3,outer.inner4=true,outer.inner5=\"true\"",
+ got: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "overwrite",
+ "inner2": "value2",
+ },
+ },
+ expect: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": "1",
+ "inner2": "value2",
+ "inner3": 3,
+ "inner4": true,
+ "inner5": "true",
+ },
+ },
+ err: false,
+ },
+ { // set json objects and arrays, and replace one existing key
+ input: "outer.inner1={\"a\":\"1\",\"b\":2,\"c\":[1,2,3]},outer.inner3=[\"new value 1\",\"new value 2\"],outer.inner4={\"aa\":\"1\",\"bb\":2,\"cc\":[1,2,3]},outer.inner5=[{\"A\":\"1\",\"B\":2,\"C\":[1,2,3]}]",
+ got: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": map[string]interface{}{
+ "x": "overwrite",
+ },
+ "inner2": "value2",
+ "inner3": []interface{}{
+ "overwrite",
+ },
+ },
+ },
+ expect: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": map[string]interface{}{"a": "1", "b": 2, "c": []interface{}{1, 2, 3}},
+ "inner2": "value2",
+ "inner3": []interface{}{"new value 1", "new value 2"},
+ "inner4": map[string]interface{}{"aa": "1", "bb": 2, "cc": []interface{}{1, 2, 3}},
+ "inner5": []interface{}{map[string]interface{}{"A": "1", "B": 2, "C": []interface{}{1, 2, 3}}},
+ },
+ },
+ err: false,
+ },
+ { // null assignment, and no value assigned (equivalent to null)
+ input: "outer.inner1=,outer.inner3={\"aa\":\"1\",\"bb\":2,\"cc\":[1,2,3]},outer.inner3.cc[1]=null",
+ got: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": map[string]interface{}{
+ "x": "overwrite",
+ },
+ "inner2": "value2",
+ },
+ },
+ expect: map[string]interface{}{
+ "outer": map[string]interface{}{
+ "inner1": nil,
+ "inner2": "value2",
+ "inner3": map[string]interface{}{"aa": "1", "bb": 2, "cc": []interface{}{1, nil, 3}},
+ },
+ },
+ err: false,
+ },
+ { // syntax error
+ input: "outer.inner1={\"a\":\"1\",\"b\":2,\"c\":[1,2,3]},outer.inner3=[\"new value 1\",\"new value 2\"],outer.inner4={\"aa\":\"1\",\"bb\":2,\"cc\":[1,2,3]},outer.inner5={\"A\":\"1\",\"B\":2,\"C\":[1,2,3]}]",
+ got: nil,
+ expect: nil,
+ err: true,
+ },
+ }
+ for _, tt := range tests {
+ if err := ParseJSON(tt.input, tt.got); err != nil {
+ if tt.err {
+ continue
+ }
+ t.Fatalf("%s: %s", tt.input, err)
+ }
+ if tt.err {
+ t.Fatalf("%s: Expected error. Got nil", tt.input)
+ }
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatalf("Error serializing expected value: %s", err)
+ }
+ y2, err := yaml.Marshal(tt.got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.input, y1, y2)
+ }
+ }
+}
+
+func TestParseFile(t *testing.T) {
+ input := "name1=path1"
+ expect := map[string]interface{}{
+ "name1": "value1",
+ }
+ rs2v := func(rs []rune) (interface{}, error) {
+ v := string(rs)
+ if v != "path1" {
+ t.Errorf("%s: runesToVal: Expected value path1, got %s", input, v)
+ return "", nil
+ }
+ return "value1", nil
+ }
+
+ got, err := ParseFile(input, rs2v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ y1, err := yaml.Marshal(expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", input, y1, y2)
+ }
+}
+
+func TestParseIntoFile(t *testing.T) {
+ got := map[string]interface{}{}
+ input := "name1=path1"
+ expect := map[string]interface{}{
+ "name1": "value1",
+ }
+ rs2v := func(rs []rune) (interface{}, error) {
+ v := string(rs)
+ if v != "path1" {
+ t.Errorf("%s: runesToVal: Expected value path1, got %s", input, v)
+ return "", nil
+ }
+ return "value1", nil
+ }
+
+ if err := ParseIntoFile(input, got, rs2v); err != nil {
+ t.Fatal(err)
+ }
+
+ y1, err := yaml.Marshal(expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", input, y1, y2)
+ }
+}
+
+func TestToYAML(t *testing.T) {
+ // The TestParse does the hard part. We just verify that YAML formatting is
+ // happening.
+ o, err := ToYAML("name=value")
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := "name: value"
+ if o != expect {
+ t.Errorf("Expected %q, got %q", expect, o)
+ }
+}
+
+func TestParseSetNestedLevels(t *testing.T) {
+ var keyMultipleNestedLevels strings.Builder
+ for i := 1; i <= MaxNestedNameLevel+2; i++ {
+ tmpStr := fmt.Sprintf("name%d", i)
+ if i <= MaxNestedNameLevel+1 {
+ tmpStr = tmpStr + "."
+ }
+ keyMultipleNestedLevels.WriteString(tmpStr)
+ }
+ tests := []struct {
+ str string
+ expect map[string]interface{}
+ err bool
+ errStr string
+ }{
+ {
+ "outer.middle.inner=value",
+ map[string]interface{}{"outer": map[string]interface{}{"middle": map[string]interface{}{"inner": "value"}}},
+ false,
+ "",
+ },
+ {
+ str: keyMultipleNestedLevels.String() + "=value",
+ err: true,
+ errStr: fmt.Sprintf("value name nested level is greater than maximum supported nested level of %d",
+ MaxNestedNameLevel),
+ },
+ }
+
+ for _, tt := range tests {
+ got, err := Parse(tt.str)
+ if err != nil {
+ if tt.err {
+ if tt.errStr != "" {
+ if err.Error() != tt.errStr {
+ t.Errorf("Expected error: %s. Got error: %s", tt.errStr, err.Error())
+ }
+ }
+ continue
+ }
+ t.Fatalf("%s: %s", tt.str, err)
+ }
+ if tt.err {
+ t.Errorf("%s: Expected error. Got nil", tt.str)
+ }
+
+ y1, err := yaml.Marshal(tt.expect)
+ if err != nil {
+ t.Fatal(err)
+ }
+ y2, err := yaml.Marshal(got)
+ if err != nil {
+ t.Fatalf("Error serializing parsed value: %s", err)
+ }
+
+ if string(y1) != string(y2) {
+ t.Errorf("%s: Expected:\n%s\nGot:\n%s", tt.str, y1, y2)
+ }
+ }
+}
diff --git a/helm/pkg/uploader/chart_uploader.go b/helm/pkg/uploader/chart_uploader.go
new file mode 100644
index 000000000..b3d612e38
--- /dev/null
+++ b/helm/pkg/uploader/chart_uploader.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uploader
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+
+ "helm.sh/helm/v4/pkg/pusher"
+ "helm.sh/helm/v4/pkg/registry"
+)
+
+// ChartUploader handles uploading a chart.
+type ChartUploader struct {
+ // Out is the location to write warning and info messages.
+ Out io.Writer
+ // Pusher collection for the operation
+ Pushers pusher.Providers
+ // Options provide parameters to be passed along to the Pusher being initialized.
+ Options []pusher.Option
+ // RegistryClient is a client for interacting with registries.
+ RegistryClient *registry.Client
+}
+
+// UploadTo uploads a chart. Depending on the settings, it may also upload a provenance file.
+func (c *ChartUploader) UploadTo(ref, remote string) error {
+ u, err := url.Parse(remote)
+ if err != nil {
+ return fmt.Errorf("invalid chart URL format: %s", remote)
+ }
+
+ if u.Scheme == "" {
+ return fmt.Errorf("scheme prefix missing from remote (e.g. \"%s://\")", registry.OCIScheme)
+ }
+
+ p, err := c.Pushers.ByScheme(u.Scheme)
+ if err != nil {
+ return err
+ }
+
+ return p.Push(ref, u.String(), c.Options...)
+}
diff --git a/helm/pkg/uploader/doc.go b/helm/pkg/uploader/doc.go
new file mode 100644
index 000000000..112ddbf2c
--- /dev/null
+++ b/helm/pkg/uploader/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package uploader provides a library for uploading charts.
+
+This package contains tools for uploading charts to registries.
+*/
+package uploader
diff --git a/helm/scripts/coverage.sh b/helm/scripts/coverage.sh
new file mode 100755
index 000000000..4a29a68ad
--- /dev/null
+++ b/helm/scripts/coverage.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+covermode=${COVERMODE:-atomic}
+coverdir=$(mktemp -d /tmp/coverage.XXXXXXXXXX)
+trap 'rm -rf "${coverdir}"' EXIT
+profile="${coverdir}/cover.out"
+html=false
+target="./..." # by default the whole repository is tested
+for arg in "$@"; do
+ case "${arg}" in
+ --html)
+ html=true
+ ;;
+ *)
+ target="${arg}"
+ ;;
+ esac
+done
+
+generate_cover_data() {
+ for d in $(go list "$target"); do
+ (
+ local output="${coverdir}/${d//\//-}.cover"
+ go test -coverprofile="${output}" -covermode="$covermode" "$d"
+ )
+ done
+
+ echo "mode: $covermode" >"$profile"
+ grep -h -v "^mode:" "$coverdir"/*.cover >>"$profile"
+}
+
+generate_cover_data
+go tool cover -func "${profile}"
+
+if [ "${html}" = "true" ] ; then
+ go tool cover -html "${profile}"
+fi
+
diff --git a/helm/scripts/get b/helm/scripts/get
new file mode 100755
index 000000000..25fd08e76
--- /dev/null
+++ b/helm/scripts/get
@@ -0,0 +1,242 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The install script is based off of the MIT-licensed script from glide,
+# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
+
+PROJECT_NAME="helm"
+TILLER_NAME="tiller"
+
+: ${USE_SUDO:="true"}
+: ${HELM_INSTALL_DIR:="/usr/local/bin"}
+
+# initArch discovers the architecture for this system.
+initArch() {
+ ARCH=$(uname -m)
+ case $ARCH in
+ armv5*) ARCH="armv5";;
+ armv6*) ARCH="armv6";;
+ armv7*) ARCH="arm";;
+ aarch64) ARCH="arm64";;
+ x86) ARCH="386";;
+ x86_64) ARCH="amd64";;
+ i686) ARCH="386";;
+ i386) ARCH="386";;
+ esac
+}
+
+# initOS discovers the operating system for this system.
+initOS() {
+ OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
+
+ case "$OS" in
+ # Minimalist GNU for Windows
+ mingw*) OS='windows';;
+ esac
+}
+
+# runs the given command as root (detects if we are root already)
+runAsRoot() {
+ if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
+ sudo "${@}"
+ else
+ "${@}"
+ fi
+}
+
+# verifySupported checks that the os/arch combination is supported for
+# binary builds.
+verifySupported() {
+ local supported="darwin-amd64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-loong64\nlinux-ppc64le\nlinux-s390x\nwindows-amd64\nwindows-arm64"
+ if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
+ echo "No prebuilt binary for ${OS}-${ARCH}."
+ echo "To build from source, go to https://github.com/helm/helm"
+ exit 1
+ fi
+
+ if ! type "curl" > /dev/null && ! type "wget" > /dev/null; then
+ echo "Either curl or wget is required"
+ exit 1
+ fi
+}
+
+# checkDesiredVersion checks if the desired version is available.
+checkDesiredVersion() {
+ if [ "x$DESIRED_VERSION" == "x" ]; then
+ # Pinning tag to v2.17.0 as per https://github.com/helm/helm/issues/9607
+ TAG=v2.17.0
+ else
+ TAG=$DESIRED_VERSION
+ fi
+}
+
+# checkHelmInstalledVersion checks which version of helm is installed and
+# if it needs to be changed.
+checkHelmInstalledVersion() {
+ if [[ -f "${HELM_INSTALL_DIR}/${PROJECT_NAME}" ]]; then
+ local version=$("${HELM_INSTALL_DIR}/${PROJECT_NAME}" version -c | grep '^Client' | cut -d'"' -f2)
+ if [[ "$version" == "$TAG" ]]; then
+ echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
+ return 0
+ else
+ echo "Helm ${TAG} is available. Changing from version ${version}."
+ return 1
+ fi
+ else
+ return 1
+ fi
+}
+
+# downloadFile downloads the latest binary package and also the checksum
+# for that binary.
+downloadFile() {
+ HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
+ DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
+ CHECKSUM_URL="$DOWNLOAD_URL.sha256"
+ HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
+ HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
+ HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
+ echo "Downloading $DOWNLOAD_URL"
+ if type "curl" > /dev/null; then
+ curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
+ elif type "wget" > /dev/null; then
+ wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
+ fi
+ if type "curl" > /dev/null; then
+ curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
+ elif type "wget" > /dev/null; then
+ wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
+ fi
+}
+
+# installFile verifies the SHA256 for the file, then unpacks and
+# installs it.
+installFile() {
+ HELM_TMP="$HELM_TMP_ROOT/$PROJECT_NAME"
+ local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
+ local expected_sum=$(cat ${HELM_SUM_FILE})
+ if [ "$sum" != "$expected_sum" ]; then
+ echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
+ exit 1
+ fi
+
+ mkdir -p "$HELM_TMP"
+ tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
+ HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/$PROJECT_NAME"
+ TILLER_TMP_BIN="$HELM_TMP/$OS-$ARCH/$TILLER_NAME"
+ echo "Preparing to install $PROJECT_NAME and $TILLER_NAME into ${HELM_INSTALL_DIR}"
+ runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$PROJECT_NAME"
+ echo "$PROJECT_NAME installed into $HELM_INSTALL_DIR/$PROJECT_NAME"
+ if [ -x "$TILLER_TMP_BIN" ]; then
+ runAsRoot cp "$TILLER_TMP_BIN" "$HELM_INSTALL_DIR/$TILLER_NAME"
+ echo "$TILLER_NAME installed into $HELM_INSTALL_DIR/$TILLER_NAME"
+ else
+ echo "info: $TILLER_NAME binary was not found in this release; skipping $TILLER_NAME installation"
+ fi
+}
+
+# fail_trap is executed if an error occurs.
+fail_trap() {
+ result=$?
+ if [ "$result" != "0" ]; then
+ if [[ -n "$INPUT_ARGUMENTS" ]]; then
+ echo "Failed to install $PROJECT_NAME with the arguments provided: $INPUT_ARGUMENTS"
+ help
+ else
+ echo "Failed to install $PROJECT_NAME"
+ fi
+ echo -e "\tFor support, go to https://github.com/helm/helm."
+ fi
+ cleanup
+ exit $result
+}
+
+# testVersion tests the installed client to make sure it is working.
+testVersion() {
+ set +e
+ HELM="$(command -v $PROJECT_NAME)"
+ if [ "$?" = "1" ]; then
+ echo "$PROJECT_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
+ exit 1
+ fi
+ set -e
+ echo "Run '$PROJECT_NAME init' to configure $PROJECT_NAME."
+}
+
+# help provides possible cli installation arguments
+help () {
+ echo "Accepted cli arguments are:"
+ echo -e "\t[--help|-h ] ->> prints this help"
+ echo -e "\t[--version|-v ]"
+ echo -e "\te.g. --version v2.4.0 or -v latest"
+ echo -e "\t[--no-sudo] ->> install without sudo"
+}
+
+# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
+cleanup() {
+ if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
+ rm -rf "$HELM_TMP_ROOT"
+ fi
+}
+
+# Execution
+
+#Stop execution on any error
+trap "fail_trap" EXIT
+set -e
+
+# Parsing input arguments (if any)
+export INPUT_ARGUMENTS="${@}"
+set -u
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ '--version'|-v)
+ shift
+ if [[ $# -ne 0 ]]; then
+ export DESIRED_VERSION="${1}"
+ if [[ "$1" != "v"* ]]; then
+ echo "Expected version arg ('${DESIRED_VERSION}') to begin with 'v', fixing..."
+ export DESIRED_VERSION="v${1}"
+ fi
+ else
+ echo -e "Please provide the desired version. e.g. --version v2.4.0 or -v latest"
+ exit 0
+ fi
+ ;;
+ '--no-sudo')
+ USE_SUDO="false"
+ ;;
+ '--help'|-h)
+ help
+ exit 0
+ ;;
+ *) exit 1
+ ;;
+ esac
+ shift
+done
+set +u
+
+initArch
+initOS
+verifySupported
+checkDesiredVersion
+if ! checkHelmInstalledVersion; then
+ downloadFile
+ installFile
+fi
+testVersion
+cleanup
diff --git a/helm/scripts/get-helm-3 b/helm/scripts/get-helm-3
new file mode 100755
index 000000000..5f265a52f
--- /dev/null
+++ b/helm/scripts/get-helm-3
@@ -0,0 +1,347 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The install script is based off of the MIT-licensed script from glide,
+# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
+
+: ${BINARY_NAME:="helm"}
+: ${USE_SUDO:="true"}
+: ${DEBUG:="false"}
+: ${VERIFY_CHECKSUM:="true"}
+: ${VERIFY_SIGNATURES:="false"}
+: ${HELM_INSTALL_DIR:="/usr/local/bin"}
+: ${GPG_PUBRING:="pubring.kbx"}
+
+HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)"
+HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)"
+HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)"
+HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)"
+HAS_GIT="$(type "git" &> /dev/null && echo true || echo false)"
+HAS_TAR="$(type "tar" &> /dev/null && echo true || echo false)"
+
+# initArch discovers the architecture for this system.
+initArch() {
+ ARCH=$(uname -m)
+ case $ARCH in
+ armv5*) ARCH="armv5";;
+ armv6*) ARCH="armv6";;
+ armv7*) ARCH="arm";;
+ aarch64) ARCH="arm64";;
+ x86) ARCH="386";;
+ x86_64) ARCH="amd64";;
+ i686) ARCH="386";;
+ i386) ARCH="386";;
+ esac
+}
+
+# initOS discovers the operating system for this system.
+initOS() {
+ OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
+
+ case "$OS" in
+ # Minimalist GNU for Windows
+ mingw*|cygwin*) OS='windows';;
+ esac
+}
+
+# runs the given command as root (detects if we are root already)
+runAsRoot() {
+ if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
+ sudo "${@}"
+ else
+ "${@}"
+ fi
+}
+
+# verifySupported checks that the os/arch combination is supported for
+# binary builds, as well whether or not necessary tools are present.
+verifySupported() {
+ local supported="darwin-amd64\ndarwin-arm64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-loong64\nlinux-ppc64le\nlinux-s390x\nlinux-riscv64\nwindows-amd64\nwindows-arm64"
+ if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
+ echo "No prebuilt binary for ${OS}-${ARCH}."
+ echo "To build from source, go to https://github.com/helm/helm"
+ exit 1
+ fi
+
+ if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then
+ echo "Either curl or wget is required"
+ exit 1
+ fi
+
+ if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then
+ echo "In order to verify checksum, openssl must first be installed."
+ echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment."
+ exit 1
+ fi
+
+ if [ "${VERIFY_SIGNATURES}" == "true" ]; then
+ if [ "${HAS_GPG}" != "true" ]; then
+ echo "In order to verify signatures, gpg must first be installed."
+ echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment."
+ exit 1
+ fi
+ if [ "${OS}" != "linux" ]; then
+ echo "Signature verification is currently only supported on Linux."
+ echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually."
+ exit 1
+ fi
+ fi
+
+ if [ "${HAS_GIT}" != "true" ]; then
+ echo "[WARNING] Could not find git. It is required for plugin installation."
+ fi
+
+ if [ "${HAS_TAR}" != "true" ]; then
+ echo "[ERROR] Could not find tar. It is required to extract the helm binary archive."
+ exit 1
+ fi
+}
+
+# checkDesiredVersion checks if the desired version is available.
+checkDesiredVersion() {
+ if [ "x$DESIRED_VERSION" == "x" ]; then
+ # Get tag from release URL
+ local latest_release_url="https://get.helm.sh/helm3-latest-version"
+ local latest_release_response=""
+ if [ "${HAS_CURL}" == "true" ]; then
+ latest_release_response=$( curl -L --silent --show-error --fail "$latest_release_url" 2>&1 || true )
+ elif [ "${HAS_WGET}" == "true" ]; then
+ latest_release_response=$( wget "$latest_release_url" -q -O - 2>&1 || true )
+ fi
+ TAG=$( echo "$latest_release_response" | grep '^v[0-9]' )
+ if [ "x$TAG" == "x" ]; then
+ printf "Could not retrieve the latest release tag information from %s: %s\n" "${latest_release_url}" "${latest_release_response}"
+ exit 1
+ fi
+ else
+ TAG=$DESIRED_VERSION
+ fi
+}
+
+# checkHelmInstalledVersion checks which version of helm is installed and
+# if it needs to be changed.
+checkHelmInstalledVersion() {
+ if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
+ local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
+ if [[ "$version" == "$TAG" ]]; then
+ echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
+ return 0
+ else
+ echo "Helm ${TAG} is available. Changing from version ${version}."
+ return 1
+ fi
+ else
+ return 1
+ fi
+}
+
+# downloadFile downloads the latest binary package and also the checksum
+# for that binary.
+downloadFile() {
+ HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
+ DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
+ CHECKSUM_URL="$DOWNLOAD_URL.sha256"
+ HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
+ HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
+ HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
+ echo "Downloading $DOWNLOAD_URL"
+ if [ "${HAS_CURL}" == "true" ]; then
+ curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
+ curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
+ elif [ "${HAS_WGET}" == "true" ]; then
+ wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
+ wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
+ fi
+}
+
+# verifyFile verifies the SHA256 checksum of the binary package
+# and the GPG signatures for both the package and checksum file
+# (depending on settings in environment).
+verifyFile() {
+ if [ "${VERIFY_CHECKSUM}" == "true" ]; then
+ verifyChecksum
+ fi
+ if [ "${VERIFY_SIGNATURES}" == "true" ]; then
+ verifySignatures
+ fi
+}
+
+# installFile installs the Helm binary.
+installFile() {
+ HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
+ mkdir -p "$HELM_TMP"
+ tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
+ HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
+ echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
+ runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
+ echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
+}
+
+# verifyChecksum verifies the SHA256 checksum of the binary package.
+verifyChecksum() {
+ printf "Verifying checksum... "
+ local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
+ local expected_sum=$(cat ${HELM_SUM_FILE})
+ if [ "$sum" != "$expected_sum" ]; then
+ echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
+ exit 1
+ fi
+ echo "Done."
+}
+
+# verifySignatures obtains the latest KEYS file from GitHub main branch
+# as well as the signature .asc files from the specific GitHub release,
+# then verifies that the release artifacts were signed by a maintainer's key.
+verifySignatures() {
+ printf "Verifying signatures... "
+ local keys_filename="KEYS"
+ local github_keys_url="https://raw.githubusercontent.com/helm/helm/main/${keys_filename}"
+ if [ "${HAS_CURL}" == "true" ]; then
+ curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}"
+ elif [ "${HAS_WGET}" == "true" ]; then
+ wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}"
+ fi
+ local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg"
+ local gpg_homedir="${HELM_TMP_ROOT}/gnupg"
+ mkdir -p -m 0700 "${gpg_homedir}"
+ local gpg_stderr_device="/dev/null"
+ if [ "${DEBUG}" == "true" ]; then
+ gpg_stderr_device="/dev/stderr"
+ fi
+ gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}"
+ gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}"
+ local github_release_url="https://github.com/helm/helm/releases/download/${TAG}"
+ if [ "${HAS_CURL}" == "true" ]; then
+ curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
+ curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
+ elif [ "${HAS_WGET}" == "true" ]; then
+ wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
+ wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
+ fi
+ local error_text="If you think this might be a potential security issue,"
+ error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md"
+ local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
+ if [[ ${num_goodlines_sha} -lt 2 ]]; then
+ echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!"
+ echo -e "${error_text}"
+ exit 1
+ fi
+ local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
+ if [[ ${num_goodlines_tar} -lt 2 ]]; then
+ echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!"
+ echo -e "${error_text}"
+ exit 1
+ fi
+ echo "Done."
+}
+
+# fail_trap is executed if an error occurs.
+fail_trap() {
+ result=$?
+ if [ "$result" != "0" ]; then
+ if [[ -n "$INPUT_ARGUMENTS" ]]; then
+ echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
+ help
+ else
+ echo "Failed to install $BINARY_NAME"
+ fi
+ echo -e "\tFor support, go to https://github.com/helm/helm."
+ fi
+ cleanup
+ exit $result
+}
+
+# testVersion tests the installed client to make sure it is working.
+testVersion() {
+ set +e
+ HELM="$(command -v $BINARY_NAME)"
+ if [ "$?" = "1" ]; then
+ echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
+ exit 1
+ fi
+ set -e
+}
+
+# help provides possible cli installation arguments
+help () {
+ echo "Accepted cli arguments are:"
+ echo -e "\t[--help|-h ] ->> prints this help"
+ echo -e "\t[--version|-v ] . When not defined it fetches the latest release tag from the Helm CDN"
+ echo -e "\te.g. --version v3.0.0 or -v canary"
+ echo -e "\t[--no-sudo] ->> install without sudo"
+}
+
+# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
+cleanup() {
+ if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
+ rm -rf "$HELM_TMP_ROOT"
+ fi
+}
+
+# Execution
+
+#Stop execution on any error
+trap "fail_trap" EXIT
+set -e
+
+# Set debug if desired
+if [ "${DEBUG}" == "true" ]; then
+ set -x
+fi
+
+# Parsing input arguments (if any)
+export INPUT_ARGUMENTS="${@}"
+set -u
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ '--version'|-v)
+ shift
+ if [[ $# -ne 0 ]]; then
+ export DESIRED_VERSION="${1}"
+ if [[ "$1" != "v"* ]]; then
+ echo "Expected version arg ('${DESIRED_VERSION}') to begin with 'v', fixing..."
+ export DESIRED_VERSION="v${1}"
+ fi
+ else
+ echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary"
+ exit 0
+ fi
+ ;;
+ '--no-sudo')
+ USE_SUDO="false"
+ ;;
+ '--help'|-h)
+ help
+ exit 0
+ ;;
+ *) exit 1
+ ;;
+ esac
+ shift
+done
+set +u
+
+initArch
+initOS
+verifySupported
+checkDesiredVersion
+if ! checkHelmInstalledVersion; then
+ downloadFile
+ verifyFile
+ installFile
+fi
+testVersion
+cleanup
diff --git a/helm/scripts/get-helm-4 b/helm/scripts/get-helm-4
new file mode 100644
index 000000000..1c90bbad5
--- /dev/null
+++ b/helm/scripts/get-helm-4
@@ -0,0 +1,347 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The install script is based off of the MIT-licensed script from glide,
+# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
+
+: ${BINARY_NAME:="helm"}
+: ${USE_SUDO:="true"}
+: ${DEBUG:="false"}
+: ${VERIFY_CHECKSUM:="true"}
+: ${VERIFY_SIGNATURES:="false"}
+: ${HELM_INSTALL_DIR:="/usr/local/bin"}
+: ${GPG_PUBRING:="pubring.kbx"}
+
+HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)"
+HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)"
+HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)"
+HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)"
+HAS_GIT="$(type "git" &> /dev/null && echo true || echo false)"
+HAS_TAR="$(type "tar" &> /dev/null && echo true || echo false)"
+
+# initArch discovers the architecture for this system.
+initArch() {
+ ARCH=$(uname -m)
+ case $ARCH in
+ armv5*) ARCH="armv5";;
+ armv6*) ARCH="armv6";;
+ armv7*) ARCH="arm";;
+ aarch64) ARCH="arm64";;
+ x86) ARCH="386";;
+ x86_64) ARCH="amd64";;
+ i686) ARCH="386";;
+ i386) ARCH="386";;
+ esac
+}
+
+# initOS discovers the operating system for this system.
+initOS() {
+ OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
+
+ case "$OS" in
+ # Minimalist GNU for Windows
+ mingw*|cygwin*) OS='windows';;
+ esac
+}
+
+# runs the given command as root (detects if we are root already)
+runAsRoot() {
+ if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
+ sudo "${@}"
+ else
+ "${@}"
+ fi
+}
+
+# verifySupported checks that the os/arch combination is supported for
+# binary builds, as well whether or not necessary tools are present.
+verifySupported() {
+ local supported="darwin-amd64\ndarwin-arm64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-loong64\nlinux-ppc64le\nlinux-s390x\nlinux-riscv64\nwindows-amd64\nwindows-arm64"
+ if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
+ echo "No prebuilt binary for ${OS}-${ARCH}."
+ echo "To build from source, go to https://github.com/helm/helm"
+ exit 1
+ fi
+
+ if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then
+ echo "Either curl or wget is required"
+ exit 1
+ fi
+
+ if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then
+ echo "In order to verify checksum, openssl must first be installed."
+ echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment."
+ exit 1
+ fi
+
+ if [ "${VERIFY_SIGNATURES}" == "true" ]; then
+ if [ "${HAS_GPG}" != "true" ]; then
+ echo "In order to verify signatures, gpg must first be installed."
+ echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment."
+ exit 1
+ fi
+ if [ "${OS}" != "linux" ]; then
+ echo "Signature verification is currently only supported on Linux."
+ echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually."
+ exit 1
+ fi
+ fi
+
+ if [ "${HAS_GIT}" != "true" ]; then
+ echo "[WARNING] Could not find git. It is required for plugin installation."
+ fi
+
+ if [ "${HAS_TAR}" != "true" ]; then
+ echo "[ERROR] Could not find tar. It is required to extract the helm binary archive."
+ exit 1
+ fi
+}
+
+# checkDesiredVersion checks if the desired version is available.
+checkDesiredVersion() {
+ if [ "x$DESIRED_VERSION" == "x" ]; then
+ # Get tag from release URL
+ local latest_release_url="https://get.helm.sh/helm4-latest-version"
+ local latest_release_response=""
+ if [ "${HAS_CURL}" == "true" ]; then
+ latest_release_response=$( curl -L --silent --show-error --fail "$latest_release_url" 2>&1 || true )
+ elif [ "${HAS_WGET}" == "true" ]; then
+ latest_release_response=$( wget "$latest_release_url" -q -O - 2>&1 || true )
+ fi
+ TAG=$( echo "$latest_release_response" | grep '^v[0-9]' )
+ if [ "x$TAG" == "x" ]; then
+ printf "Could not retrieve the latest release tag information from %s: %s\n" "${latest_release_url}" "${latest_release_response}"
+ exit 1
+ fi
+ else
+ TAG=$DESIRED_VERSION
+ fi
+}
+
+# checkHelmInstalledVersion checks which version of helm is installed and
+# if it needs to be changed.
+checkHelmInstalledVersion() {
+ if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
+ local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
+ if [[ "$version" == "$TAG" ]]; then
+ echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
+ return 0
+ else
+ echo "Helm ${TAG} is available. Changing from version ${version}."
+ return 1
+ fi
+ else
+ return 1
+ fi
+}
+
+# downloadFile downloads the latest binary package and also the checksum
+# for that binary.
+downloadFile() {
+ HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
+ DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
+ CHECKSUM_URL="$DOWNLOAD_URL.sha256"
+ HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
+ HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
+ HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
+ echo "Downloading $DOWNLOAD_URL"
+ if [ "${HAS_CURL}" == "true" ]; then
+ curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
+ curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
+ elif [ "${HAS_WGET}" == "true" ]; then
+ wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
+ wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
+ fi
+}
+
+# verifyFile verifies the SHA256 checksum of the binary package
+# and the GPG signatures for both the package and checksum file
+# (depending on settings in environment).
+verifyFile() {
+ if [ "${VERIFY_CHECKSUM}" == "true" ]; then
+ verifyChecksum
+ fi
+ if [ "${VERIFY_SIGNATURES}" == "true" ]; then
+ verifySignatures
+ fi
+}
+
+# installFile installs the Helm binary.
+installFile() {
+ HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
+ mkdir -p "$HELM_TMP"
+ tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
+ HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
+ echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
+ runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
+ echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
+}
+
+# verifyChecksum verifies the SHA256 checksum of the binary package.
+verifyChecksum() {
+ printf "Verifying checksum... "
+ local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
+ local expected_sum=$(cat ${HELM_SUM_FILE})
+ if [ "$sum" != "$expected_sum" ]; then
+ echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
+ exit 1
+ fi
+ echo "Done."
+}
+
+# verifySignatures obtains the latest KEYS file from GitHub main branch
+# as well as the signature .asc files from the specific GitHub release,
+# then verifies that the release artifacts were signed by a maintainer's key.
+verifySignatures() {
+ printf "Verifying signatures... "
+ local keys_filename="KEYS"
+ local github_keys_url="https://raw.githubusercontent.com/helm/helm/main/${keys_filename}"
+ if [ "${HAS_CURL}" == "true" ]; then
+ curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}"
+ elif [ "${HAS_WGET}" == "true" ]; then
+ wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}"
+ fi
+ local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg"
+ local gpg_homedir="${HELM_TMP_ROOT}/gnupg"
+ mkdir -p -m 0700 "${gpg_homedir}"
+ local gpg_stderr_device="/dev/null"
+ if [ "${DEBUG}" == "true" ]; then
+ gpg_stderr_device="/dev/stderr"
+ fi
+ gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}"
+ gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}"
+ local github_release_url="https://github.com/helm/helm/releases/download/${TAG}"
+ if [ "${HAS_CURL}" == "true" ]; then
+ curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
+ curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
+ elif [ "${HAS_WGET}" == "true" ]; then
+ wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
+ wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
+ fi
+ local error_text="If you think this might be a potential security issue,"
+ error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md"
+ local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
+ if [[ ${num_goodlines_sha} -lt 2 ]]; then
+ echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!"
+ echo -e "${error_text}"
+ exit 1
+ fi
+ local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
+ if [[ ${num_goodlines_tar} -lt 2 ]]; then
+ echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!"
+ echo -e "${error_text}"
+ exit 1
+ fi
+ echo "Done."
+}
+
+# fail_trap is executed if an error occurs.
+fail_trap() {
+ result=$?
+ if [ "$result" != "0" ]; then
+ if [[ -n "$INPUT_ARGUMENTS" ]]; then
+ echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
+ help
+ else
+ echo "Failed to install $BINARY_NAME"
+ fi
+ echo -e "\tFor support, go to https://github.com/helm/helm."
+ fi
+ cleanup
+ exit $result
+}
+
+# testVersion tests the installed client to make sure it is working.
+testVersion() {
+ set +e
+ HELM="$(command -v $BINARY_NAME)"
+ if [ "$?" = "1" ]; then
+ echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
+ exit 1
+ fi
+ set -e
+}
+
+# help provides possible cli installation arguments
+help () {
+ echo "Accepted cli arguments are:"
+ echo -e "\t[--help|-h ] ->> prints this help"
+ echo -e "\t[--version|-v ] . When not defined it fetches the latest release tag from the Helm CDN"
+ echo -e "\te.g. --version v4.0.0 or -v canary"
+ echo -e "\t[--no-sudo] ->> install without sudo"
+}
+
+# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
+cleanup() {
+ if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
+ rm -rf "$HELM_TMP_ROOT"
+ fi
+}
+
+# Execution
+
+#Stop execution on any error
+trap "fail_trap" EXIT
+set -e
+
+# Set debug if desired
+if [ "${DEBUG}" == "true" ]; then
+ set -x
+fi
+
+# Parsing input arguments (if any)
+export INPUT_ARGUMENTS="${@}"
+set -u
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ '--version'|-v)
+ shift
+ if [[ $# -ne 0 ]]; then
+ export DESIRED_VERSION="${1}"
+ if [[ "$1" != "v"* ]]; then
+ echo "Expected version arg ('${DESIRED_VERSION}') to begin with 'v', fixing..."
+ export DESIRED_VERSION="v${1}"
+ fi
+ else
+ echo -e "Please provide the desired version. e.g. --version v4.0.0 or -v canary"
+ exit 0
+ fi
+ ;;
+ '--no-sudo')
+ USE_SUDO="false"
+ ;;
+ '--help'|-h)
+ help
+ exit 0
+ ;;
+ *) exit 1
+ ;;
+ esac
+ shift
+done
+set +u
+
+initArch
+initOS
+verifySupported
+checkDesiredVersion
+if ! checkHelmInstalledVersion; then
+ downloadFile
+ verifyFile
+ installFile
+fi
+testVersion
+cleanup
diff --git a/helm/scripts/release-notes.sh b/helm/scripts/release-notes.sh
new file mode 100755
index 000000000..48328cb38
--- /dev/null
+++ b/helm/scripts/release-notes.sh
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+RELEASE=${RELEASE:-$2}
+PREVIOUS_RELEASE=${PREVIOUS_RELEASE:-$1}
+
+## Ensure Correct Usage
+if [[ -z "${PREVIOUS_RELEASE}" || -z "${RELEASE}" ]]; then
+ echo Usage:
+ echo ./scripts/release-notes.sh v3.0.0 v3.1.0
+ echo or
+ echo PREVIOUS_RELEASE=v3.0.0
+ echo RELEASE=v3.1.0
+ echo ./scripts/release-notes.sh
+ exit 1
+fi
+
+## validate git tags
+for tag in $RELEASE $PREVIOUS_RELEASE; do
+ OK=$(git tag -l ${tag} | wc -l)
+ if [[ "$OK" == "0" ]]; then
+ echo ${tag} is not a valid release version
+ exit 1
+ fi
+done
+
+## Check for hints that checksum files were downloaded
+## from `make fetch-dist`
+if [[ ! -e "./_dist/helm-${RELEASE}-darwin-amd64.tar.gz.sha256sum" ]]; then
+ echo "checksum file ./_dist/helm-${RELEASE}-darwin-amd64.tar.gz.sha256sum not found in ./_dist/"
+ echo "Did you forget to run \`make fetch-dist\` first ?"
+ exit 1
+fi
+
+## Generate CHANGELOG from git log
+CHANGELOG=$(git log --no-merges --pretty=format:'- %s %H (%aN)' ${PREVIOUS_RELEASE}..${RELEASE})
+if [[ ! $? -eq 0 ]]; then
+ echo "Error creating changelog"
+ echo "try running \`git log --no-merges --pretty=format:'- %s %H (%aN)' ${PREVIOUS_RELEASE}..${RELEASE}\`"
+ exit 1
+fi
+
+## guess at MAJOR / MINOR / PATCH versions
+MAJOR=$(echo ${RELEASE} | sed 's/^v//' | cut -f1 -d.)
+MINOR=$(echo ${RELEASE} | sed 's/^v//' | cut -f2 -d.)
+PATCH=$(echo ${RELEASE} | sed 's/^v//' | cut -f3 -d.)
+
+## Print release notes to stdout
+cat <. Users are encouraged to upgrade for the best experience.
+
+The community keeps growing, and we'd love to see you there!
+
+- Join the discussion in [Kubernetes Slack](https://kubernetes.slack.com):
+ - `#helm-users` for questions and just to hang out
+ - `#helm-dev` for discussing PRs, code, and bugs
+- Hang out at the Public Developer Call: Thursday, 9:30 Pacific via [Zoom](https://zoom.us/j/696660622)
+- Test, debug, and contribute charts: [ArtifactHub/packages](https://artifacthub.io/packages/search?kind=0)
+
+## Notable Changes
+
+- Add list of
+- notable changes here
+
+## Installation and Upgrading
+
+Download Helm ${RELEASE}. The common platform binaries are here:
+
+- [MacOS amd64](https://get.helm.sh/helm-${RELEASE}-darwin-amd64.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-darwin-amd64.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-darwin-amd64.tar.gz.sha256))
+- [MacOS arm64](https://get.helm.sh/helm-${RELEASE}-darwin-arm64.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-darwin-arm64.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-darwin-arm64.tar.gz.sha256))
+- [Linux amd64](https://get.helm.sh/helm-${RELEASE}-linux-amd64.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-amd64.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-amd64.tar.gz.sha256))
+- [Linux arm](https://get.helm.sh/helm-${RELEASE}-linux-arm.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-arm.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-arm.tar.gz.sha256))
+- [Linux arm64](https://get.helm.sh/helm-${RELEASE}-linux-arm64.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-arm64.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-arm64.tar.gz.sha256))
+- [Linux i386](https://get.helm.sh/helm-${RELEASE}-linux-386.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-386.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-386.tar.gz.sha256))
+- [Linux loong64](https://get.helm.sh/helm-${RELEASE}-linux-loong64.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-loong64.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-loong64.tar.gz.sha256))
+- [Linux ppc64le](https://get.helm.sh/helm-${RELEASE}-linux-ppc64le.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-ppc64le.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-ppc64le.tar.gz.sha256))
+- [Linux s390x](https://get.helm.sh/helm-${RELEASE}-linux-s390x.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-s390x.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-s390x.tar.gz.sha256))
+- [Linux riscv64](https://get.helm.sh/helm-${RELEASE}-linux-riscv64.tar.gz) ([checksum](https://get.helm.sh/helm-${RELEASE}-linux-riscv64.tar.gz.sha256sum) / $(cat _dist/helm-${RELEASE}-linux-riscv64.tar.gz.sha256))
+- [Windows amd64](https://get.helm.sh/helm-${RELEASE}-windows-amd64.zip) ([checksum](https://get.helm.sh/helm-${RELEASE}-windows-amd64.zip.sha256sum) / $(cat _dist/helm-${RELEASE}-windows-amd64.zip.sha256))
+- [Windows arm64](https://get.helm.sh/helm-${RELEASE}-windows-arm64.zip) ([checksum](https://get.helm.sh/helm-${RELEASE}-windows-arm64.zip.sha256sum) / $(cat _dist/helm-${RELEASE}-windows-arm64.zip.sha256))
+
+The [Quickstart Guide](https://helm.sh/docs/intro/quickstart/) will get you going from there. For **upgrade instructions** or detailed installation notes, check the [install guide](https://helm.sh/docs/intro/install/). You can also use a [script to install](https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3) on any system with \`bash\`.
+
+## What's Next
+
+- ${MAJOR}.${MINOR}.$(expr ${PATCH} + 1) will contain only bug fixes.
+- ${MAJOR}.$(expr ${MINOR} + 1).${PATCH} is the next feature release. This release will focus on ...
+
+## Changelog
+
+${CHANGELOG}
+EOF
diff --git a/helm/scripts/sync-repo.sh b/helm/scripts/sync-repo.sh
new file mode 100755
index 000000000..0651d3634
--- /dev/null
+++ b/helm/scripts/sync-repo.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bash 'Strict Mode'
+# http://redsymbol.net/articles/unofficial-bash-strict-mode
+set -euo pipefail
+IFS=$'\n\t'
+
+# Helper Functions -------------------------------------------------------------
+
+# Display error message and exit
+error_exit() {
+ echo "error: ${1:-"unknown error"}" 1>&2
+ exit 1
+}
+
+# Checks if a command exists. Returns 1 or 0
+command_exists() {
+ hash "${1}" 2>/dev/null
+}
+
+# Program Functions ------------------------------------------------------------
+
+verify_prereqs() {
+ echo "Verifying Prerequisites...."
+ if command_exists gsutil; then
+ echo "Thumbs up! Looks like you have gsutil. Let's continue."
+ else
+ error_exit "Couldn't find gsutil. Bailing out."
+ fi
+}
+
+confirm() {
+ # shellcheck disable=SC2154
+ case $response in
+ [yY][eE][sS]|[yY])
+ true
+ ;;
+ *)
+ false
+ ;;
+ esac
+}
+
+# Main -------------------------------------------------------------------------
+
+main() {
+ if [ "$#" -ne 2 ]; then
+ error_exit "Illegal number of parameters. You must pass in local directory path and a GCS bucket name"
+ fi
+
+ echo "Getting ready to sync your local directory ($1) to a remote repository at gs://$2"
+
+ verify_prereqs
+
+ # dry run of the command
+ gsutil rsync -d -n "$1" gs://"$2"
+
+ read -r -p "Are you sure you would like to continue with these changes? [y/N] " confirm
+ if [[ $confirm =~ [yY](es)* ]]; then
+ gsutil rsync -d "$1" gs://"$2"
+ else
+ error_exit "Discontinuing sync process."
+ fi
+
+ echo "Your remote chart repository now matches the contents of the $1 directory!"
+
+}
+
+main "${@:-}"
diff --git a/helm/scripts/util.sh b/helm/scripts/util.sh
new file mode 100644
index 000000000..c1e6c3751
--- /dev/null
+++ b/helm/scripts/util.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
+# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
+kube::util::trap_add() {
+ local trap_add_cmd
+ trap_add_cmd=$1
+ shift
+
+ for trap_add_name in "$@"; do
+ local existing_cmd
+ local new_cmd
+
+ # Grab the currently defined trap commands for this trap
+ existing_cmd=`trap -p "${trap_add_name}" | awk -F"'" '{print $2}'`
+
+ if [[ -z "${existing_cmd}" ]]; then
+ new_cmd="${trap_add_cmd}"
+ else
+ new_cmd="${existing_cmd};${trap_add_cmd}"
+ fi
+
+ # Assign the test
+ trap "${new_cmd}" "${trap_add_name}"
+ done
+}
+
+# Opposite of kube::util::ensure-temp-dir()
+kube::util::cleanup-temp-dir() {
+ rm -rf "${KUBE_TEMP}"
+}
+
+# Create a temp dir that'll be deleted at the end of this bash session.
+#
+# Vars set:
+# KUBE_TEMP
+kube::util::ensure-temp-dir() {
+ if [[ -z ${KUBE_TEMP-} ]]; then
+ KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
+ kube::util::trap_add kube::util::cleanup-temp-dir EXIT
+ fi
+}
diff --git a/helm/scripts/validate-license.sh b/helm/scripts/validate-license.sh
new file mode 100755
index 000000000..f67812ca5
--- /dev/null
+++ b/helm/scripts/validate-license.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+# Copyright The Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -euo pipefail
+IFS=$'\n\t'
+
+find_files() {
+ find . -not \( \
+ \( \
+ -wholename './.git' \
+ -o -wholename '*testdata*' \
+ -o -wholename '*third_party*' \
+ \) -prune \
+ \) \
+ \( -name '*.go' -o -name '*.sh' \)
+}
+
+# Use "|| :" to ignore the error code when grep returns empty
+failed_license_header=($(find_files | xargs grep -L 'Licensed under the Apache License, Version 2.0 (the "License")' || :))
+if (( ${#failed_license_header[@]} > 0 )); then
+ echo "Some source files are missing license headers."
+ printf '%s\n' "${failed_license_header[@]}"
+ exit 1
+fi
+
+# Use "|| :" to ignore the error code when grep returns empty
+failed_copyright_header=($(find_files | xargs grep -L 'Copyright The Helm Authors.' || :))
+if (( ${#failed_copyright_header[@]} > 0 )); then
+ echo "Some source files are missing the copyright header."
+ printf '%s\n' "${failed_copyright_header[@]}"
+ exit 1
+fi
diff --git a/helm/testdata/crt.pem b/helm/testdata/crt.pem
new file mode 100644
index 000000000..c4c471322
--- /dev/null
+++ b/helm/testdata/crt.pem
@@ -0,0 +1,73 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number:
+ 48:5a:94:94:51:de:97:11:3b:62:54:dd:ac:85:63:e6:40:5c:4c:f6
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=US, ST=CO, L=Boulder, O=Helm, CN=helm.sh
+ Validity
+ Not Before: Aug 24 18:07:59 2022 GMT
+ Not After : Aug 21 18:07:59 2032 GMT
+ Subject: C=US, ST=CO, L=Boulder, O=Helm, CN=helm.sh
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public-Key: (2048 bit)
+ Modulus:
+ 00:c8:89:55:0d:0b:f1:da:e6:c0:70:7d:d3:27:cd:
+ b8:a8:81:8b:7c:a4:89:e5:d1:b1:78:01:1d:df:44:
+ 88:0b:fc:d6:81:35:3d:d1:3b:5e:8f:bb:93:b3:7e:
+ 28:db:ed:ff:a0:13:3a:70:a3:fe:94:6b:0b:fe:fb:
+ 63:00:b0:cb:dc:81:cd:80:dc:d0:2f:bf:b2:4f:9a:
+ 81:d4:22:dc:97:c8:8f:27:86:59:91:fa:92:05:75:
+ c4:cc:6b:f5:a9:6b:74:1e:f5:db:a9:f8:bf:8c:a2:
+ 25:fd:a0:cc:79:f4:25:57:74:a9:23:9b:e2:b7:22:
+ 7a:14:7a:3d:ea:f1:7e:32:6b:57:6c:2e:c6:4f:75:
+ 54:f9:6b:54:d2:ca:eb:54:1c:af:39:15:9b:d0:7c:
+ 0f:f8:55:51:04:ea:da:fa:7b:8b:63:0f:ac:39:b1:
+ f6:4b:8e:4e:f6:ea:e9:7b:e6:ba:5e:5a:8e:91:ef:
+ dc:b1:7d:52:3f:73:83:52:46:83:48:49:ff:f2:2d:
+ ca:54:f2:36:bb:49:cc:59:99:c0:9e:cf:8e:78:55:
+ 6c:ed:7d:7e:83:b8:59:2c:7d:f8:1a:81:f0:7d:f5:
+ 27:f2:db:ae:d4:31:54:38:fe:47:b2:ee:16:20:0f:
+ f1:db:2d:28:bf:6f:38:eb:11:bb:9a:d4:b2:5a:3a:
+ 4a:7f
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Subject Alternative Name:
+ DNS:helm.sh, IP Address:127.0.0.1
+ Signature Algorithm: sha256WithRSAEncryption
+ d9:95:3b:98:01:6c:cb:a2:92:d8:f7:a7:52:2c:00:c1:04:cd:
+ ef:1b:d8:fa:71:71:29:7d:1d:29:42:ea:03:ce:15:c6:d5:ee:
+ 2d:25:51:7e:96:8b:44:2e:d9:19:1b:95:a6:9c:92:52:2b:88:
+ d8:76:6e:1b:87:36:8e:3a:b1:c6:aa:a4:7a:4e:a9:8b:8d:c0:
+ 3c:77:95:81:db:9a:50:f4:fb:cc:62:21:36:36:91:3b:6c:6e:
+ 37:a8:fa:cc:21:56:f4:31:6f:07:2b:29:0e:1a:06:6c:10:87:
+ fa:6c:be:e1:29:8c:b9:84:b2:ea:4d:07:e8:2b:ff:f6:24:e6:
+ a6:95:72:c7:d8:02:53:c2:c0:68:d3:fc:e9:72:a5:da:6c:39:
+ 5a:6b:17:71:86:40:96:ac:94:dd:21:45:9e:aa:85:8a:73:4c:
+ 8c:3f:0d:2b:d0:8b:04:ef:61:bb:8e:06:6b:86:46:30:a3:64:
+ 6b:97:01:8b:46:56:7d:42:33:f5:e0:ea:fd:80:b4:8a:50:a8:
+ 20:2c:f9:ad:61:05:da:ff:b9:b5:da:9c:d6:0e:47:44:0c:9a:
+ 8f:11:e0:66:f8:76:0c:0f:43:99:6b:af:44:3c:5c:cb:30:98:
+ 6a:24:f7:ea:23:db:cf:23:35:dd:6c:2e:9d:0a:b0:82:77:b8:
+ dc:90:5f:78
+-----BEGIN CERTIFICATE-----
+MIIDRDCCAiygAwIBAgIUSFqUlFHelxE7YlTdrIVj5kBcTPYwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAwDgYDVQQHDAdCb3VsZGVy
+MQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNoMB4XDTIyMDgyNDE4MDc1
+OVoXDTMyMDgyMTE4MDc1OVowTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAw
+DgYDVQQHDAdCb3VsZGVyMQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNo
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyIlVDQvx2ubAcH3TJ824
+qIGLfKSJ5dGxeAEd30SIC/zWgTU90Ttej7uTs34o2+3/oBM6cKP+lGsL/vtjALDL
+3IHNgNzQL7+yT5qB1CLcl8iPJ4ZZkfqSBXXEzGv1qWt0HvXbqfi/jKIl/aDMefQl
+V3SpI5vityJ6FHo96vF+MmtXbC7GT3VU+WtU0srrVByvORWb0HwP+FVRBOra+nuL
+Yw+sObH2S45O9urpe+a6XlqOke/csX1SP3ODUkaDSEn/8i3KVPI2u0nMWZnAns+O
+eFVs7X1+g7hZLH34GoHwffUn8tuu1DFUOP5Hsu4WIA/x2y0ov2846xG7mtSyWjpK
+fwIDAQABoxwwGjAYBgNVHREEETAPggdoZWxtLnNohwR/AAABMA0GCSqGSIb3DQEB
+CwUAA4IBAQDZlTuYAWzLopLY96dSLADBBM3vG9j6cXEpfR0pQuoDzhXG1e4tJVF+
+lotELtkZG5WmnJJSK4jYdm4bhzaOOrHGqqR6TqmLjcA8d5WB25pQ9PvMYiE2NpE7
+bG43qPrMIVb0MW8HKykOGgZsEIf6bL7hKYy5hLLqTQfoK//2JOamlXLH2AJTwsBo
+0/zpcqXabDlaaxdxhkCWrJTdIUWeqoWKc0yMPw0r0IsE72G7jgZrhkYwo2RrlwGL
+RlZ9QjP14Or9gLSKUKggLPmtYQXa/7m12pzWDkdEDJqPEeBm+HYMD0OZa69EPFzL
+MJhqJPfqI9vPIzXdbC6dCrCCd7jckF94
+-----END CERTIFICATE-----
diff --git a/helm/testdata/generate.sh b/helm/testdata/generate.sh
new file mode 100755
index 000000000..9751ef304
--- /dev/null
+++ b/helm/testdata/generate.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+openssl req -new -config openssl.conf -key key.pem -out key.csr
+openssl ca -config openssl.conf -create_serial -batch -in key.csr -out crt.pem -key rootca.key -cert rootca.crt
diff --git a/helm/testdata/key.pem b/helm/testdata/key.pem
new file mode 100644
index 000000000..691e55087
--- /dev/null
+++ b/helm/testdata/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAyIlVDQvx2ubAcH3TJ824qIGLfKSJ5dGxeAEd30SIC/zWgTU9
+0Ttej7uTs34o2+3/oBM6cKP+lGsL/vtjALDL3IHNgNzQL7+yT5qB1CLcl8iPJ4ZZ
+kfqSBXXEzGv1qWt0HvXbqfi/jKIl/aDMefQlV3SpI5vityJ6FHo96vF+MmtXbC7G
+T3VU+WtU0srrVByvORWb0HwP+FVRBOra+nuLYw+sObH2S45O9urpe+a6XlqOke/c
+sX1SP3ODUkaDSEn/8i3KVPI2u0nMWZnAns+OeFVs7X1+g7hZLH34GoHwffUn8tuu
+1DFUOP5Hsu4WIA/x2y0ov2846xG7mtSyWjpKfwIDAQABAoIBAQC/XB1m58EQzCVS
+sx7t2qedVJEQjcpxHdql0xr4VOMl3U2r2mx03pxrt+lH3NmMlN3bmL2pgzSJ2GSI
+Gsbsf8jpUIwTraKUDe9PevbswZ+Sz3Wbl96dKGhzAWCcWWEBHGKgsKe+2Hmg75Il
+Jm446btAaziDnFuJukKYi9XN/kgYPxi914O8yz2KtCIVHEHHkl1FcSqjpghPtzU3
+hm1Nv/7tW2r5IrxCGRNJQTg6l4A4mdqif1u75ZUMcbp8dTaJ2/iYBIKIsh7sFMqy
+TG6ZN0p3G92ijo7rtznxXS9rIE2rcg6qhusdK8eqhV0KHOqH2nkB4jWbw1NwKFzV
+2jXm4S5RAoGBAPIExNBpE30c++Wl4ITuzODd99CczFj527ZBxUdT/H/IszR7adtJ
+gHnayzzycul3GnCVMEGBUBp7q09OkcacA7MqS3/Zjn2zrpViz2iluP6jl0qfs2Sp
+HaePLBKz9oFVi5m17ZYYnG7etSPVzcLaEi23ws5286HToXeqfUuGd+DlAoGBANQf
+FJzQ0EbNu5QcNnQqwfAahvSqc+imPL0HuQWKEMvN3UXXU7Nn8bqba/JGVhgD7/5u
+3g2DyyIou6gnocN669CqY8hm0jEboggD4pC8LVj+Iot25UzoNeNuHfqeu7wAlWWL
+zjfC3UpSbh1O4H8i5chpFxe9N7syzOXBI5IVPBuTAoGBAITrrZSxQSzj8E0uj2Mz
+LH8MKgD/PRRZFhzBfrIwJGuiNRpL9dWkRtWmHx14IziqW3Ed3wT7Gp2Q8oN6KYIl
+SbrrLdAoEqRjPS16uWNGMZZZDszDbWmJoGnYrmIPSQG7lBJ14uke1zvlQSNPV9T+
+pCFL3cg7eI+WhgYNMwd58PkpAoGBAKTXFlyaxRAQtrFtjz+NLrMY2kFt6K8l6FN5
+meXdGhpW+5pXsBreLvK17xgSYrs87BbML1FPVt9Pyiztx36ymmjI0MweYz94Wt1h
+r4KMSa07qLq6hYzTc3Uu0Ks/CWMbDP4hu/qHOxKTpjCuaDVEeE7ao/B1wcZ+vs3Y
+3nyadeBzAoGBAJAZl50nHPwXpEIsHO3nC1ff51cVoV3+gpcCgQ270rLEa2Uv8+Zc
+8rXD/LgcLzZ6Fvp0I3jv1mXlN8W0OruZS71lCM/zBd++E04HMxcvuv4lfqzcW+3E
+V0ZBn2ErSTF9yKvGedRJk+vbCi7cy38WaA+z59ct/gpiw2Z3q6w85jlF
+-----END RSA PRIVATE KEY-----
diff --git a/helm/testdata/localhost-crt.pem b/helm/testdata/localhost-crt.pem
new file mode 100644
index 000000000..70fa0a429
--- /dev/null
+++ b/helm/testdata/localhost-crt.pem
@@ -0,0 +1,73 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number:
+ 7f:5e:fa:21:fa:ee:e4:6a:be:9b:c2:80:bf:ed:42:f3:2d:47:f5:d2
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=US, ST=CO, L=Boulder, O=Helm, CN=helm.sh
+ Validity
+ Not Before: Nov 6 21:59:18 2023 GMT
+ Not After : Nov 3 21:59:18 2033 GMT
+ Subject: C=CA, ST=ON, L=Kitchener, O=Helm, CN=localhost
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public-Key: (2048 bit)
+ Modulus:
+ 00:c8:89:55:0d:0b:f1:da:e6:c0:70:7d:d3:27:cd:
+ b8:a8:81:8b:7c:a4:89:e5:d1:b1:78:01:1d:df:44:
+ 88:0b:fc:d6:81:35:3d:d1:3b:5e:8f:bb:93:b3:7e:
+ 28:db:ed:ff:a0:13:3a:70:a3:fe:94:6b:0b:fe:fb:
+ 63:00:b0:cb:dc:81:cd:80:dc:d0:2f:bf:b2:4f:9a:
+ 81:d4:22:dc:97:c8:8f:27:86:59:91:fa:92:05:75:
+ c4:cc:6b:f5:a9:6b:74:1e:f5:db:a9:f8:bf:8c:a2:
+ 25:fd:a0:cc:79:f4:25:57:74:a9:23:9b:e2:b7:22:
+ 7a:14:7a:3d:ea:f1:7e:32:6b:57:6c:2e:c6:4f:75:
+ 54:f9:6b:54:d2:ca:eb:54:1c:af:39:15:9b:d0:7c:
+ 0f:f8:55:51:04:ea:da:fa:7b:8b:63:0f:ac:39:b1:
+ f6:4b:8e:4e:f6:ea:e9:7b:e6:ba:5e:5a:8e:91:ef:
+ dc:b1:7d:52:3f:73:83:52:46:83:48:49:ff:f2:2d:
+ ca:54:f2:36:bb:49:cc:59:99:c0:9e:cf:8e:78:55:
+ 6c:ed:7d:7e:83:b8:59:2c:7d:f8:1a:81:f0:7d:f5:
+ 27:f2:db:ae:d4:31:54:38:fe:47:b2:ee:16:20:0f:
+ f1:db:2d:28:bf:6f:38:eb:11:bb:9a:d4:b2:5a:3a:
+ 4a:7f
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Subject Alternative Name:
+ DNS:localhost
+ Signature Algorithm: sha256WithRSAEncryption
+ 47:47:fe:29:ca:94:28:75:59:ba:ab:67:ab:c6:a6:0b:0a:f2:
+ 0f:26:d9:1d:35:db:68:a5:d8:f5:1f:d1:87:e7:a7:74:fd:c0:
+ 22:aa:c8:ec:6c:d3:ac:8a:0b:ed:59:3a:a0:12:77:7c:53:74:
+ fd:30:59:34:8f:a4:ef:5b:98:3f:ff:cf:89:87:ed:d3:7f:41:
+ 2f:b1:9a:12:71:bb:fe:3a:cf:77:16:32:bc:83:90:cc:52:2f:
+ 3b:f4:ae:db:b1:bb:f0:dd:30:d4:03:17:5e:47:b7:06:86:7a:
+ 16:b1:72:2f:80:5d:d4:c0:f9:6c:91:df:5a:c5:15:86:66:68:
+ c8:90:8e:f1:a2:bb:40:0f:ef:26:1b:02:c4:42:de:8c:69:ec:
+ ad:27:d0:bc:da:7c:76:33:86:de:b7:c4:04:64:e6:f6:dc:44:
+ 89:7b:b8:2f:c7:28:7a:4c:a6:01:ad:a5:17:64:3a:23:da:aa:
+ db:ce:3f:86:e9:92:dc:0d:c4:5a:b4:52:a8:8a:ee:3d:62:7d:
+ b1:c8:fa:ef:96:2b:ab:f1:e1:6d:6f:7d:1e:ce:bc:7a:d0:92:
+ 02:1b:c8:55:36:77:bf:d4:42:d3:fc:57:ca:b7:cc:95:be:ce:
+ f8:6e:b2:28:ca:4d:9a:00:7d:78:c8:56:04:2e:b3:ac:03:fa:
+ 05:d8:42:bd
+-----BEGIN CERTIFICATE-----
+MIIDRDCCAiygAwIBAgIUf176Ifru5Gq+m8KAv+1C8y1H9dIwDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAwDgYDVQQHDAdCb3VsZGVy
+MQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNoMB4XDTIzMTEwNjIxNTkx
+OFoXDTMzMTEwMzIxNTkxOFowUTELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAk9OMRIw
+EAYDVQQHDAlLaXRjaGVuZXIxDTALBgNVBAoMBEhlbG0xEjAQBgNVBAMMCWxvY2Fs
+aG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMiJVQ0L8drmwHB9
+0yfNuKiBi3ykieXRsXgBHd9EiAv81oE1PdE7Xo+7k7N+KNvt/6ATOnCj/pRrC/77
+YwCwy9yBzYDc0C+/sk+agdQi3JfIjyeGWZH6kgV1xMxr9alrdB7126n4v4yiJf2g
+zHn0JVd0qSOb4rciehR6PerxfjJrV2wuxk91VPlrVNLK61QcrzkVm9B8D/hVUQTq
+2vp7i2MPrDmx9kuOTvbq6Xvmul5ajpHv3LF9Uj9zg1JGg0hJ//ItylTyNrtJzFmZ
+wJ7PjnhVbO19foO4WSx9+BqB8H31J/LbrtQxVDj+R7LuFiAP8dstKL9vOOsRu5rU
+slo6Sn8CAwEAAaMYMBYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+CwUAA4IBAQBHR/4pypQodVm6q2erxqYLCvIPJtkdNdtopdj1H9GH56d0/cAiqsjs
+bNOsigvtWTqgEnd8U3T9MFk0j6TvW5g//8+Jh+3Tf0EvsZoScbv+Os93FjK8g5DM
+Ui879K7bsbvw3TDUAxdeR7cGhnoWsXIvgF3UwPlskd9axRWGZmjIkI7xortAD+8m
+GwLEQt6MaeytJ9C82nx2M4bet8QEZOb23ESJe7gvxyh6TKYBraUXZDoj2qrbzj+G
+6ZLcDcRatFKoiu49Yn2xyPrvliur8eFtb30ezrx60JICG8hVNne/1ELT/FfKt8yV
+vs74brIoyk2aAH14yFYELrOsA/oF2EK9
+-----END CERTIFICATE-----
diff --git a/helm/testdata/openssl.conf b/helm/testdata/openssl.conf
new file mode 100644
index 000000000..be5ff04b7
--- /dev/null
+++ b/helm/testdata/openssl.conf
@@ -0,0 +1,46 @@
+[ca]
+default_ca = CA_default
+
+[CA_default]
+dir = ./
+database = $dir/index.txt
+new_certs_dir = ./
+serial = $dir/serial
+private_key = ./rootca.key
+certificate = ./rootca.crt
+default_days = 3650
+default_md = sha256
+policy = policy_anything
+copy_extensions = copyall
+
+[policy_anything]
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+default_bits = 2048
+distinguished_name = req_distinguished_name
+req_extensions = v3_req
+
+[ req_distinguished_name ]
+countryName = Country Name (2 letter code)
+stateOrProvinceName = State or Province Name (full name)
+localityName = Locality Name (eg, city)
+organizationName = Organization Name (eg, company)
+commonName = Common Name (e.g. server FQDN or YOUR name)
+
+[ v3_req ]
+subjectAltName = @alternate_names
+
+[alternate_names]
+DNS.1 = helm.sh
+IP.1 = 127.0.0.1
+
+# # Used to generate localhost-crt.pem
+# [alternate_names]
+# DNS.1 = localhost
diff --git a/helm/testdata/releases.yaml b/helm/testdata/releases.yaml
new file mode 100644
index 000000000..e960e815d
--- /dev/null
+++ b/helm/testdata/releases.yaml
@@ -0,0 +1,43 @@
+# This file can be used as input to create test releases:
+# HELM_MEMORY_DRIVER_DATA=./testdata/releases.yaml HELM_DRIVER=memory helm list --all-namespaces
+- name: athos
+ version: 1
+ namespace: default
+ info:
+ status: deployed
+ chart:
+ metadata:
+ name: athos-chart
+ version: 1.0.0
+ appversion: 1.1.0
+- name: porthos
+ version: 2
+ namespace: default
+ info:
+ status: deployed
+ chart:
+ metadata:
+ name: porthos-chart
+ version: 0.2.0
+ appversion: 0.2.2
+- name: aramis
+ version: 3
+ namespace: default
+ info:
+ status: deployed
+ chart:
+ metadata:
+ name: aramis-chart
+ version: 0.0.3
+ appversion: 3.0.3
+- name: dartagnan
+ version: 4
+ namespace: gascony
+ info:
+ status: deployed
+ chart:
+ metadata:
+ name: dartagnan-chart
+ version: 0.4.4
+ appversion: 4.4.4
+
diff --git a/helm/testdata/rootca.crt b/helm/testdata/rootca.crt
new file mode 100644
index 000000000..874cdbc1d
--- /dev/null
+++ b/helm/testdata/rootca.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIUQTwAoToO0ZxUZZCSWuJI4/ROB+4wDQYJKoZIhvcNAQEL
+BQAwTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAwDgYDVQQHDAdCb3VsZGVy
+MQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNoMB4XDTIyMDgyNDE4MDYx
+MVoXDTI4MDQwMjE4MDYxMVowTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNPMRAw
+DgYDVQQHDAdCb3VsZGVyMQ0wCwYDVQQKDARIZWxtMRAwDgYDVQQDDAdoZWxtLnNo
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4Z4zHBdV+ID8PdPYRpZp
+I8QXhDiMV/kgUSWTqfWMxW9n9X7Tg2jTnypKqX3aIxiHBi3+/VryWRTosZReZI6t
+Xv1iuIDbyJuoWskZlZowwsRNA6n7IBFVmUZvRWJk3ThOgXRcOetojH9HG3LnRjtf
+HPqmBxq3ZAwDjYw3YzbN3UO2CkXjIc8eEXo/UaUtPFWCuwJNSKAgYTS12Rr1/Ydx
+9q9u5+fKZoS9WWdRhxu3sHRshs9ekkr1vIhaS06n7YCAO6TCngo+UDi+JG53kqEc
+LV9R31sbc3618QLZTSa6NKMzdu/bnZ15ID0c2HNSUTHExa8XE85mEc87HgMKoZy2
+hQIDAQABo1MwUTAdBgNVHQ4EFgQUicAFxDIXaZuRdpc3D265zOceBDQwHwYDVR0j
+BBgwFoAUicAFxDIXaZuRdpc3D265zOceBDQwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEAyIndA2vsHWhn+PqxAnaCai0xAJ6awye7CAWKsLmT3rC2
+zR+EI5dCJgPJ0zrltQyngWz1IgUGoC4klgj/37lY5cG8/HYBJ37IAPya+pVukQuL
+qqe2RCWqi4XZUPFRHjbJbHoM3AMsFeYOWJy+bTCMKyyYqUO0S7OM77ID9k7gcJFj
+TZ6fvWvRqWFQCLJpQh95kt5wOkAKyttPf5Qkh37fLHtyrwkpbJCj+Yv3kcdKBYpw
+kYLbK6DqqbgIKJHRbpu5xGOhKZ0/jnHJRvGAE6g6OKOXJQ/ydIZauoXKQ7hpcV43
+UAIXGjdbKVoPyLNgMueviW8+64GKqllWONPbBai5jQ==
+-----END CERTIFICATE-----
diff --git a/helm/testdata/rootca.key b/helm/testdata/rootca.key
new file mode 100644
index 000000000..14a2a0c0c
--- /dev/null
+++ b/helm/testdata/rootca.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA4Z4zHBdV+ID8PdPYRpZpI8QXhDiMV/kgUSWTqfWMxW9n9X7T
+g2jTnypKqX3aIxiHBi3+/VryWRTosZReZI6tXv1iuIDbyJuoWskZlZowwsRNA6n7
+IBFVmUZvRWJk3ThOgXRcOetojH9HG3LnRjtfHPqmBxq3ZAwDjYw3YzbN3UO2CkXj
+Ic8eEXo/UaUtPFWCuwJNSKAgYTS12Rr1/Ydx9q9u5+fKZoS9WWdRhxu3sHRshs9e
+kkr1vIhaS06n7YCAO6TCngo+UDi+JG53kqEcLV9R31sbc3618QLZTSa6NKMzdu/b
+nZ15ID0c2HNSUTHExa8XE85mEc87HgMKoZy2hQIDAQABAoIBACFgRNFQBnDHrAj9
+cM4obA9Vb+EoeGJ/QS+f7nNDFvsSGv/vLh0PgdbW68qdCosMktTwMvuJ27Yf6Lh0
+aW5YyP73XwZKUbkghcxAWZ+O+s2lOntjRvocdlxBVi6eeqtbLAnsi8QptgKqxXsj
+CWGTYOOplKwSYLTVLiVfa8YqklO77HHKQCMpCU7KsDbNpvhpme345nrAkAGX4Sd+
+STNTM3jdmyzC4jFycMz2eaSbJZjFefn9OkiAL+RNlm4dFo/l9sJIAaIZ5gPV3Jzl
++uDRFO0eW5oE/mHmfS450yOMPwl/mf4GxRbq2JNTBFSroYaz+n/p3Ii+3U5oWmi3
+D9C/EkECgYEA9CiCM5Vc5yPyq4UWjxRD6vedv0Ihur7x7bo1zxTdMBc6feRnJFp2
+HTz33gTY+mhyjstVshj+58rmIR7Ns0bLBJ5v0GyorxhnqhgfsWn9fiKR0lb79DpS
+0APrnMdsz0/5NbK45b7qui6p4aDfRxr+EsUlwTUfbEjISn9/YgBk+rECgYEA7I9+
+S1sXBkRuBEyga8X77m/ZyF0ucqyJGxpXfsvR3udgWB3uyV5mEs4pnpLm0SPowuRl
+8RUGBF9IUfMwvqcQkGN9qy+f0fpSZmLm0nFOyKD2aE/7A3JlMhY0KsSj2odUotzU
+rTXqtlS87zsQl7t028B3r1Cw+y10qLcw3Se0BhUCgYAP5oN0MIn4U5L+MJCjiMJT
+jwSq6/eeXckLnlDax5UQCLM6d6Fv8KQ4izvpLY+j3yF2wy81hgMzvTb3eTYUMswN
+5POLM0hY/tHhdei6eRiVGlM8y4VlBldWTKsPbr1bUu373UPFUoWe0mMl2oAv9UYO
+muA2kOsW9jZ1A5CcJUJuQQKBgDEnuASMjwI8Yef+zC7Y2vq2vzhFNIubknnRRXER
+hTCeP4TP43hwZyFtOXS77b5zicBFmXE4/yEVc3+j2vMi3+xA4DIcGUeWjly8HF6K
+MOa7m7gdNnmG4cRAnOJuLeYQzONyo7bCR11PylqjmVUOHMA1BCmnyL7IuT79oeey
+glPpAoGAICOwp+bh1nqPt+nINO1q/zCCdl9hVakGVkQkuCiDK8wLW3R/vNrBtTf+
+PDM87BasvZkzA2VBcTgtDCcnP/aNDLyy2FDKIUyVtcpfheHgxjlT1txGHBUXJf6z
+rS1fGWIYbpMb3RSCtGJTa1hyDJdN424nYUD3phL4SPx2Cn5eAPs=
+-----END RSA PRIVATE KEY-----
diff --git a/internal/action/config.go b/internal/action/config.go
index a247359e3..8dffb1684 100644
--- a/internal/action/config.go
+++ b/internal/action/config.go
@@ -18,11 +18,12 @@ package action
import (
"fmt"
+ "log/slog"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmkube "helm.sh/helm/v3/pkg/kube"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmkube "helm.sh/helm/v4/pkg/kube"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
"k8s.io/cli-runtime/pkg/genericclioptions"
"github.com/fluxcd/helm-controller/internal/storage"
@@ -49,7 +50,7 @@ type ConfigFactory struct {
// Driver to use for the Helm action.
Driver helmdriver.Driver
// StorageLog is the logger to use for the Helm storage driver.
- StorageLog helmaction.DebugLog
+ StorageLog slog.Handler
}
// ConfigFactoryOption is a function that configures a ConfigFactory.
@@ -122,7 +123,7 @@ func WithDriver(driver helmdriver.Driver) ConfigFactoryOption {
}
// WithStorageLog sets the ConfigFactory.StorageLog.
-func WithStorageLog(log helmaction.DebugLog) ConfigFactoryOption {
+func WithStorageLog(log slog.Handler) ConfigFactoryOption {
return func(f *ConfigFactory) error {
f.StorageLog = log
return nil
@@ -137,31 +138,27 @@ func (c *ConfigFactory) NewStorage(observers ...storage.ObserveFunc) *helmstorag
driver = storage.NewObserver(driver, observers...)
}
s := helmstorage.Init(driver)
- if c.StorageLog != nil {
- s.Log = c.StorageLog
- }
+ s.SetLogger(c.StorageLog)
return s
}
// Build returns a new Helm action.Configuration configured with the receiver
// values, and the provided logger and observer(s).
-func (c *ConfigFactory) Build(log helmaction.DebugLog, observers ...storage.ObserveFunc) *helmaction.Configuration {
+func (c *ConfigFactory) Build(log slog.Handler, observers ...storage.ObserveFunc) *helmaction.Configuration {
client := c.KubeClient
+
+ var opts []helmaction.ConfigurationOption
if log != nil {
- // As Helm emits important information to the log of the client, we
- // need to configure it with the same logger as the action.Configuration.
- // This is not ideal, as we would like to re-use the client between
- // actions, but otherwise this would not be thread-safe.
client = helmkube.New(c.Getter)
- client.Log = log
+ client.SetLogger(log)
+ opts = append(opts, helmaction.ConfigurationSetLogger(log))
}
- return &helmaction.Configuration{
- RESTClientGetter: c.Getter,
- Releases: c.NewStorage(observers...),
- KubeClient: client,
- Log: log,
- }
+ conf := helmaction.NewConfiguration(opts...)
+ conf.RESTClientGetter = c.Getter
+ conf.Releases = c.NewStorage(observers...)
+ conf.KubeClient = client
+ return conf
}
// Valid returns an error if the ConfigFactory is missing configuration
diff --git a/internal/action/config_test.go b/internal/action/config_test.go
index 16ef468dd..8a4452277 100644
--- a/internal/action/config_test.go
+++ b/internal/action/config_test.go
@@ -18,18 +18,19 @@ package action
import (
"errors"
+ "log/slog"
"testing"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmkube "helm.sh/helm/v3/pkg/kube"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmkube "helm.sh/helm/v4/pkg/kube"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
"k8s.io/cli-runtime/pkg/genericclioptions"
cmdtest "k8s.io/kubectl/pkg/cmd/testing"
"github.com/fluxcd/helm-controller/internal/kube"
"github.com/fluxcd/helm-controller/internal/storage"
+ "github.com/fluxcd/helm-controller/internal/testutil"
)
func TestNewConfigFactory(t *testing.T) {
@@ -57,9 +58,7 @@ func TestNewConfigFactory(t *testing.T) {
getter: &kube.MemoryRESTClientGetter{},
opts: []ConfigFactoryOption{
WithDriver(helmdriver.NewMemory()),
- WithStorageLog(func(format string, v ...interface{}) {
- // noop
- }),
+ WithStorageLog(slog.DiscardHandler),
},
wantErr: nil,
},
@@ -170,10 +169,7 @@ func TestStorageLog(t *testing.T) {
g := NewWithT(t)
factory := &ConfigFactory{}
- log := helmaction.DebugLog(func(format string, v ...interface{}) {
- // noop
- })
- g.Expect(WithStorageLog(log)(factory)).NotTo(HaveOccurred())
+ g.Expect(WithStorageLog(slog.DiscardHandler)(factory)).NotTo(HaveOccurred())
g.Expect(factory.StorageLog).ToNot(BeNil())
}
@@ -197,7 +193,7 @@ func TestConfigFactory_NewStorage(t *testing.T) {
Driver: helmdriver.NewMemory(),
}
- obsFunc := func(rel *helmrelease.Release) {}
+ obsFunc := func(rel helmrelease.Releaser) {}
s := factory.NewStorage(obsFunc)
g.Expect(s).ToNot(BeNil())
g.Expect(s.Driver).To(BeAssignableToTypeOf(&storage.Observer{}))
@@ -206,10 +202,7 @@ func TestConfigFactory_NewStorage(t *testing.T) {
t.Run("with storage log", func(t *testing.T) {
g := NewWithT(t)
- var called bool
- log := func(fmt string, v ...interface{}) {
- called = true
- }
+ log := &testutil.MockSLogHandler{}
factory := &ConfigFactory{
Driver: helmdriver.NewMemory(),
@@ -218,8 +211,8 @@ func TestConfigFactory_NewStorage(t *testing.T) {
s := factory.NewStorage()
g.Expect(s).ToNot(BeNil())
- s.Log("test")
- g.Expect(called).To(BeTrue())
+ s.Logger().Info("test log")
+ g.Expect(log.Called).To(BeTrue())
})
}
@@ -242,15 +235,12 @@ func TestConfigFactory_Build(t *testing.T) {
t.Run("with log", func(t *testing.T) {
g := NewWithT(t)
- var called bool
- log := func(fmt string, v ...interface{}) {
- called = true
- }
+ log := &testutil.MockSLogHandler{}
cfg := (&ConfigFactory{}).Build(log)
g.Expect(cfg).ToNot(BeNil())
- cfg.Log("")
- g.Expect(called).To(BeTrue())
+ cfg.Logger().Info("test log")
+ g.Expect(log.Called).To(BeTrue())
})
t.Run("with observe func", func(t *testing.T) {
@@ -260,7 +250,7 @@ func TestConfigFactory_Build(t *testing.T) {
Driver: helmdriver.NewMemory(),
}
- obsFunc := func(rel *helmrelease.Release) {}
+ obsFunc := func(rel helmrelease.Releaser) {}
cfg := factory.Build(nil, obsFunc)
g.Expect(cfg).To(Not(BeNil()))
diff --git a/internal/action/crds.go b/internal/action/crds.go
index a2dc25825..5883ce5a8 100644
--- a/internal/action/crds.go
+++ b/internal/action/crds.go
@@ -22,10 +22,12 @@ import (
"fmt"
"time"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmkube "helm.sh/helm/v3/pkg/kube"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmchartcommon "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+ "helm.sh/helm/v4/pkg/kube"
+ helmkube "helm.sh/helm/v4/pkg/kube"
apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
@@ -66,21 +68,26 @@ func (*rootScoped) Name() apimeta.RESTScopeName {
}
func applyCRDs(cfg *helmaction.Configuration, policy v2.CRDsPolicy, chrt *helmchart.Chart,
- vals helmchartutil.Values, visitorFunc ...resource.VisitorFunc) error {
+ vals helmchartcommon.Values, serverSideApply bool, visitorFunc ...resource.VisitorFunc) error {
if len(chrt.CRDObjects()) == 0 {
return nil
}
+ l := cfg.Logger()
+
if policy == v2.Skip {
- cfg.Log("skipping CustomResourceDefinition apply: policy is set to %s", policy)
+ l.Info(fmt.Sprintf("skipping CustomResourceDefinition apply: policy is set to %s", policy))
return nil
}
- if err := helmchartutil.ProcessDependenciesWithMerge(chrt, vals); err != nil {
+ if err := helmchartutil.ProcessDependencies(chrt, vals); err != nil {
return fmt.Errorf("failed to process chart dependencies: %w", err)
}
+ // We always force conflicts on server-side apply.
+ forceConflicts := serverSideApply
+
// Collect all CRDs from all files in `crds` directory.
allCRDs := make(helmkube.ResourceList, 0)
for _, obj := range chrt.CRDObjects() {
@@ -88,7 +95,7 @@ func applyCRDs(cfg *helmaction.Configuration, policy v2.CRDsPolicy, chrt *helmch
res, err := cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false)
if err != nil {
err = fmt.Errorf("failed to parse CustomResourceDefinitions from %s: %w", obj.Name, err)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
}
allCRDs = append(allCRDs, res...)
@@ -101,23 +108,24 @@ func applyCRDs(cfg *helmaction.Configuration, policy v2.CRDsPolicy, chrt *helmch
}
}
- cfg.Log("applying CustomResourceDefinition(s) with policy %s", policy)
+ l.Info(fmt.Sprintf("applying CustomResourceDefinition(s) with policy %s", policy))
var totalItems []*resource.Info
switch policy {
case v2.Create:
for i := range allCRDs {
- if rr, err := cfg.KubeClient.Create(allCRDs[i : i+1]); err != nil {
+ if rr, err := cfg.KubeClient.Create(allCRDs[i:i+1],
+ helmkube.ClientCreateOptionServerSideApply(serverSideApply, forceConflicts)); err != nil {
crdName := allCRDs[i].Name
// If the CustomResourceDefinition already exists, we skip it.
if apierrors.IsAlreadyExists(err) {
- cfg.Log("CustomResourceDefinition %s is already present. Skipping.", crdName)
+ l.Info(fmt.Sprintf("CustomResourceDefinition %s is already present. Skipping.", crdName))
if rr != nil && rr.Created != nil {
totalItems = append(totalItems, rr.Created...)
}
continue
}
err = fmt.Errorf("failed to create CustomResourceDefinition %s: %w", crdName, err)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
} else {
if rr != nil && rr.Created != nil {
@@ -129,13 +137,13 @@ func applyCRDs(cfg *helmaction.Configuration, policy v2.CRDsPolicy, chrt *helmch
config, err := cfg.RESTClientGetter.ToRESTConfig()
if err != nil {
err = fmt.Errorf("could not create Kubernetes client REST config: %w", err)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
}
clientSet, err := apiextension.NewForConfig(config)
if err != nil {
err = fmt.Errorf("could not create Kubernetes client set for API extensions: %w", err)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
}
client := clientSet.ApiextensionsV1().CustomResourceDefinitions()
@@ -170,13 +178,15 @@ func applyCRDs(cfg *helmaction.Configuration, policy v2.CRDsPolicy, chrt *helmch
})
} else if !apierrors.IsNotFound(err) {
err = fmt.Errorf("failed to get CustomResourceDefinition %s: %w", r.Name, err)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
}
}
// Send them to Kubernetes...
- if rr, err := cfg.KubeClient.Update(original, allCRDs, true); err != nil {
+ if rr, err := cfg.KubeClient.Update(original, allCRDs,
+ kube.ClientUpdateOptionForceReplace(true),
+ kube.ClientUpdateOptionServerSideApply(serverSideApply, forceConflicts)); err != nil {
err = fmt.Errorf("failed to update CustomResourceDefinition(s): %w", err)
return err
} else {
@@ -194,25 +204,31 @@ func applyCRDs(cfg *helmaction.Configuration, policy v2.CRDsPolicy, chrt *helmch
}
default:
err := fmt.Errorf("unexpected policy %s", policy)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
}
if len(totalItems) > 0 {
// Give time for the CRD to be recognized.
- if err := cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil {
+ waiter, err := cfg.KubeClient.GetWaiter(helmkube.LegacyStrategy)
+ if err != nil {
+ err = fmt.Errorf("failed to create CustomResourceDefinition waiter: %w", err)
+ l.Error(err.Error())
+ return err
+ }
+ if err := waiter.Wait(totalItems, 60*time.Second); err != nil {
err = fmt.Errorf("failed to wait for CustomResourceDefinition(s): %w", err)
- cfg.Log(err.Error())
+ l.Error(err.Error())
return err
}
- cfg.Log("successfully applied %d CustomResourceDefinition(s)", len(totalItems))
+ l.Info(fmt.Sprintf("successfully applied %d CustomResourceDefinition(s)", len(totalItems)))
// Clear the RESTMapper cache, since it will not have the new CRDs.
// Helm does further invalidation of the client at a later stage
// when it gathers the server capabilities.
if m, err := cfg.RESTClientGetter.ToRESTMapper(); err == nil {
if rm, ok := m.(apimeta.ResettableRESTMapper); ok {
- cfg.Log("clearing REST mapper cache")
+ l.Info("clearing REST mapper cache")
rm.Reset()
}
}
diff --git a/internal/action/defaults.go b/internal/action/defaults.go
new file mode 100644
index 000000000..e6385fffb
--- /dev/null
+++ b/internal/action/defaults.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2026 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+// UseHelm3Defaults must be set from the feature gate of same name in main.go.
+var UseHelm3Defaults bool
diff --git a/internal/action/diff.go b/internal/action/diff.go
index 4580b3733..0f6d8bdc5 100644
--- a/internal/action/diff.go
+++ b/internal/action/diff.go
@@ -24,8 +24,8 @@ import (
"sort"
"strings"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
diff --git a/internal/action/diff_test.go b/internal/action/diff_test.go
index 5e8eaf8de..a2326581a 100644
--- a/internal/action/diff_test.go
+++ b/internal/action/diff_test.go
@@ -28,8 +28,8 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
. "github.com/onsi/gomega"
extjsondiff "github.com/wI2L/jsondiff"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -398,10 +398,10 @@ data:
mutateCluster: func(objs []*unstructured.Unstructured, namespace string) ([]*unstructured.Unstructured, error) {
var clusterObjs []*unstructured.Unstructured
- otherNS := unstructured.Unstructured{Object: map[string]interface{}{
+ otherNS := unstructured.Unstructured{Object: map[string]any{
"apiVersion": "v1",
"kind": "Namespace",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "diff-fixed-ns",
},
}}
@@ -463,11 +463,11 @@ data:
{
Type: extjsondiff.OperationAdd,
Path: "/metadata",
- Value: map[string]interface{}{
- "labels": map[string]interface{}{
+ Value: map[string]any{
+ "labels": map[string]any{
appManagedByLabel: appManagedByHelm,
},
- "annotations": map[string]interface{}{
+ "annotations": map[string]any{
helmReleaseNameAnnotation: "configures Helm metadata",
helmReleaseNamespaceAnnotation: namespace,
},
@@ -602,14 +602,14 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-secret",
"namespace": namespace,
},
- "stringData": map[string]interface{}{
+ "stringData": map[string]any{
"key": "value",
},
},
@@ -618,27 +618,27 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeUpdate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-cm",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
"key": "value",
},
},
},
ClusterObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-cm",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
"key": "changed",
},
},
@@ -685,14 +685,14 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "invalid-test-secret",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
// Illegal base64 encoded data.
"key": "secret value",
},
@@ -702,10 +702,10 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-cm",
"namespace": namespace,
},
@@ -715,28 +715,28 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeUpdate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "invalid-test-secret-update",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
// Illegal base64 encoded data.
"key": "secret value2",
},
},
},
ClusterObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "invalid-test-secret-update",
"namespace": namespace,
},
- "stringData": map[string]interface{}{
+ "stringData": map[string]any{
"key": "value",
},
},
@@ -753,27 +753,27 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeUpdate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-cm-2",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
"key": "value",
},
},
},
ClusterObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-cm-2",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
"key": "changed",
},
},
@@ -830,10 +830,10 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "test-cm",
"namespace": otherNS,
},
@@ -843,10 +843,10 @@ func TestApplyDiff(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Namespace",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": otherNS,
},
},
diff --git a/internal/action/install.go b/internal/action/install.go
index 00dd9396b..26287b6f0 100644
--- a/internal/action/install.go
+++ b/internal/action/install.go
@@ -20,10 +20,10 @@ import (
"context"
"fmt"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/features"
@@ -50,26 +50,44 @@ type InstallOption func(action *helmaction.Install)
func Install(ctx context.Context, config *helmaction.Configuration, obj *v2.HelmRelease,
chrt *helmchart.Chart, vals helmchartutil.Values, opts ...InstallOption) (*helmrelease.Release, error) {
install := newInstall(config, obj, opts)
+ install.ForceConflicts = install.ServerSideApply // We always force conflicts on server-side apply.
policy, err := crdPolicyOrDefault(obj.GetInstall().CRDs)
if err != nil {
return nil, err
}
- if err := applyCRDs(config, policy, chrt, vals, setOriginVisitor(v2.GroupVersion.Group, obj.Namespace, obj.Name)); err != nil {
+ if err := applyCRDs(config, policy, chrt, vals, install.ServerSideApply, setOriginVisitor(v2.GroupVersion.Group, obj.Namespace, obj.Name)); err != nil {
return nil, fmt.Errorf("failed to apply CustomResourceDefinitions: %w", err)
}
- return install.RunWithContext(ctx, chrt, vals.AsMap())
+ rlsr, err := install.RunWithContext(ctx, chrt, vals.AsMap())
+ if err != nil {
+ return nil, err
+ }
+ rlsrTyped, ok := rlsr.(*helmrelease.Release)
+ if !ok {
+ return nil, fmt.Errorf("only the Chart API v2 is supported")
+ }
+ return rlsrTyped, err
}
func newInstall(config *helmaction.Configuration, obj *v2.HelmRelease, opts []InstallOption) *helmaction.Install {
install := helmaction.NewInstall(config)
+ switch {
+ case UseHelm3Defaults:
+ install.ServerSideApply = false
+ default:
+ install.ServerSideApply = true
+ }
+ if ssa := obj.GetInstall().ServerSideApply; ssa != nil {
+ install.ServerSideApply = *ssa
+ }
install.ReleaseName = release.ShortenName(obj.GetReleaseName())
install.Namespace = obj.GetReleaseNamespace()
install.Timeout = obj.GetInstall().GetTimeout(obj.GetTimeout()).Duration
install.TakeOwnership = !obj.GetInstall().DisableTakeOwnership
- install.Wait = !obj.GetInstall().DisableWait
+ install.WaitStrategy = getWaitStrategy(obj.GetInstall())
install.WaitForJobs = !obj.GetInstall().DisableWaitForJobs
install.DisableHooks = obj.GetInstall().DisableHooks
install.DisableOpenAPIValidation = obj.GetInstall().DisableOpenAPIValidation
diff --git a/internal/action/install_test.go b/internal/action/install_test.go
index 5436b56dd..492c02de6 100644
--- a/internal/action/install_test.go
+++ b/internal/action/install_test.go
@@ -21,7 +21,7 @@ import (
"time"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
+ helmaction "helm.sh/helm/v4/pkg/action"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -84,15 +84,15 @@ func Test_newInstall(t *testing.T) {
got := newInstall(&helmaction.Configuration{}, obj, []InstallOption{
func(install *helmaction.Install) {
- install.Atomic = true
+ install.DisableHooks = true
},
func(install *helmaction.Install) {
- install.DryRun = true
+ install.DryRunStrategy = helmaction.DryRunClient
},
})
g.Expect(got).ToNot(BeNil())
- g.Expect(got.Atomic).To(BeTrue())
- g.Expect(got.DryRun).To(BeTrue())
+ g.Expect(got.DisableHooks).To(BeTrue())
+ g.Expect(got.DryRunStrategy).To(Equal(helmaction.DryRunClient))
})
t.Run("disable take ownership", func(t *testing.T) {
@@ -114,4 +114,98 @@ func Test_newInstall(t *testing.T) {
g.Expect(got).ToNot(BeNil())
g.Expect(got.TakeOwnership).To(BeFalse())
})
+
+ t.Run("server side apply defaults to true with Helm4 defaults", func(t *testing.T) {
+ g := NewWithT(t)
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+ UseHelm3Defaults = false
+
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "install",
+ Namespace: "install-ns",
+ },
+ Spec: v2.HelmReleaseSpec{},
+ }
+
+ got := newInstall(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(BeTrue())
+ })
+
+ t.Run("server side apply defaults to false with UseHelm3Defaults", func(t *testing.T) {
+ g := NewWithT(t)
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+ UseHelm3Defaults = true
+
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "install",
+ Namespace: "install-ns",
+ },
+ Spec: v2.HelmReleaseSpec{},
+ }
+
+ got := newInstall(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(BeFalse())
+ })
+
+ t.Run("server side apply user specified true", func(t *testing.T) {
+ g := NewWithT(t)
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+ UseHelm3Defaults = true // default would be false
+
+ ssa := true
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "install",
+ Namespace: "install-ns",
+ },
+ Spec: v2.HelmReleaseSpec{
+ Install: &v2.Install{
+ ServerSideApply: &ssa,
+ },
+ },
+ }
+
+ got := newInstall(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(BeTrue())
+ })
+
+ t.Run("server side apply user specified false", func(t *testing.T) {
+ g := NewWithT(t)
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+ UseHelm3Defaults = false // default would be true
+
+ ssa := false
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "install",
+ Namespace: "install-ns",
+ },
+ Spec: v2.HelmReleaseSpec{
+ Install: &v2.Install{
+ ServerSideApply: &ssa,
+ },
+ },
+ }
+
+ got := newInstall(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(BeFalse())
+ })
}
diff --git a/internal/action/log.go b/internal/action/log.go
index 5caad3bf5..f3b364485 100644
--- a/internal/action/log.go
+++ b/internal/action/log.go
@@ -18,33 +18,166 @@ package action
import (
"container/ring"
+ "context"
+ "encoding/json"
"fmt"
+ "log/slog"
"strings"
"sync"
"time"
"github.com/go-logr/logr"
- helmaction "helm.sh/helm/v3/pkg/action"
-)
+ "sigs.k8s.io/controller-runtime/pkg/log"
-// DefaultLogBufferSize is the default size of the LogBuffer.
-const DefaultLogBufferSize = 5
+ "github.com/fluxcd/pkg/runtime/logger"
+)
// nowTS can be used to stub out time.Now() in tests.
var nowTS = time.Now
-// NewDebugLog returns an action.DebugLog that logs to the given logr.Logger.
-func NewDebugLog(log logr.Logger) helmaction.DebugLog {
- return func(format string, v ...interface{}) {
- log.Info(fmt.Sprintf(format, v...))
- }
+// NewTraceLogger returns an slog.Handler that logs to the logger from the context at Trace level.
+func NewTraceLogger(ctx context.Context) slog.Handler {
+ return newLogBuffer(ctx, logger.TraceLevel)
+}
+
+// NewDebugLogBuffer returns an slog.Handler that logs to the logger from the context at Debug level,
+// and also keeps a ring buffer of log messages.
+func NewDebugLogBuffer(ctx context.Context) *LogBuffer {
+ l := newLogBuffer(ctx, logger.DebugLevel)
+ l.buf = newLogRingBuffer(ctx)
+ return l
}
-// LogBuffer is a ring buffer that logs to a Helm action.DebugLog.
+// LogBuffer implements slog.Handler by logging to a
+// logr.Logger calling log.Info, and to a ring buffer
+// if level is Debug.
type LogBuffer struct {
- mu sync.RWMutex
- log helmaction.DebugLog
- buffer *ring.Ring
+ attrs []groupedAttr
+ group []string
+
+ // destinations
+ log logr.Logger
+ buf *logRingBuffer
+}
+
+// groupedAttr is an slog.Attr belonging to a group.
+type groupedAttr struct {
+ group []string
+ attr slog.Attr
+}
+
+// newLogBuffer creates a new LogBuffer.
+func newLogBuffer(ctx context.Context, level int) *LogBuffer {
+ return &LogBuffer{log: log.FromContext(ctx).V(level)}
+}
+
+// Appendf adds the log message to the ring buffer.
+func (l *LogBuffer) Appendf(format string, v ...any) {
+ if l != nil {
+ l.buf.Appendf(format, v...)
+ }
+}
+
+// Empty returns true if the buffer is empty.
+func (l *LogBuffer) Empty() bool {
+ return l == nil || l.buf.Empty()
+}
+
+// String returns the contents of the buffer as a string.
+func (l *LogBuffer) String() string {
+ if l == nil {
+ return ""
+ }
+ return l.buf.String()
+}
+
+// Enabled implements slog.Handler.
+func (l *LogBuffer) Enabled(context.Context, slog.Level) bool {
+ // We handle the level on the logr.Logger side.
+ return true
+}
+
+// Handle implements slog.Handler.
+func (l *LogBuffer) Handle(_ context.Context, r slog.Record) error {
+ // Prepare message based on the record level.
+ var msg string
+ switch r.Level {
+ case slog.LevelError:
+ msg = fmt.Sprintf("error: %s", r.Message)
+ case slog.LevelWarn:
+ msg = fmt.Sprintf("warning: %s", r.Message)
+ default:
+ msg = r.Message
+ }
+
+ // Collect record attributes.
+ slogAttrs := make([]slog.Attr, 0, r.NumAttrs())
+ r.Attrs(func(a slog.Attr) bool {
+ slogAttrs = append(slogAttrs, a)
+ return true
+ })
+ l = l.withAttrs(slogAttrs) // We intentionally update the method receiver here (it doesn't mutate the original).
+
+ // Build nested attribute map.
+ attrs := make(map[string]any)
+ for _, ga := range l.attrs {
+ target := attrs
+ for _, g := range ga.group {
+ next, ok := target[g].(map[string]any)
+ if !ok {
+ node := make(map[string]any)
+ target[g] = node
+ next = node
+ }
+ target = next
+ }
+ target[ga.attr.Key] = ga.attr.Value.Any()
+ }
+
+ // Sink to logger.
+ keysAndValues := make([]any, 0, len(attrs)*2)
+ for k, v := range attrs {
+ keysAndValues = append(keysAndValues, k, v)
+ }
+ l.log.Info(msg, keysAndValues...)
+
+ // Sink to buffer.
+ b, err := json.Marshal(attrs)
+ if err != nil {
+ l.buf.Appendf("%s", msg)
+ return err
+ }
+ l.buf.Appendf("%s: %s", msg, string(b))
+ return nil
+}
+
+// WithAttrs implements slog.Handler.
+func (l *LogBuffer) WithAttrs(attrs []slog.Attr) slog.Handler {
+ return l.withAttrs(attrs)
+}
+func (l *LogBuffer) withAttrs(attrs []slog.Attr) *LogBuffer {
+ nl := *l
+ nl.attrs = make([]groupedAttr, 0, len(l.attrs)+len(attrs))
+ nl.attrs = append(nl.attrs, l.attrs...)
+ for _, attr := range attrs {
+ nl.attrs = append(nl.attrs, groupedAttr{
+ group: l.group,
+ attr: attr,
+ })
+ }
+ return &nl
+}
+
+// WithGroup implements slog.Handler.
+func (l *LogBuffer) WithGroup(name string) slog.Handler {
+ if name == "" {
+ return l
+ }
+ nl := *l
+ nl.group = make([]string, 0, len(l.group)+1)
+ nl.group = append(nl.group, l.group...)
+ nl.group = append(nl.group, name)
+ return &nl
}
// logLine is a log message with a timestamp.
@@ -76,49 +209,63 @@ func (l *logLine) String() string {
return msg
}
-// NewLogBuffer creates a new LogBuffer with the given log function
-// and a buffer of the given size. If size <= 0, it defaults to
-// DefaultLogBufferSize.
-func NewLogBuffer(log helmaction.DebugLog, size int) *LogBuffer {
- if size <= 0 {
- size = DefaultLogBufferSize
- }
- return &LogBuffer{
- log: log,
- buffer: ring.New(size),
+// logRingBuffer is a ring buffer for logLine entries.
+type logRingBuffer struct {
+ buf *ring.Ring
+ mu sync.RWMutex
+}
+
+// ringBufferSizeContextKey is the context key for the ring buffer size.
+// Used only for testing logRingBuffer.
+type ringBufferSizeContextKey struct{}
+
+// newLogRingBuffer creates a new logRingBuffer that logs to the logger from the context at Debug level.
+func newLogRingBuffer(ctx context.Context) *logRingBuffer {
+ size := 10
+ if v := ctx.Value(ringBufferSizeContextKey{}); v != nil {
+ size = v.(int)
}
+ return &logRingBuffer{buf: ring.New(size)}
}
-// Log adds the log message to the ring buffer before calling the actual log
+// Appendf adds the log message to the ring buffer before calling the actual log
// function. It is safe to call this function from multiple goroutines.
-func (l *LogBuffer) Log(format string, v ...interface{}) {
+func (l *logRingBuffer) Appendf(format string, v ...any) {
+ if l == nil {
+ return
+ }
+
l.mu.Lock()
// Filter out duplicate log lines, this happens for example when
// Helm is waiting on workloads to become ready.
msg := fmt.Sprintf(format, v...)
- prev, ok := l.buffer.Prev().Value.(*logLine)
+ prev, ok := l.buf.Prev().Value.(*logLine)
if ok && prev.msg == msg {
prev.count++
prev.lastTS = nowTS().UTC()
- l.buffer.Prev().Value = prev
+ l.buf.Prev().Value = prev
}
if !ok || prev.msg != msg {
- l.buffer.Value = &logLine{
+ l.buf.Value = &logLine{
ts: nowTS().UTC(),
msg: msg,
}
- l.buffer = l.buffer.Next()
+ l.buf = l.buf.Next()
}
l.mu.Unlock()
- l.log(format, v...)
}
-// Len returns the count of non-empty values in the buffer.
-func (l *LogBuffer) Len() (count int) {
+// Empty returns true if the buffer is empty.
+func (l *logRingBuffer) Empty() bool {
+ if l == nil {
+ return true
+ }
+
+ var count int
l.mu.RLock()
- l.buffer.Do(func(s interface{}) {
+ l.buf.Do(func(s any) {
if s == nil {
return
}
@@ -129,21 +276,18 @@ func (l *LogBuffer) Len() (count int) {
count++
})
l.mu.RUnlock()
- return
-}
-
-// Reset clears the buffer.
-func (l *LogBuffer) Reset() {
- l.mu.Lock()
- l.buffer = ring.New(l.buffer.Len())
- l.mu.Unlock()
+ return count == 0
}
// String returns the contents of the buffer as a string.
-func (l *LogBuffer) String() string {
+func (l *logRingBuffer) String() string {
+ if l == nil {
+ return ""
+ }
+
var str string
l.mu.RLock()
- l.buffer.Do(func(s interface{}) {
+ l.buf.Do(func(s any) {
if s == nil {
return
}
diff --git a/internal/action/log_test.go b/internal/action/log_test.go
index 16aab7d5f..c60bfd316 100644
--- a/internal/action/log_test.go
+++ b/internal/action/log_test.go
@@ -17,124 +17,451 @@ limitations under the License.
package action
import (
+ "context"
+ "encoding/json"
"fmt"
+ "log/slog"
+ "strings"
"testing"
"time"
"github.com/go-logr/logr"
+ . "github.com/onsi/gomega"
+ "sigs.k8s.io/controller-runtime/pkg/log"
)
-func TestLogBuffer_Log(t *testing.T) {
- nowTS = stubNowTS
+// stubNowTS returns a fixed time for testing purposes.
+func stubNowTS() time.Time {
+ return time.Date(2016, 2, 18, 12, 24, 5, 12345600, time.UTC)
+}
- tests := []struct {
- name string
- size int
- fill []string
- wantCount int
- want string
- }{
- {name: "log", size: 2, fill: []string{"a", "b", "c"}, wantCount: 3, want: fmt.Sprintf("%[1]s: b\n%[1]s: c", stubNowTS().Format(time.RFC3339Nano))},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- var count int
- l := NewLogBuffer(func(format string, v ...interface{}) {
- count++
- }, tt.size)
- for _, v := range tt.fill {
- l.Log("%s", v)
- }
- if count != tt.wantCount {
- t.Errorf("Inner Log() called %v times, want %v", count, tt.wantCount)
- }
- if got := l.String(); got != tt.want {
- t.Errorf("String() = %v, want %v", got, tt.want)
- }
- })
- }
+// stubNowTS2 returns a different fixed time for testing duplicate log line timestamps.
+func stubNowTS2() time.Time {
+ return time.Date(2016, 2, 18, 12, 24, 6, 12345600, time.UTC)
}
-func TestLogBuffer_Len(t *testing.T) {
- tests := []struct {
+func Test_logLine_String(t *testing.T) {
+ ts := stubNowTS()
+ ts2 := stubNowTS2()
+
+ for _, tt := range []struct {
name string
- size int
- fill []string
- want int
+ line *logLine
+ want string
}{
- {name: "empty buffer", fill: []string{}, want: 0},
- {name: "filled buffer", size: 2, fill: []string{"a", "b"}, want: 2},
- {name: "half full buffer", size: 4, fill: []string{"a", "b"}, want: 2},
- }
- for _, tt := range tests {
+ {
+ name: "nil logLine",
+ line: nil,
+ want: "",
+ },
+ {
+ name: "empty message",
+ line: &logLine{ts: ts, msg: ""},
+ want: "",
+ },
+ {
+ name: "simple message",
+ line: &logLine{ts: ts, msg: "test message"},
+ want: fmt.Sprintf("%s: test message", ts.Format(time.RFC3339Nano)),
+ },
+ {
+ name: "message with one duplicate",
+ line: &logLine{ts: ts, lastTS: ts2, msg: "duplicate message", count: 1},
+ want: fmt.Sprintf("%s: duplicate message\n%s: duplicate message", ts.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano)),
+ },
+ {
+ name: "message with two duplicates",
+ line: &logLine{ts: ts, lastTS: ts2, msg: "duplicate message", count: 2},
+ want: fmt.Sprintf("%s: duplicate message\n%s: duplicate message (1 duplicate line omitted)", ts.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano)),
+ },
+ {
+ name: "message with three duplicates",
+ line: &logLine{ts: ts, lastTS: ts2, msg: "duplicate message", count: 3},
+ want: fmt.Sprintf("%s: duplicate message\n%s: duplicate message (2 duplicate lines omitted)", ts.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano)),
+ },
+ } {
t.Run(tt.name, func(t *testing.T) {
- l := NewLogBuffer(NewDebugLog(logr.Discard()), tt.size)
- for _, v := range tt.fill {
- l.Log("%s", v)
- }
- if got := l.Len(); got != tt.want {
- t.Errorf("String() = %v, want %v", got, tt.want)
- }
+ g := NewWithT(t)
+ g.Expect(tt.line.String()).To(Equal(tt.want))
})
}
}
-func TestLogBuffer_Reset(t *testing.T) {
- bufferSize := 10
- l := NewLogBuffer(NewDebugLog(logr.Discard()), bufferSize)
+func Test_logRingBuffer_Appendf(t *testing.T) {
+ origNowTS := nowTS
+ defer func() { nowTS = origNowTS }()
+ nowTS = stubNowTS
- if got := l.buffer.Len(); got != bufferSize {
- t.Errorf("Len() = %v, want %v", got, bufferSize)
- }
+ t.Run("nil buffer is safe to call", func(t *testing.T) {
+ g := NewWithT(t)
+ var l *logRingBuffer
+ g.Expect(func() { l.Appendf("test") }).NotTo(Panic())
+ })
- for _, v := range []string{"a", "b", "c"} {
- l.Log("%s", v)
- }
+ t.Run("appends messages to buffer", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 3)
+ l := newLogRingBuffer(ctx)
- if got := l.String(); got == "" {
- t.Errorf("String() = empty")
- }
+ l.Appendf("message %d", 1)
+ l.Appendf("message %d", 2)
- l.Reset()
+ want := fmt.Sprintf("%[1]s: message 1\n%[1]s: message 2", stubNowTS().Format(time.RFC3339Nano))
+ g.Expect(l.String()).To(Equal(want))
+ })
- if got := l.buffer.Len(); got != bufferSize {
- t.Errorf("Len() = %v after Reset(), want %v", got, bufferSize)
- }
- if got := l.String(); got != "" {
- t.Errorf("String() != empty after Reset()")
- }
+ t.Run("handles duplicate messages", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 5)
+ l := newLogRingBuffer(ctx)
+
+ l.Appendf("same message")
+ l.Appendf("same message")
+ l.Appendf("same message")
+
+ want := fmt.Sprintf("%[1]s: same message\n%[1]s: same message (1 duplicate line omitted)", stubNowTS().Format(time.RFC3339Nano))
+ g.Expect(l.String()).To(Equal(want))
+ })
+
+ t.Run("ring buffer wraps around", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 2)
+ l := newLogRingBuffer(ctx)
+
+ l.Appendf("a")
+ l.Appendf("b")
+ l.Appendf("c")
+
+ want := fmt.Sprintf("%[1]s: b\n%[1]s: c", stubNowTS().Format(time.RFC3339Nano))
+ g.Expect(l.String()).To(Equal(want))
+ })
}
-func TestLogBuffer_String(t *testing.T) {
+func Test_logRingBuffer_Empty(t *testing.T) {
+ t.Run("nil buffer is empty", func(t *testing.T) {
+ g := NewWithT(t)
+ var l *logRingBuffer
+ g.Expect(l.Empty()).To(BeTrue())
+ })
+
+ t.Run("new buffer is empty", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 5)
+ l := newLogRingBuffer(ctx)
+ g.Expect(l.Empty()).To(BeTrue())
+ })
+
+ t.Run("buffer with entries is not empty", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 5)
+ l := newLogRingBuffer(ctx)
+ l.Appendf("test message")
+ g.Expect(l.Empty()).To(BeFalse())
+ })
+}
+
+func Test_logRingBuffer_String(t *testing.T) {
+ origNowTS := nowTS
+ defer func() { nowTS = origNowTS }()
nowTS = stubNowTS
- tests := []struct {
- name string
- size int
- fill []string
- want string
- }{
- {name: "empty buffer", fill: []string{}, want: ""},
- {name: "filled buffer", size: 2, fill: []string{"a", "b", "c"}, want: fmt.Sprintf("%[1]s: b\n%[1]s: c", stubNowTS().Format(time.RFC3339Nano))},
- {name: "duplicate buffer items", fill: []string{"b", "b"}, want: fmt.Sprintf("%[1]s: b\n%[1]s: b", stubNowTS().Format(time.RFC3339Nano))},
- {name: "duplicate buffer items", fill: []string{"b", "b", "b"}, want: fmt.Sprintf("%[1]s: b\n%[1]s: b (1 duplicate line omitted)", stubNowTS().Format(time.RFC3339Nano))},
- {name: "duplicate buffer items", fill: []string{"b", "b", "b", "b"}, want: fmt.Sprintf("%[1]s: b\n%[1]s: b (2 duplicate lines omitted)", stubNowTS().Format(time.RFC3339Nano))},
- {name: "duplicate buffer items", fill: []string{"a", "b", "b", "b", "c", "c"}, want: fmt.Sprintf("%[1]s: a\n%[1]s: b\n%[1]s: b (1 duplicate line omitted)\n%[1]s: c\n%[1]s: c", stubNowTS().Format(time.RFC3339Nano))},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- l := NewLogBuffer(NewDebugLog(logr.Discard()), tt.size)
- for _, v := range tt.fill {
- l.Log("%s", v)
- }
- if got := l.String(); got != tt.want {
- t.Errorf("String() = %v, want %v", got, tt.want)
- }
- })
- }
+ t.Run("nil buffer returns empty string", func(t *testing.T) {
+ g := NewWithT(t)
+ var l *logRingBuffer
+ g.Expect(l.String()).To(Equal(""))
+ })
+
+ t.Run("empty buffer returns empty string", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 5)
+ l := newLogRingBuffer(ctx)
+ g.Expect(l.String()).To(Equal(""))
+ })
+
+ t.Run("returns all messages joined by newlines", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 5)
+ l := newLogRingBuffer(ctx)
+
+ l.Appendf("first")
+ l.Appendf("second")
+ l.Appendf("third")
+
+ want := fmt.Sprintf("%[1]s: first\n%[1]s: second\n%[1]s: third", stubNowTS().Format(time.RFC3339Nano))
+ g.Expect(l.String()).To(Equal(want))
+ })
+
+ t.Run("handles mixed duplicates and unique messages", func(t *testing.T) {
+ g := NewWithT(t)
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 10)
+ l := newLogRingBuffer(ctx)
+
+ l.Appendf("a")
+ l.Appendf("b")
+ l.Appendf("b")
+ l.Appendf("b")
+ l.Appendf("c")
+ l.Appendf("c")
+
+ want := fmt.Sprintf("%[1]s: a\n%[1]s: b\n%[1]s: b (1 duplicate line omitted)\n%[1]s: c\n%[1]s: c", stubNowTS().Format(time.RFC3339Nano))
+ g.Expect(l.String()).To(Equal(want))
+ })
}
-// stubNowTS returns a fixed time for testing purposes.
-func stubNowTS() time.Time {
- return time.Date(2016, 2, 18, 12, 24, 5, 12345600, time.UTC)
+func TestLogBuffer_Enabled(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := newLogBuffer(ctx, 0)
+
+ g.Expect(l.Enabled(ctx, slog.LevelDebug)).To(BeTrue())
+ g.Expect(l.Enabled(ctx, slog.LevelInfo)).To(BeTrue())
+ g.Expect(l.Enabled(ctx, slog.LevelWarn)).To(BeTrue())
+ g.Expect(l.Enabled(ctx, slog.LevelError)).To(BeTrue())
+}
+
+func TestLogBuffer_Handle(t *testing.T) {
+ origNowTS := nowTS
+ defer func() { nowTS = origNowTS }()
+ nowTS = stubNowTS
+
+ t.Run("handles info level message", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ record := slog.NewRecord(time.Now(), slog.LevelInfo, "info message", 0)
+ err := l.Handle(ctx, record)
+
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(l.String()).To(ContainSubstring("info message"))
+ })
+
+ t.Run("handles error level message with prefix", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ record := slog.NewRecord(time.Now(), slog.LevelError, "error occurred", 0)
+ err := l.Handle(ctx, record)
+
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(l.String()).To(ContainSubstring("error: error occurred"))
+ })
+
+ t.Run("handles warning level message with prefix", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ record := slog.NewRecord(time.Now(), slog.LevelWarn, "warning issued", 0)
+ err := l.Handle(ctx, record)
+
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(l.String()).To(ContainSubstring("warning: warning issued"))
+ })
+
+ t.Run("handles message with attributes", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ record := slog.NewRecord(time.Now(), slog.LevelInfo, "test message", 0)
+ record.AddAttrs(slog.String("key", "value"))
+ err := l.Handle(ctx, record)
+
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(l.String()).To(ContainSubstring(`"key":"value"`))
+ })
+}
+
+func TestLogBuffer_WithAttrs(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ handler := l.WithAttrs([]slog.Attr{slog.String("attr1", "val1")})
+ g.Expect(handler).ToNot(BeNil())
+
+ lb, ok := handler.(*LogBuffer)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(lb.attrs).To(HaveLen(1))
+ g.Expect(lb.attrs[0].attr.Key).To(Equal("attr1"))
+ g.Expect(lb.attrs[0].attr.Value.String()).To(Equal("val1"))
+}
+
+func TestLogBuffer_WithGroup(t *testing.T) {
+ t.Run("returns same handler for empty group name", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ handler := l.WithGroup("")
+ g.Expect(handler).To(Equal(l))
+ })
+
+ t.Run("creates new handler with group", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ handler := l.WithGroup("mygroup")
+ g.Expect(handler).ToNot(Equal(l))
+
+ lb, ok := handler.(*LogBuffer)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(lb.group).To(Equal([]string{"mygroup"}))
+ })
+
+ t.Run("supports nested groups", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ handler := l.WithGroup("outer").WithGroup("inner")
+ lb, ok := handler.(*LogBuffer)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(lb.group).To(Equal([]string{"outer", "inner"}))
+ })
+}
+
+func TestLogBuffer_HandleWithMixedAttrsAndGroups(t *testing.T) {
+ origNowTS := nowTS
+ defer func() { nowTS = origNowTS }()
+ nowTS = stubNowTS
+
+ t.Run("attrs before group remain ungrouped", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ // Add attr first, then group with its own attr
+ handler := l.WithAttrs([]slog.Attr{slog.String("root", "val1")}).
+ WithGroup("nested").
+ WithAttrs([]slog.Attr{slog.String("inner", "val2")})
+
+ record := slog.NewRecord(time.Now(), slog.LevelInfo, "mixed test", 0)
+ lb, ok := handler.(*LogBuffer)
+ g.Expect(ok).To(BeTrue())
+
+ err := lb.Handle(ctx, record)
+ g.Expect(err).ToNot(HaveOccurred())
+
+ output := lb.String()
+ g.Expect(output).To(ContainSubstring("mixed test"))
+
+ // Extract and parse JSON from output
+ jsonStart := strings.Index(output, "{")
+ g.Expect(jsonStart).To(BeNumerically(">=", 0), "expected JSON in output")
+ var attrs map[string]any
+ err = json.Unmarshal([]byte(output[jsonStart:]), &attrs)
+ g.Expect(err).ToNot(HaveOccurred())
+
+ // root attr should be at top level
+ g.Expect(attrs).To(HaveKeyWithValue("root", "val1"))
+ // inner attr should be nested under "nested" group
+ g.Expect(attrs).To(HaveKey("nested"))
+ nested, ok := attrs["nested"].(map[string]any)
+ g.Expect(ok).To(BeTrue(), "nested should be a map")
+ g.Expect(nested).To(HaveKeyWithValue("inner", "val2"))
+ })
+
+ t.Run("alternating attrs and groups", func(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ l := NewDebugLogBuffer(ctx)
+
+ // Create a chain: attr -> group -> attr -> group -> attr
+ handler := l.
+ WithAttrs([]slog.Attr{slog.String("level0", "a")}).
+ WithGroup("g1").
+ WithAttrs([]slog.Attr{slog.String("level1", "b")}).
+ WithGroup("g2").
+ WithAttrs([]slog.Attr{slog.String("level2", "c")})
+
+ record := slog.NewRecord(time.Now(), slog.LevelInfo, "alternating test", 0)
+ lb, ok := handler.(*LogBuffer)
+ g.Expect(ok).To(BeTrue())
+
+ err := lb.Handle(ctx, record)
+ g.Expect(err).ToNot(HaveOccurred())
+
+ output := lb.String()
+ g.Expect(output).To(ContainSubstring("alternating test"))
+
+ // Extract and parse JSON from output
+ jsonStart := strings.Index(output, "{")
+ g.Expect(jsonStart).To(BeNumerically(">=", 0), "expected JSON in output")
+ var attrs map[string]any
+ err = json.Unmarshal([]byte(output[jsonStart:]), &attrs)
+ g.Expect(err).ToNot(HaveOccurred())
+
+ // level0 should be at root
+ g.Expect(attrs).To(HaveKeyWithValue("level0", "a"))
+
+ // level1 should be under g1
+ g.Expect(attrs).To(HaveKey("g1"))
+ g1, ok := attrs["g1"].(map[string]any)
+ g.Expect(ok).To(BeTrue(), "g1 should be a map")
+ g.Expect(g1).To(HaveKeyWithValue("level1", "b"))
+
+ // level2 should be under g1.g2
+ g.Expect(g1).To(HaveKey("g2"))
+ g2, ok := g1["g2"].(map[string]any)
+ g.Expect(ok).To(BeTrue(), "g2 should be a map")
+ g.Expect(g2).To(HaveKeyWithValue("level2", "c"))
+ })
+}
+
+func TestNewTraceLogger(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ handler := NewTraceLogger(ctx)
+
+ g.Expect(handler).ToNot(BeNil())
+
+ lb, ok := handler.(*LogBuffer)
+ g.Expect(ok).To(BeTrue())
+ g.Expect(lb.buf).To(BeNil())
+}
+
+func TestNewDebugLogBuffer(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ handler := NewDebugLogBuffer(ctx)
+
+ g.Expect(handler).ToNot(BeNil())
+ g.Expect(handler.buf).ToNot(BeNil())
+}
+
+func Test_newLogRingBuffer_defaultSize(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := context.Background()
+ l := newLogRingBuffer(ctx)
+
+ g.Expect(l).ToNot(BeNil())
+ g.Expect(l.buf.Len()).To(Equal(10))
+}
+
+func Test_newLogRingBuffer_customSize(t *testing.T) {
+ g := NewWithT(t)
+
+ ctx := context.WithValue(context.Background(), ringBufferSizeContextKey{}, 20)
+ l := newLogRingBuffer(ctx)
+
+ g.Expect(l).ToNot(BeNil())
+ g.Expect(l.buf.Len()).To(Equal(20))
}
diff --git a/internal/action/reset.go b/internal/action/reset.go
index 556700ac5..5bbbe0fca 100644
--- a/internal/action/reset.go
+++ b/internal/action/reset.go
@@ -18,11 +18,12 @@ package action
import (
"github.com/opencontainers/go-digest"
- "helm.sh/helm/v3/pkg/chart"
- "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
- v2 "github.com/fluxcd/helm-controller/api/v2"
intchartutil "github.com/fluxcd/pkg/chartutil"
+
+ v2 "github.com/fluxcd/helm-controller/api/v2"
)
const (
@@ -38,7 +39,7 @@ const (
// changed in a way that indicates that a new attempt should be made.
// For example, a change in generation, chart version, or values.
// If no change is detected, an empty string is returned along with false.
-func MustResetFailures(obj *v2.HelmRelease, chart *chart.Metadata, values chartutil.Values) (string, bool) {
+func MustResetFailures(obj *v2.HelmRelease, chart *chart.Metadata, values common.Values) (string, bool) {
// Always check if a reset is requested.
// This is done first, so that the HelmReleaseStatus.LastHandledResetAt
// field is updated even if the reset request is not handled due to other
diff --git a/internal/action/reset_test.go b/internal/action/reset_test.go
index 1aa3ee877..4b22392e0 100644
--- a/internal/action/reset_test.go
+++ b/internal/action/reset_test.go
@@ -20,8 +20,8 @@ import (
"testing"
. "github.com/onsi/gomega"
- "helm.sh/helm/v3/pkg/chart"
- "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
@@ -34,7 +34,7 @@ func TestMustResetFailures(t *testing.T) {
name string
obj *v2.HelmRelease
chart *chart.Metadata
- values chartutil.Values
+ values common.Values
want bool
wantReason string
}{
@@ -83,7 +83,7 @@ func TestMustResetFailures(t *testing.T) {
chart: &chart.Metadata{
Version: "1.0.0",
},
- values: chartutil.Values{
+ values: common.Values{
"foo": "bar",
},
want: true,
@@ -104,7 +104,7 @@ func TestMustResetFailures(t *testing.T) {
chart: &chart.Metadata{
Version: "1.0.0",
},
- values: chartutil.Values{
+ values: common.Values{
"foo": "bar",
},
want: true,
@@ -129,7 +129,7 @@ func TestMustResetFailures(t *testing.T) {
chart: &chart.Metadata{
Version: "1.0.0",
},
- values: chartutil.Values{
+ values: common.Values{
"foo": "bar",
},
want: true,
@@ -150,7 +150,7 @@ func TestMustResetFailures(t *testing.T) {
chart: &chart.Metadata{
Version: "1.0.0",
},
- values: chartutil.Values{
+ values: common.Values{
"foo": "bar",
},
want: false,
diff --git a/internal/action/rollback.go b/internal/action/rollback.go
index 34d77a97b..7b4c7f379 100644
--- a/internal/action/rollback.go
+++ b/internal/action/rollback.go
@@ -17,7 +17,10 @@ limitations under the License.
package action
import (
- helmaction "helm.sh/helm/v3/pkg/action"
+ "fmt"
+
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
)
@@ -35,16 +38,9 @@ func RollbackToVersion(version int) RollbackOption {
}
}
-// RollbackDryRun returns a RollbackOption which enables the dry-run setting.
-func RollbackDryRun() RollbackOption {
- return func(rollback *helmaction.Rollback) {
- rollback.DryRun = true
- }
-}
-
// Rollback runs the Helm rollback action with the provided config. Targeting
// a specific release or enabling dry-run is possible by providing
-// RollbackToVersion and/or RollbackDryRun as options.
+// RollbackToVersion as option.
//
// It does not determine if there is a desire to perform the action, this is
// expected to be done by the caller. In addition, it does not take note of the
@@ -52,18 +48,71 @@ func RollbackDryRun() RollbackOption {
// storage.ObserveFunc, which provides superior access to Helm storage writes.
func Rollback(config *helmaction.Configuration, obj *v2.HelmRelease, releaseName string, opts ...RollbackOption) error {
rollback := newRollback(config, obj, opts)
+
+ // Resolve "auto" server-side apply setting.
+ // We need to copy this code from Helm because we need to set ForceConflicts
+ // based on the resolved value, since we always force conflicts on server-side apply
+ // (Helm does not).
+ serverSideApply := rollback.ServerSideApply == "true"
+ if rollback.ServerSideApply == "auto" {
+ currentRelease, err := config.Releases.Last(releaseName)
+ if err != nil {
+ return err
+ }
+ currentReleaseTyped, ok := currentRelease.(*helmrelease.Release)
+ if !ok {
+ return fmt.Errorf("only the Chart API v2 is supported")
+ }
+ previousVersion := rollback.Version
+ if rollback.Version == 0 {
+ previousVersion = currentReleaseTyped.Version - 1
+ }
+ historyReleases, err := config.Releases.History(releaseName)
+ if err != nil {
+ return err
+ }
+ previousVersionExist := false
+ for _, rlsr := range historyReleases {
+ rlsrTyped, ok := rlsr.(*helmrelease.Release)
+ if !ok {
+ return fmt.Errorf("only the Chart API v2 is supported")
+ }
+ if previousVersion == rlsrTyped.Version {
+ previousVersionExist = true
+ break
+ }
+ }
+ if !previousVersionExist {
+ return fmt.Errorf("release has no %d version", previousVersion)
+ }
+ previousRelease, err := config.Releases.Get(releaseName, previousVersion)
+ if err != nil {
+ return err
+ }
+ previousReleaseTyped, ok := previousRelease.(*helmrelease.Release)
+ if !ok {
+ return fmt.Errorf("only the Chart API v2 is supported")
+ }
+ serverSideApply = previousReleaseTyped.ApplyMethod == "ssa"
+ rollback.ServerSideApply = fmt.Sprint(serverSideApply)
+ }
+ rollback.ForceConflicts = serverSideApply // We always force conflicts on server-side apply.
+
return rollback.Run(releaseName)
}
func newRollback(config *helmaction.Configuration, obj *v2.HelmRelease, opts []RollbackOption) *helmaction.Rollback {
rollback := helmaction.NewRollback(config)
+ rollback.ServerSideApply = "auto" // This must be the rollback default regardless of UseHelm3Defaults.
+ if ssa := obj.GetRollback().ServerSideApply; ssa != "" {
+ rollback.ServerSideApply = toHelmSSAValue(ssa)
+ }
rollback.Timeout = obj.GetRollback().GetTimeout(obj.GetTimeout()).Duration
- rollback.Wait = !obj.GetRollback().DisableWait
+ rollback.WaitStrategy = getWaitStrategy(obj.GetRollback())
rollback.WaitForJobs = !obj.GetRollback().DisableWaitForJobs
rollback.DisableHooks = obj.GetRollback().DisableHooks
- rollback.Force = obj.GetRollback().Force
- rollback.Recreate = obj.GetRollback().Recreate
+ rollback.ForceReplace = obj.GetRollback().Force
rollback.CleanupOnFail = obj.GetRollback().CleanupOnFail
rollback.MaxHistory = obj.GetMaxHistory()
diff --git a/internal/action/rollback_test.go b/internal/action/rollback_test.go
index 267cc53dc..0b39a8cfb 100644
--- a/internal/action/rollback_test.go
+++ b/internal/action/rollback_test.go
@@ -21,7 +21,7 @@ import (
"time"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
+ helmaction "helm.sh/helm/v4/pkg/action"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -48,7 +48,7 @@ func Test_newRollback(t *testing.T) {
got := newRollback(&helmaction.Configuration{}, obj, nil)
g.Expect(got).ToNot(BeNil())
g.Expect(got.Timeout).To(Equal(obj.Spec.Rollback.Timeout.Duration))
- g.Expect(got.Force).To(Equal(obj.Spec.Rollback.Force))
+ g.Expect(got.ForceReplace).To(Equal(obj.Spec.Rollback.Force))
g.Expect(got.MaxHistory).To(Equal(obj.GetMaxHistory()))
})
@@ -102,11 +102,64 @@ func Test_newRollback(t *testing.T) {
rollback.CleanupOnFail = true
},
func(rollback *helmaction.Rollback) {
- rollback.DryRun = true
+ rollback.DryRunStrategy = helmaction.DryRunClient
},
})
g.Expect(got).ToNot(BeNil())
g.Expect(got.CleanupOnFail).To(BeTrue())
- g.Expect(got.DryRun).To(BeTrue())
+ g.Expect(got.DryRunStrategy).To(Equal(helmaction.DryRunClient))
+ })
+
+ t.Run("server side apply is auto regardless of UseHelm3Defaults", func(t *testing.T) {
+ g := NewWithT(t)
+
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "rollback",
+ Namespace: "rollback-ns",
+ },
+ Spec: v2.HelmReleaseSpec{},
+ }
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+
+ // Test with UseHelm3Defaults = false
+ UseHelm3Defaults = false
+ got := newRollback(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("auto"))
+
+ // Test with UseHelm3Defaults = true
+ UseHelm3Defaults = true
+ got = newRollback(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("auto"))
+ })
+
+ t.Run("server side apply user specified", func(t *testing.T) {
+ g := NewWithT(t)
+
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "rollback",
+ Namespace: "rollback-ns",
+ },
+ Spec: v2.HelmReleaseSpec{
+ Rollback: &v2.Rollback{
+ ServerSideApply: v2.ServerSideApplyEnabled,
+ },
+ },
+ }
+
+ got := newRollback(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("true"))
+
+ obj.Spec.Rollback.ServerSideApply = v2.ServerSideApplyDisabled
+ got = newRollback(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("false"))
})
}
diff --git a/internal/action/ssa.go b/internal/action/ssa.go
new file mode 100644
index 000000000..f413a2393
--- /dev/null
+++ b/internal/action/ssa.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2026 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ v2 "github.com/fluxcd/helm-controller/api/v2"
+)
+
+// toHelmSSAValue converts the API ServerSideApplyMode to the Helm SDK value.
+// The API uses "enabled"/"disabled"/"auto" to avoid YAML boolean auto-conversion,
+// while the Helm SDK expects "true"/"false"/"auto".
+func toHelmSSAValue(mode v2.ServerSideApplyMode) string {
+ switch mode {
+ case v2.ServerSideApplyEnabled:
+ return "true"
+ case v2.ServerSideApplyDisabled:
+ return "false"
+ default:
+ return string(mode)
+ }
+}
diff --git a/internal/action/test.go b/internal/action/test.go
index a469443b8..ab08d664b 100644
--- a/internal/action/test.go
+++ b/internal/action/test.go
@@ -18,9 +18,10 @@ package action
import (
"context"
+ "fmt"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
)
@@ -40,7 +41,16 @@ type TestOption func(action *helmaction.ReleaseTesting)
// storage.ObserveFunc, which provides superior access to Helm storage writes.
func Test(_ context.Context, config *helmaction.Configuration, obj *v2.HelmRelease, opts ...TestOption) (*helmrelease.Release, error) {
test := newTest(config, obj, opts)
- return test.Run(obj.GetReleaseName())
+ rlsr, shutdownFunc, err := test.Run(obj.GetReleaseName())
+ defer shutdownFunc() // A non-nil shutdownFunc is always returned.
+ if err != nil {
+ return nil, err
+ }
+ rlsrTyped, ok := rlsr.(*helmrelease.Release)
+ if !ok {
+ return nil, fmt.Errorf("only the Chart API v2 is supported")
+ }
+ return rlsrTyped, err
}
func newTest(config *helmaction.Configuration, obj *v2.HelmRelease, opts []TestOption) *helmaction.ReleaseTesting {
diff --git a/internal/action/test_test.go b/internal/action/test_test.go
index 03222b1c0..37f13e01c 100644
--- a/internal/action/test_test.go
+++ b/internal/action/test_test.go
@@ -21,7 +21,7 @@ import (
"time"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
+ helmaction "helm.sh/helm/v4/pkg/action"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
diff --git a/internal/action/uninstall.go b/internal/action/uninstall.go
index 8afbc4e6f..3e55edd7e 100644
--- a/internal/action/uninstall.go
+++ b/internal/action/uninstall.go
@@ -19,8 +19,8 @@ package action
import (
"context"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmrelease "helm.sh/helm/v4/pkg/release"
v2 "github.com/fluxcd/helm-controller/api/v2"
)
@@ -49,7 +49,7 @@ func newUninstall(config *helmaction.Configuration, obj *v2.HelmRelease, opts []
uninstall.Timeout = obj.GetUninstall().GetTimeout(obj.GetTimeout()).Duration
uninstall.DisableHooks = obj.GetUninstall().DisableHooks
uninstall.KeepHistory = obj.GetUninstall().KeepHistory
- uninstall.Wait = !obj.GetUninstall().DisableWait
+ uninstall.WaitStrategy = getWaitStrategy(obj.GetUninstall())
uninstall.DeletionPropagation = obj.GetUninstall().GetDeletionPropagation()
for _, opt := range opts {
diff --git a/internal/action/uninstall_test.go b/internal/action/uninstall_test.go
index 9f8329feb..6f890e05c 100644
--- a/internal/action/uninstall_test.go
+++ b/internal/action/uninstall_test.go
@@ -21,7 +21,8 @@ import (
"time"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmkube "helm.sh/helm/v4/pkg/kube"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -82,14 +83,14 @@ func Test_newUninstall(t *testing.T) {
got := newUninstall(&helmaction.Configuration{}, obj, []UninstallOption{
func(uninstall *helmaction.Uninstall) {
- uninstall.Wait = true
+ uninstall.WaitStrategy = helmkube.LegacyStrategy
},
func(uninstall *helmaction.Uninstall) {
uninstall.DisableHooks = true
},
})
g.Expect(got).ToNot(BeNil())
- g.Expect(got.Wait).To(BeTrue())
+ g.Expect(got.WaitStrategy).To(Equal(helmkube.LegacyStrategy))
g.Expect(got.DisableHooks).To(BeTrue())
})
}
diff --git a/internal/action/upgrade.go b/internal/action/upgrade.go
index f4947ef27..e4c064619 100644
--- a/internal/action/upgrade.go
+++ b/internal/action/upgrade.go
@@ -18,12 +18,14 @@ package action
import (
"context"
+ "errors"
"fmt"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/features"
@@ -51,31 +53,68 @@ func Upgrade(ctx context.Context, config *helmaction.Configuration, obj *v2.Helm
vals helmchartutil.Values, opts ...UpgradeOption) (*helmrelease.Release, error) {
upgrade := newUpgrade(config, obj, opts)
+ // Resolve "auto" server-side apply setting.
+ // We need to copy this code from Helm because we need to set ForceConflicts
+ // based on the resolved value, since we always force conflicts on server-side apply
+ // (Helm does not).
+ releaseName := release.ShortenName(obj.GetReleaseName())
+ serverSideApply := upgrade.ServerSideApply == "true"
+ if upgrade.ServerSideApply == "auto" {
+ lastRelease, err := config.Releases.Last(releaseName)
+ if err != nil {
+ if errors.Is(err, helmdriver.ErrReleaseNotFound) {
+ return nil, helmdriver.NewErrNoDeployedReleases(releaseName)
+ }
+ return nil, err
+ }
+ lastReleaseTyped, ok := lastRelease.(*helmrelease.Release)
+ if !ok {
+ return nil, fmt.Errorf("only the Chart API v2 is supported")
+ }
+ serverSideApply = lastReleaseTyped.ApplyMethod == "ssa"
+ upgrade.ServerSideApply = fmt.Sprint(serverSideApply)
+ }
+ upgrade.ForceConflicts = serverSideApply // We always force conflicts on server-side apply.
+
policy, err := crdPolicyOrDefault(obj.GetUpgrade().CRDs)
if err != nil {
return nil, err
}
- if err := applyCRDs(config, policy, chrt, vals, setOriginVisitor(v2.GroupVersion.Group, obj.Namespace, obj.Name)); err != nil {
+
+ if err := applyCRDs(config, policy, chrt, vals, serverSideApply, setOriginVisitor(v2.GroupVersion.Group, obj.Namespace, obj.Name)); err != nil {
return nil, fmt.Errorf("failed to apply CustomResourceDefinitions: %w", err)
}
- return upgrade.RunWithContext(ctx, release.ShortenName(obj.GetReleaseName()), chrt, vals.AsMap())
+ rlsr, err := upgrade.RunWithContext(ctx, releaseName, chrt, vals.AsMap())
+ if err != nil {
+ return nil, err
+ }
+ rlsrTyped, ok := rlsr.(*helmrelease.Release)
+ if !ok {
+ return nil, fmt.Errorf("only the Chart API v2 is supported")
+ }
+ return rlsrTyped, err
}
func newUpgrade(config *helmaction.Configuration, obj *v2.HelmRelease, opts []UpgradeOption) *helmaction.Upgrade {
upgrade := helmaction.NewUpgrade(config)
+ upgrade.ServerSideApply = "auto" // This must be the upgrade default regardless of UseHelm3Defaults.
+ if ssa := obj.GetUpgrade().ServerSideApply; ssa != "" {
+ upgrade.ServerSideApply = toHelmSSAValue(ssa)
+ }
+
upgrade.Namespace = obj.GetReleaseNamespace()
upgrade.ResetValues = !obj.GetUpgrade().PreserveValues
upgrade.ReuseValues = obj.GetUpgrade().PreserveValues
upgrade.MaxHistory = obj.GetMaxHistory()
upgrade.Timeout = obj.GetUpgrade().GetTimeout(obj.GetTimeout()).Duration
upgrade.TakeOwnership = !obj.GetUpgrade().DisableTakeOwnership
- upgrade.Wait = !obj.GetUpgrade().DisableWait
+ upgrade.WaitStrategy = getWaitStrategy(obj.GetUpgrade())
upgrade.WaitForJobs = !obj.GetUpgrade().DisableWaitForJobs
upgrade.DisableHooks = obj.GetUpgrade().DisableHooks
upgrade.DisableOpenAPIValidation = obj.GetUpgrade().DisableOpenAPIValidation
upgrade.SkipSchemaValidation = obj.GetUpgrade().DisableSchemaValidation
- upgrade.Force = obj.GetUpgrade().Force
+ upgrade.ForceReplace = obj.GetUpgrade().Force
upgrade.CleanupOnFail = obj.GetUpgrade().CleanupOnFail
upgrade.Devel = true
diff --git a/internal/action/upgrade_test.go b/internal/action/upgrade_test.go
index 76462cd0a..7b1f09c2e 100644
--- a/internal/action/upgrade_test.go
+++ b/internal/action/upgrade_test.go
@@ -21,7 +21,7 @@ import (
"time"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
+ helmaction "helm.sh/helm/v4/pkg/action"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -49,7 +49,7 @@ func Test_newUpgrade(t *testing.T) {
g.Expect(got).ToNot(BeNil())
g.Expect(got.Namespace).To(Equal(obj.Namespace))
g.Expect(got.Timeout).To(Equal(obj.Spec.Upgrade.Timeout.Duration))
- g.Expect(got.Force).To(Equal(obj.Spec.Upgrade.Force))
+ g.Expect(got.ForceReplace).To(Equal(obj.Spec.Upgrade.Force))
})
t.Run("timeout fallback", func(t *testing.T) {
@@ -87,12 +87,12 @@ func Test_newUpgrade(t *testing.T) {
upgrade.Install = true
},
func(upgrade *helmaction.Upgrade) {
- upgrade.DryRun = true
+ upgrade.DryRunStrategy = helmaction.DryRunClient
},
})
g.Expect(got).ToNot(BeNil())
g.Expect(got.Install).To(BeTrue())
- g.Expect(got.DryRun).To(BeTrue())
+ g.Expect(got.DryRunStrategy).To(Equal(helmaction.DryRunClient))
})
t.Run("disable take ownership", func(t *testing.T) {
@@ -114,4 +114,57 @@ func Test_newUpgrade(t *testing.T) {
g.Expect(got).ToNot(BeNil())
g.Expect(got.TakeOwnership).To(BeFalse())
})
+
+ t.Run("server side apply is auto regardless of UseHelm3Defaults", func(t *testing.T) {
+ g := NewWithT(t)
+
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "upgrade",
+ Namespace: "upgrade-ns",
+ },
+ Spec: v2.HelmReleaseSpec{},
+ }
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+
+ // Test with UseHelm3Defaults = false
+ UseHelm3Defaults = false
+ got := newUpgrade(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("auto"))
+
+ // Test with UseHelm3Defaults = true
+ UseHelm3Defaults = true
+ got = newUpgrade(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("auto"))
+ })
+
+ t.Run("server side apply user specified", func(t *testing.T) {
+ g := NewWithT(t)
+
+ obj := &v2.HelmRelease{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "upgrade",
+ Namespace: "upgrade-ns",
+ },
+ Spec: v2.HelmReleaseSpec{
+ Upgrade: &v2.Upgrade{
+ ServerSideApply: v2.ServerSideApplyEnabled,
+ },
+ },
+ }
+
+ got := newUpgrade(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("true"))
+
+ obj.Spec.Upgrade.ServerSideApply = v2.ServerSideApplyDisabled
+ got = newUpgrade(&helmaction.Configuration{}, obj, nil)
+ g.Expect(got).ToNot(BeNil())
+ g.Expect(got.ServerSideApply).To(Equal("false"))
+ })
}
diff --git a/internal/action/verify.go b/internal/action/verify.go
index 92e55e030..999420b97 100644
--- a/internal/action/verify.go
+++ b/internal/action/verify.go
@@ -18,14 +18,15 @@ package action
import (
"errors"
+ "fmt"
"github.com/opencontainers/go-digest"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/release"
@@ -87,7 +88,11 @@ func LastRelease(config *helmaction.Configuration, releaseName string) (*helmrel
}
return nil, err
}
- return rls, nil
+ rlsTyped, ok := rls.(*helmrelease.Release)
+ if !ok {
+ return nil, fmt.Errorf("only the Chart API v2 is supported")
+ }
+ return rlsTyped, nil
}
// VerifySnapshot verifies the data of the given v2.Snapshot
@@ -100,13 +105,17 @@ func VerifySnapshot(config *helmaction.Configuration, snapshot *v2.Snapshot) (rl
return nil, ErrReleaseNotFound
}
- rls, err = config.Releases.Get(snapshot.Name, snapshot.Version)
+ rlsr, err := config.Releases.Get(snapshot.Name, snapshot.Version)
if err != nil {
if errors.Is(err, helmdriver.ErrReleaseNotFound) {
return nil, ErrReleaseDisappeared
}
return nil, err
}
+ rls, ok := rlsr.(*helmrelease.Release)
+ if !ok {
+ return nil, fmt.Errorf("only the Chart API v2 is supported")
+ }
if err = VerifyReleaseObject(snapshot, rls); err != nil {
return nil, err
diff --git a/internal/action/verify_test.go b/internal/action/verify_test.go
index 03519ef79..e2103d669 100644
--- a/internal/action/verify_test.go
+++ b/internal/action/verify_test.go
@@ -21,12 +21,13 @@ import (
"testing"
. "github.com/onsi/gomega"
- helmaction "helm.sh/helm/v3/pkg/action"
- helmchart "helm.sh/helm/v3/pkg/chart"
- "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- "helm.sh/helm/v3/pkg/storage/driver"
+ helmaction "helm.sh/helm/v4/pkg/action"
+ "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ "helm.sh/helm/v4/pkg/storage/driver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -229,13 +230,13 @@ func TestVerifySnapshot(t *testing.T) {
mock := testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: "release",
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Namespace: "default",
})
otherMock := testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: "release",
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: "default",
})
mockInfo := release.ObservedToSnapshot(release.ObserveRelease(mock))
@@ -312,7 +313,7 @@ func TestVerifyReleaseObject(t *testing.T) {
mockRls := testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: "release",
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: "default",
})
mockSnapshot := release.ObservedToSnapshot(release.ObserveRelease(mockRls))
@@ -341,7 +342,7 @@ func TestVerifyReleaseObject(t *testing.T) {
rls: testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: "release",
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Namespace: "default",
}),
wantErr: ErrReleaseNotObserved,
@@ -368,7 +369,7 @@ func TestVerifyRelease(t *testing.T) {
mockRls := testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: "release",
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: "default",
})
mockSnapshot := release.ObservedToSnapshot(release.ObserveRelease(mockRls))
@@ -378,7 +379,7 @@ func TestVerifyRelease(t *testing.T) {
rls *helmrelease.Release
snapshot *v2.Snapshot
chrt *helmchart.Metadata
- vals chartutil.Values
+ vals common.Values
wantErr error
}{
{
@@ -420,7 +421,7 @@ func TestVerifyRelease(t *testing.T) {
rls: mockRls,
snapshot: mockSnapshot,
chrt: mockRls.Chart.Metadata,
- vals: chartutil.Values{
+ vals: common.Values{
"some": "other",
},
wantErr: ErrConfigDigest,
diff --git a/internal/action/wait.go b/internal/action/wait.go
new file mode 100644
index 000000000..07dbf81d2
--- /dev/null
+++ b/internal/action/wait.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2026 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import helmkube "helm.sh/helm/v4/pkg/kube"
+
+// actionThatWaits is implemented by HelmRelease action specs that
+// support wait strategies.
+type actionThatWaits interface {
+ GetDisableWait() bool
+}
+
+// getWaitStrategy returns the wait strategy for the given action spec.
+func getWaitStrategy(spec actionThatWaits) helmkube.WaitStrategy {
+ switch {
+ case spec.GetDisableWait():
+ return helmkube.HookOnlyStrategy
+ case UseHelm3Defaults:
+ return helmkube.LegacyStrategy
+ default:
+ return helmkube.StatusWatcherStrategy
+ }
+}
diff --git a/internal/action/wait_test.go b/internal/action/wait_test.go
new file mode 100644
index 000000000..0c8e5bfe5
--- /dev/null
+++ b/internal/action/wait_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2026 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "testing"
+
+ . "github.com/onsi/gomega"
+ helmkube "helm.sh/helm/v4/pkg/kube"
+)
+
+type mockActionThatWaits struct {
+ disableWait bool
+}
+
+func (m *mockActionThatWaits) GetDisableWait() bool {
+ return m.disableWait
+}
+
+func TestGetWaitStrategy(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ useHelm3Defaults bool
+ actionSpec actionThatWaits
+ expectedWait helmkube.WaitStrategy
+ }{
+ {
+ name: "wait disabled",
+ useHelm3Defaults: false,
+ actionSpec: &mockActionThatWaits{disableWait: true},
+ expectedWait: helmkube.HookOnlyStrategy,
+ },
+ {
+ name: "wait disabled with UseHelm3Defaults",
+ useHelm3Defaults: true,
+ actionSpec: &mockActionThatWaits{disableWait: true},
+ expectedWait: helmkube.HookOnlyStrategy,
+ },
+ {
+ name: "wait enabled with UseHelm3Defaults",
+ useHelm3Defaults: true,
+ actionSpec: &mockActionThatWaits{disableWait: false},
+ expectedWait: helmkube.LegacyStrategy,
+ },
+ {
+ name: "wait enabled with Helm4 defaults",
+ useHelm3Defaults: false,
+ actionSpec: &mockActionThatWaits{disableWait: false},
+ expectedWait: helmkube.StatusWatcherStrategy,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ // Save and restore UseHelm3Defaults
+ oldUseHelm3Defaults := UseHelm3Defaults
+ t.Cleanup(func() { UseHelm3Defaults = oldUseHelm3Defaults })
+ UseHelm3Defaults = tt.useHelm3Defaults
+
+ waitStrategy := getWaitStrategy(tt.actionSpec)
+ g.Expect(waitStrategy).To(Equal(tt.expectedWait))
+ })
+ }
+}
diff --git a/internal/cmp/simple_unstructured_test.go b/internal/cmp/simple_unstructured_test.go
index 6cba9fa11..26ecc821b 100644
--- a/internal/cmp/simple_unstructured_test.go
+++ b/internal/cmp/simple_unstructured_test.go
@@ -277,7 +277,7 @@ c`},
}
func yamlToUnstructured(str string) (*unstructured.Unstructured, error) {
- var obj map[string]interface{}
+ var obj map[string]any
if err := yaml.Unmarshal([]byte(str), &obj); err != nil {
return nil, err
}
diff --git a/internal/controller/helmrelease_controller.go b/internal/controller/helmrelease_controller.go
index bfc896350..3d47ebbd9 100644
--- a/internal/controller/helmrelease_controller.go
+++ b/internal/controller/helmrelease_controller.go
@@ -25,7 +25,8 @@ import (
"github.com/fluxcd/pkg/runtime/cel"
celtypes "github.com/google/cel-go/common/types"
- "helm.sh/helm/v3/pkg/chart"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -204,6 +205,12 @@ func (r *HelmReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, patchHelper *patch.SerialPatcher, obj *v2.HelmRelease) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)
+ // Check deprecated fields.
+ if obj.GetRollback().Recreate {
+ log.Info("warning: the .spec.rollback.recreate field is deprecated and has no effect. " +
+ "for details, please see: https://github.com/fluxcd/helm-controller/issues/1300#issuecomment-3740272924")
+ }
+
// Mark the resource as under reconciliation.
// We set Ready=Unknown down below after we assess the readiness of dependencies and the source.
conditions.MarkReconciling(obj, meta.ProgressingReason, "Fulfilling prerequisites")
@@ -382,7 +389,7 @@ func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, patchHelpe
// Construct config factory for any further Helm actions.
cfg, err := action.NewConfigFactory(getter,
action.WithStorage(action.DefaultStorageDriver, obj.Status.StorageNamespace),
- action.WithStorageLog(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.TraceLevel))),
+ action.WithStorageLog(action.NewTraceLogger(ctx)),
)
if err != nil {
conditions.MarkFalse(obj, meta.ReadyCondition, "FactoryError", "%s", err)
@@ -547,7 +554,7 @@ func (r *HelmReleaseReconciler) reconcileUninstall(ctx context.Context, getter g
// Construct config factory for current release.
cfg, err := action.NewConfigFactory(getter,
action.WithStorage(action.DefaultStorageDriver, obj.Status.StorageNamespace),
- action.WithStorageLog(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.TraceLevel))),
+ action.WithStorageLog(action.NewTraceLogger(ctx)),
)
if err != nil {
conditions.MarkFalse(obj, meta.ReadyCondition, "ConfigFactoryErr", "%s", err)
@@ -669,7 +676,7 @@ func (r *HelmReleaseReconciler) adoptLegacyRelease(ctx context.Context, getter g
// Construct config factory for current release.
cfg, err := action.NewConfigFactory(getter,
action.WithStorage(action.DefaultStorageDriver, storageNamespace),
- action.WithStorageLog(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.TraceLevel))),
+ action.WithStorageLog(action.NewTraceLogger(ctx)),
)
if err != nil {
return err
@@ -683,11 +690,15 @@ func (r *HelmReleaseReconciler) adoptLegacyRelease(ctx context.Context, getter g
}
// Convert it to a v2 release snapshot.
- snap := release.ObservedToSnapshot(release.ObserveRelease(rls))
+ rlsTyped, ok := rls.(*helmrelease.Release)
+ if !ok {
+ return fmt.Errorf("only the Chart API v2 is supported")
+ }
+ snap := release.ObservedToSnapshot(release.ObserveRelease(rlsTyped))
// If tests are enabled, include them as well.
if obj.GetTest().Enable {
- snap.SetTestHooks(release.TestHooksFromRelease(rls))
+ snap.SetTestHooks(release.TestHooksFromRelease(rlsTyped))
}
// Adopt it as the current release in the history.
diff --git a/internal/controller/helmrelease_controller_fuzz_test.go b/internal/controller/helmrelease_controller_fuzz_test.go
index 29ffdbe53..b772f381e 100644
--- a/internal/controller/helmrelease_controller_fuzz_test.go
+++ b/internal/controller/helmrelease_controller_fuzz_test.go
@@ -128,9 +128,9 @@ type DummyRecorder struct{}
func (r *DummyRecorder) Event(object runtime.Object, eventtype, reason, message string) {
}
-func (r *DummyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
+func (r *DummyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...any) {
}
func (r *DummyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string,
- eventtype, reason string, messageFmt string, args ...interface{}) {
+ eventtype, reason string, messageFmt string, args ...any) {
}
diff --git a/internal/controller/helmrelease_controller_test.go b/internal/controller/helmrelease_controller_test.go
index 16b63fc57..941a7950b 100644
--- a/internal/controller/helmrelease_controller_test.go
+++ b/internal/controller/helmrelease_controller_test.go
@@ -26,10 +26,11 @@ import (
. "github.com/onsi/gomega"
"github.com/opencontainers/go-digest"
- "helm.sh/helm/v3/pkg/chart"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -464,7 +465,7 @@ func TestHelmReleaseReconciler_reconcileRelease(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: chartMock,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithConfig(nil))
valChecksum := chartutil.DigestValues("sha1", rls.Config)
@@ -872,7 +873,7 @@ func TestHelmReleaseReconciler_reconcileRelease(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: chartMock,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithConfig(nil))
obj := &v2.HelmRelease{
@@ -1335,7 +1336,7 @@ func TestHelmReleaseReconciler_reconcileReleaseFromHelmChartSource(t *testing.T)
Namespace: ns.Name,
Version: 1,
Chart: chartMock,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
})
obj := &v2.HelmRelease{
@@ -2198,7 +2199,7 @@ func TestHelmReleaseReconciler_reconcileReleaseFromOCIRepositorySource(t *testin
Namespace: ns.Name,
Version: 1,
Chart: chartMock,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
})
obj := &v2.HelmRelease{
@@ -2302,7 +2303,7 @@ func TestHelmReleaseReconciler_reconcileDelete(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
})
obj := &v2.HelmRelease{
@@ -2415,7 +2416,7 @@ func TestHelmReleaseReconciler_reconcileReleaseDeletion(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
})
obj := &v2.HelmRelease{
@@ -2478,7 +2479,7 @@ func TestHelmReleaseReconciler_reconcileReleaseDeletion(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
})
obj := &v2.HelmRelease{
@@ -2579,7 +2580,7 @@ func TestHelmReleaseReconciler_reconcileReleaseDeletion(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
})
obj := &v2.HelmRelease{
@@ -2705,8 +2706,8 @@ func TestHelmReleaseReconciler_reconcileReleaseDeletion(t *testing.T) {
err := r.reconcileReleaseDeletion(context.TODO(), obj)
g.Expect(err).To(HaveOccurred())
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, v2.UninstallFailedReason, "Kubernetes cluster unreachable"),
- *conditions.FalseCondition(v2.ReleasedCondition, v2.UninstallFailedReason, "Kubernetes cluster unreachable"),
+ *conditions.FalseCondition(meta.ReadyCondition, v2.UninstallFailedReason, "kubernetes cluster unreachable"),
+ *conditions.FalseCondition(v2.ReleasedCondition, v2.UninstallFailedReason, "kubernetes cluster unreachable"),
}))
})
@@ -2844,7 +2845,7 @@ func TestHelmReleaseReconciler_reconcileUninstall(t *testing.T) {
Namespace: ns.Name,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithFailingHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithFailingHook())
obj := &v2.HelmRelease{
@@ -3318,7 +3319,7 @@ func TestHelmReleaseReconciler_adoptLegacyRelease(t *testing.T) {
Namespace: namespace,
Version: 6,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithTestHook()),
}
},
@@ -3344,7 +3345,7 @@ func TestHelmReleaseReconciler_adoptLegacyRelease(t *testing.T) {
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithTestHook()),
}
},
diff --git a/internal/controller_test/suite_test.go b/internal/controller_test/suite_test.go
index 545d3c4f5..4934ccc24 100644
--- a/internal/controller_test/suite_test.go
+++ b/internal/controller_test/suite_test.go
@@ -29,7 +29,7 @@ import (
"github.com/fluxcd/pkg/testserver"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
"go.uber.org/zap/zapcore"
- "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v4/pkg/kube"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
diff --git a/internal/diff/summarize_test.go b/internal/diff/summarize_test.go
index b67206e8a..749b1c30d 100644
--- a/internal/diff/summarize_test.go
+++ b/internal/diff/summarize_test.go
@@ -30,9 +30,9 @@ func TestSummarizeDiffSet(t *testing.T) {
diffSet := jsondiff.DiffSet{
&jsondiff.Diff{
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "config",
"namespace": "namespace-1",
},
@@ -42,9 +42,9 @@ func TestSummarizeDiffSet(t *testing.T) {
},
&jsondiff.Diff{
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "naughty",
"namespace": "namespace-x",
},
@@ -54,9 +54,9 @@ func TestSummarizeDiffSet(t *testing.T) {
},
&jsondiff.Diff{
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"kind": "StatefulSet",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "hello-world",
"namespace": "default",
},
@@ -66,9 +66,9 @@ func TestSummarizeDiffSet(t *testing.T) {
},
&jsondiff.Diff{
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"kind": "Deployment",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "touched-me",
"namespace": "tenant-y",
},
@@ -215,9 +215,9 @@ func TestResourceName(t *testing.T) {
{
name: "with namespace",
resource: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"kind": "Deployment",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "touched-me",
"namespace": "tenant-y",
},
@@ -228,9 +228,9 @@ func TestResourceName(t *testing.T) {
{
name: "without namespace",
resource: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"kind": "ClusterIssuer",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "letsencrypt",
},
},
diff --git a/internal/diff/unstructured_test.go b/internal/diff/unstructured_test.go
index 8c0d42868..2b3cfd2ab 100644
--- a/internal/diff/unstructured_test.go
+++ b/internal/diff/unstructured_test.go
@@ -27,7 +27,7 @@ func TestWithoutStatus(t *testing.T) {
g := NewWithT(t)
u := unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"status": "test",
},
}
@@ -46,19 +46,19 @@ func TestUnstructured(t *testing.T) {
}{
{
name: "equal objects",
- x: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ x: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(4),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(4),
},
}},
- y: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ y: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(4),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(4),
},
}},
@@ -67,17 +67,17 @@ func TestUnstructured(t *testing.T) {
},
{
name: "added simple value",
- x: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ x: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(1),
},
- "status": map[string]interface{}{},
+ "status": map[string]any{},
}},
- y: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ y: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(1),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(1),
},
}},
@@ -87,17 +87,17 @@ func TestUnstructured(t *testing.T) {
},
{
name: "removed simple value",
- x: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ x: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(1),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(4),
},
}},
- y: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{},
- "status": map[string]interface{}{
+ y: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{},
+ "status": map[string]any{
"readyReplicas": int64(4),
},
}},
@@ -107,19 +107,19 @@ func TestUnstructured(t *testing.T) {
},
{
name: "changed simple value",
- x: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ x: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(3),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(1),
},
}},
- y: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ y: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(3),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(3),
},
}},
@@ -131,19 +131,19 @@ func TestUnstructured(t *testing.T) {
{
name: "with options",
opts: []CompareOption{WithoutStatus()},
- x: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ x: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(3),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(4),
},
}},
- y: &unstructured.Unstructured{Object: map[string]interface{}{
- "spec": map[string]interface{}{
+ y: &unstructured.Unstructured{Object: map[string]any{
+ "spec": map[string]any{
"replicas": int64(3),
},
- "status": map[string]interface{}{
+ "status": map[string]any{
"readyReplicas": int64(1),
},
}},
diff --git a/internal/features/features.go b/internal/features/features.go
index f17a83149..143f558ff 100644
--- a/internal/features/features.go
+++ b/internal/features/features.go
@@ -77,6 +77,10 @@ const (
// ExternalArtifact controls whether the ExternalArtifact source type is enabled.
ExternalArtifact = "ExternalArtifact"
+
+ // UseHelm3Defaults makes the controller use the Helm 3 default behaviors
+ // when defaults are used.
+ UseHelm3Defaults = "UseHelm3Defaults"
)
var features = map[string]bool{
@@ -110,6 +114,9 @@ var features = map[string]bool{
// DisableConfigWatchers
// opt-in from v1.4.4
controller.FeatureGateDisableConfigWatchers: false,
+ // UseHelm3Defaults
+ // opt-in from v1.5.0
+ UseHelm3Defaults: false,
}
func init() {
diff --git a/internal/loader/artifact_url.go b/internal/loader/artifact_url.go
index 35e82c4f5..af63a4f5e 100644
--- a/internal/loader/artifact_url.go
+++ b/internal/loader/artifact_url.go
@@ -31,8 +31,8 @@ import (
"github.com/hashicorp/go-retryablehttp"
digestlib "github.com/opencontainers/go-digest"
_ "github.com/opencontainers/go-digest/blake3"
- "helm.sh/helm/v3/pkg/chart"
- "helm.sh/helm/v3/pkg/chart/loader"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ "helm.sh/helm/v4/pkg/chart/v2/loader"
)
const (
diff --git a/internal/loader/client.go b/internal/loader/client.go
index 29ecddc4c..867c89292 100644
--- a/internal/loader/client.go
+++ b/internal/loader/client.go
@@ -47,18 +47,18 @@ type errorLogger struct {
log logr.Logger
}
-func (l *errorLogger) Error(msg string, keysAndValues ...interface{}) {
+func (l *errorLogger) Error(msg string, keysAndValues ...any) {
l.log.Info(msg, keysAndValues...)
}
-func (l *errorLogger) Info(msg string, keysAndValues ...interface{}) {
+func (l *errorLogger) Info(msg string, keysAndValues ...any) {
// Do nothing.
}
-func (l *errorLogger) Debug(msg string, keysAndValues ...interface{}) {
+func (l *errorLogger) Debug(msg string, keysAndValues ...any) {
// Do nothing.
}
-func (l *errorLogger) Warn(msg string, keysAndValues ...interface{}) {
+func (l *errorLogger) Warn(msg string, keysAndValues ...any) {
// Do nothing.
}
diff --git a/internal/postrender/build.go b/internal/postrender/build.go
index 6977c3d52..4209eb1a0 100644
--- a/internal/postrender/build.go
+++ b/internal/postrender/build.go
@@ -20,7 +20,7 @@ import (
"encoding/json"
"github.com/opencontainers/go-digest"
- helmpostrender "helm.sh/helm/v3/pkg/postrender"
+ helmpostrender "helm.sh/helm/v4/pkg/postrenderer"
v2 "github.com/fluxcd/helm-controller/api/v2"
)
diff --git a/internal/postrender/combined.go b/internal/postrender/combined.go
index c25947437..6de4506c8 100644
--- a/internal/postrender/combined.go
+++ b/internal/postrender/combined.go
@@ -19,7 +19,7 @@ package postrender
import (
"bytes"
- helmpostrender "helm.sh/helm/v3/pkg/postrender"
+ helmpostrender "helm.sh/helm/v4/pkg/postrenderer"
)
// Combined is a collection of Helm PostRenders which are
diff --git a/internal/reconcile/atomic_release.go b/internal/reconcile/atomic_release.go
index b57f31587..2e7444965 100644
--- a/internal/reconcile/atomic_release.go
+++ b/internal/reconcile/atomic_release.go
@@ -23,7 +23,7 @@ import (
"strings"
"time"
- "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v4/pkg/kube"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
diff --git a/internal/reconcile/atomic_release_test.go b/internal/reconcile/atomic_release_test.go
index 6da82cec3..864394e24 100644
--- a/internal/reconcile/atomic_release_test.go
+++ b/internal/reconcile/atomic_release_test.go
@@ -24,11 +24,12 @@ import (
. "github.com/onsi/gomega"
extjsondiff "github.com/wI2L/jsondiff"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmrelease "helm.sh/helm/v3/pkg/release"
- "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -148,7 +149,7 @@ func TestAtomicRelease_Reconcile(t *testing.T) {
Enable: true,
},
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Second},
},
}
@@ -219,7 +220,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
spec func(spec *v2.HelmReleaseSpec)
status func(namespace string, releases []*helmrelease.Release) v2.HelmReleaseStatus
chart *helmchart.Chart
- values map[string]interface{}
+ values map[string]any
expectHistory func(releases []*helmrelease.Release) v2.Snapshots
wantErr error
}{
@@ -232,7 +233,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithConfig(nil)),
}
},
@@ -260,7 +261,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithConfig(nil)),
}
},
@@ -289,8 +290,8 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ Status: helmreleasecommon.StatusDeployed,
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
}
},
status: func(namespace string, releases []*helmrelease.Release) v2.HelmReleaseStatus {
@@ -301,7 +302,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "baz"},
+ values: map[string]any{"foo": "baz"},
expectHistory: func(releases []*helmrelease.Release) v2.Snapshots {
return v2.Snapshots{
release.ObservedToSnapshot(release.ObserveRelease(releases[1])),
@@ -318,7 +319,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusPendingInstall,
+ Status: helmreleasecommon.StatusPendingInstall,
}, testutil.ReleaseWithConfig(nil)),
}
},
@@ -343,14 +344,14 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithConfig(nil)),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusPendingUpgrade,
+ Status: helmreleasecommon.StatusPendingUpgrade,
}, testutil.ReleaseWithConfig(nil)),
}
},
@@ -377,21 +378,21 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithConfig(nil)),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
}, testutil.ReleaseWithConfig(nil)),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusPendingRollback,
+ Status: helmreleasecommon.StatusPendingRollback,
}, testutil.ReleaseWithConfig(nil)),
}
},
@@ -428,7 +429,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -448,27 +449,27 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
}
},
status: func(namespace string, releases []*helmrelease.Release) v2.HelmReleaseStatus {
previousDeployed := release.ObserveRelease(releases[1])
- previousDeployed.Info.Status = helmrelease.StatusDeployed
+ previousDeployed.Info.Status = helmreleasecommon.StatusDeployed
return v2.HelmReleaseStatus{
History: v2.Snapshots{
@@ -492,27 +493,27 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
}
},
status: func(namespace string, releases []*helmrelease.Release) v2.HelmReleaseStatus {
modifiedRelease := release.ObserveRelease(releases[1])
- modifiedRelease.Info.Status = helmrelease.StatusFailed
+ modifiedRelease.Info.Status = helmreleasecommon.StatusFailed
return v2.HelmReleaseStatus{
History: v2.Snapshots{
@@ -538,7 +539,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
)),
},
@@ -560,21 +561,21 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -585,7 +586,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 4,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
}),
))
@@ -736,7 +737,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusUninstalling,
+ Status: helmreleasecommon.StatusUninstalling,
}),
)),
},
@@ -756,7 +757,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -785,7 +786,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -822,7 +823,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -863,7 +864,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -900,7 +901,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -943,7 +944,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -986,7 +987,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -1023,21 +1024,21 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -1072,21 +1073,21 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
}),
}
},
@@ -1121,21 +1122,21 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 3,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
}),
}
},
@@ -1188,7 +1189,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
if tt.spec != nil {
@@ -1237,7 +1238,7 @@ func TestAtomicRelease_Reconcile_Scenarios(t *testing.T) {
g.Expect(err).To(wantErr)
if tt.expectHistory != nil {
- history, _ := store.History(mockReleaseName)
+ history, _ := storeHistory(store, mockReleaseName)
releaseutil.SortByRevision(history)
g.Expect(req.Object.Status.History).To(testutil.Equal(tt.expectHistory(history)))
@@ -1251,7 +1252,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
name string
releases func(namespace string) []*helmrelease.Release
spec func(spec *v2.HelmReleaseSpec)
- values map[string]interface{}
+ values map[string]any
status func(releases []*helmrelease.Release) v2.HelmReleaseStatus
wantDigest string
wantReleaseAction v2.ReleaseAction
@@ -1264,7 +1265,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -1297,7 +1298,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -1319,7 +1320,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
},
}
},
- values: map[string]interface{}{"foo": "baz"},
+ values: map[string]any{"foo": "baz"},
wantDigest: postrender.Digest(digest.Canonical, postRenderers).String(),
wantReleaseAction: v2.ReleaseActionUpgrade,
},
@@ -1331,7 +1332,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -1365,7 +1366,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -1418,7 +1419,7 @@ func TestAtomicRelease_Reconcile_PostRenderers_Scenarios(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
@@ -1597,10 +1598,10 @@ func TestAtomicRelease_actionForState(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "apps/v1",
"kind": "Deployment",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "mock",
"namespace": "something",
},
@@ -1657,27 +1658,27 @@ func TestAtomicRelease_actionForState(t *testing.T) {
{
Type: jsondiff.DiffTypeUpdate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "apps/v1",
"kind": "Deployment",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "mock",
"namespace": "something",
},
- "spec": map[string]interface{}{
+ "spec": map[string]any{
"replicas": 2,
},
},
},
ClusterObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "apps/v1",
"kind": "Deployment",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "mock",
"namespace": "something",
},
- "spec": map[string]interface{}{
+ "spec": map[string]any{
"replicas": 1,
},
},
@@ -1855,14 +1856,14 @@ func TestAtomicRelease_actionForState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Chart: testutil.BuildChart(),
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Chart: testutil.BuildChart(),
}),
},
@@ -1893,14 +1894,14 @@ func TestAtomicRelease_actionForState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Chart: testutil.BuildChart(),
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Chart: testutil.BuildChart(),
}),
},
@@ -1931,7 +1932,7 @@ func TestAtomicRelease_actionForState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Chart: testutil.BuildChart(),
}),
},
@@ -1961,7 +1962,7 @@ func TestAtomicRelease_actionForState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Chart: testutil.BuildChart(),
}),
},
@@ -1981,7 +1982,7 @@ func TestAtomicRelease_actionForState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Chart: testutil.BuildChart(),
}),
)),
@@ -2002,7 +2003,7 @@ func TestAtomicRelease_actionForState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Chart: testutil.BuildChart(),
}),
},
@@ -2212,7 +2213,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
name string
releases func(namespace string) []*helmrelease.Release
spec func(spec *v2.HelmReleaseSpec)
- values map[string]interface{}
+ values map[string]any
status func(releases []*helmrelease.Release) v2.HelmReleaseStatus
wantDigest string
wantReleaseAction v2.ReleaseAction
@@ -2225,7 +2226,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -2258,7 +2259,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -2280,7 +2281,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
},
}
},
- values: map[string]interface{}{"foo": "baz"},
+ values: map[string]any{"foo": "baz"},
wantDigest: postrender.CommonMetadataDigest(digest.Canonical, commonMetadata).String(),
wantReleaseAction: v2.ReleaseActionUpgrade,
},
@@ -2292,7 +2293,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -2326,7 +2327,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}, testutil.ReleaseWithConfig(nil)),
}
@@ -2379,7 +2380,7 @@ func TestAtomicRelease_Reconcile_CommonMetadata_Scenarios(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
diff --git a/internal/reconcile/correct_cluster_drift_test.go b/internal/reconcile/correct_cluster_drift_test.go
index 66dcc9dee..b3d27ac45 100644
--- a/internal/reconcile/correct_cluster_drift_test.go
+++ b/internal/reconcile/correct_cluster_drift_test.go
@@ -71,10 +71,10 @@ func TestCorrectClusterDrift_Reconcile(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "secret",
"namespace": namespace,
},
@@ -84,23 +84,23 @@ func TestCorrectClusterDrift_Reconcile(t *testing.T) {
{
Type: jsondiff.DiffTypeUpdate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "configmap",
"namespace": namespace,
},
- "data": map[string]interface{}{
+ "data": map[string]any{
"key": "value",
},
},
},
ClusterObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "ConfigMap",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "configmap",
"namespace": namespace,
},
@@ -110,7 +110,7 @@ func TestCorrectClusterDrift_Reconcile(t *testing.T) {
{
Type: extjsondiff.OperationAdd,
Path: "/data",
- Value: map[string]interface{}{
+ Value: map[string]any{
"key": "value",
},
},
diff --git a/internal/reconcile/install.go b/internal/reconcile/install.go
index cbd3845ed..d501ed283 100644
--- a/internal/reconcile/install.go
+++ b/internal/reconcile/install.go
@@ -25,11 +25,9 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/chartutil"
"github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/logger"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
@@ -71,9 +69,9 @@ func NewInstall(cfg *action.ConfigFactory, recorder record.EventRecorder) *Insta
func (r *Install) Reconcile(ctx context.Context, req *Request) error {
var (
- logBuf = action.NewLogBuffer(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.DebugLevel)), 10)
+ logBuf = action.NewDebugLogBuffer(ctx)
obsReleases = make(observedReleases)
- cfg = r.configFactory.Build(logBuf.Log, observeRelease(obsReleases))
+ cfg = r.configFactory.Build(logBuf, observeRelease(obsReleases))
startTime = time.Now()
)
diff --git a/internal/reconcile/install_test.go b/internal/reconcile/install_test.go
index 5f349b1fb..2947c893f 100644
--- a/internal/reconcile/install_test.go
+++ b/internal/reconcile/install_test.go
@@ -24,12 +24,13 @@ import (
"time"
. "github.com/onsi/gomega"
- "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
- "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -142,7 +143,7 @@ func TestInstall_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 1,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
}),
}
},
@@ -180,7 +181,7 @@ func TestInstall_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: "other",
Version: 1,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
Chart: testutil.BuildChart(),
}))),
},
@@ -245,7 +246,7 @@ func TestInstall_Reconcile(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
if tt.spec != nil {
@@ -286,7 +287,7 @@ func TestInstall_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
releaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
@@ -394,7 +395,7 @@ func TestInstall_Reconcile_withSubchartWithCRDs(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(expectConditions))
- releases, _ := store.History(mockReleaseName)
+ releases, _ := storeHistory(store, mockReleaseName)
releaseutil.SortByRevision(releases)
g.Expect(obj.Status.History).To(testutil.Equal(expectHistory(releases)))
@@ -465,7 +466,7 @@ func TestInstall_failure(t *testing.T) {
eventRecorder: recorder,
}
- req := &Request{Object: obj.DeepCopy(), Chart: chrt, Values: map[string]interface{}{"foo": "bar"}}
+ req := &Request{Object: obj.DeepCopy(), Chart: chrt, Values: map[string]any{"foo": "bar"}}
r.failure(req, nil, err)
expectMsg := fmt.Sprintf(fmtInstallFailure, mockReleaseNamespace, mockReleaseName, chrt.Name(),
@@ -500,7 +501,7 @@ func TestInstall_failure(t *testing.T) {
eventRecorder: recorder,
}
req := &Request{Object: obj.DeepCopy(), Chart: chrt}
- r.failure(req, mockLogBuffer(5, 10), err)
+ r.failure(req, mockLogBuffer(), err)
expectSubStr := "Last Helm logs"
g.Expect(conditions.IsFalse(req.Object, v2.ReleasedCondition)).To(BeTrue())
diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go
index 6c51dd1cc..de98349a9 100644
--- a/internal/reconcile/reconcile.go
+++ b/internal/reconcile/reconcile.go
@@ -19,8 +19,8 @@ package reconcile
import (
"context"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
v2 "github.com/fluxcd/helm-controller/api/v2"
)
diff --git a/internal/reconcile/release.go b/internal/reconcile/release.go
index 269b1a2f8..f2d2c5296 100644
--- a/internal/reconcile/release.go
+++ b/internal/reconcile/release.go
@@ -23,7 +23,8 @@ import (
eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -104,7 +105,7 @@ func mutateOCIDigest(obj *v2.HelmRelease, obs release.Observation) release.Obser
return obs
}
-func releaseToObservation(rls *helmrelease.Release, snapshot *v2.Snapshot) release.Observation {
+func releaseToObservation(rls *helmreleasev1.Release, snapshot *v2.Snapshot) release.Observation {
obs := release.ObserveRelease(rls)
obs.OCIDigest = snapshot.OCIDigest
return obs
@@ -115,8 +116,12 @@ func releaseToObservation(rls *helmrelease.Release, snapshot *v2.Snapshot) relea
// It can be used for Helm actions that modify multiple releases in the
// Helm storage, such as install and upgrade.
func observeRelease(observed observedReleases) storage.ObserveFunc {
- return func(rls *helmrelease.Release) {
- obs := release.ObserveRelease(rls)
+ return func(rls helmrelease.Releaser) {
+ rlsTyped, ok := rls.(*helmreleasev1.Release)
+ if !ok {
+ return
+ }
+ obs := release.ObserveRelease(rlsTyped)
observed[obs.Version] = obs
}
}
@@ -193,7 +198,7 @@ func summarize(req *Request) {
// eventMessageWithLog returns an event message composed out of the given
// message and any log messages by appending them to the message.
func eventMessageWithLog(msg string, log *action.LogBuffer) string {
- if log != nil && log.Len() > 0 {
+ if !log.Empty() {
msg = msg + "\n\nLast Helm logs:\n\n" + log.String()
}
return msg
diff --git a/internal/reconcile/release_test.go b/internal/reconcile/release_test.go
index c9092311d..40a4c62bd 100644
--- a/internal/reconcile/release_test.go
+++ b/internal/reconcile/release_test.go
@@ -17,13 +17,15 @@ limitations under the License.
package reconcile
import (
+ "context"
"fmt"
"testing"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
- "helm.sh/helm/v3/pkg/chart"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/fluxcd/pkg/apis/kustomize"
"github.com/fluxcd/pkg/apis/meta"
@@ -544,12 +546,13 @@ func Test_summarize(t *testing.T) {
}
}
-func mockLogBuffer(size int, lines int) *action.LogBuffer {
- log := action.NewLogBuffer(action.NewDebugLog(logr.Discard()), size)
- for i := 0; i < lines; i++ {
- log.Log("line %d", i+1)
+func mockLogBuffer() *action.LogBuffer {
+ ctx := log.IntoContext(context.Background(), logr.Discard())
+ buf := action.NewDebugLogBuffer(ctx)
+ for i := range 10 {
+ buf.Appendf("line %d", i+1)
}
- return log
+ return buf
}
func Test_RecordOnObject(t *testing.T) {
diff --git a/internal/reconcile/rollback_remediation.go b/internal/reconcile/rollback_remediation.go
index d1d492ac1..962f1b261 100644
--- a/internal/reconcile/rollback_remediation.go
+++ b/internal/reconcile/rollback_remediation.go
@@ -21,13 +21,12 @@ import (
"fmt"
"strings"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/logger"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
@@ -76,8 +75,8 @@ func NewRollbackRemediation(configFactory *action.ConfigFactory, eventRecorder r
func (r *RollbackRemediation) Reconcile(ctx context.Context, req *Request) error {
var (
cur = req.Object.Status.History.Latest().DeepCopy()
- logBuf = action.NewLogBuffer(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.DebugLevel)), 10)
- cfg = r.configFactory.Build(logBuf.Log, observeRollback(req.Object))
+ logBuf = action.NewDebugLogBuffer(ctx)
+ cfg = r.configFactory.Build(logBuf, observeRollback(req.Object))
)
defer summarize(req)
@@ -180,7 +179,11 @@ func (r *RollbackRemediation) success(req *Request, prev *v2.Snapshot) {
// If no matching snapshot is found, it creates a new snapshot and prepends it
// to the release history.
func observeRollback(obj *v2.HelmRelease) storage.ObserveFunc {
- return func(rls *helmrelease.Release) {
+ return func(rlsr helmrelease.Releaser) {
+ rls, ok := rlsr.(*helmreleasev1.Release)
+ if !ok {
+ return
+ }
for i := range obj.Status.History {
snap := obj.Status.History[i]
if snap.Targets(rls.Name, rls.Namespace, rls.Version) {
diff --git a/internal/reconcile/rollback_remediation_test.go b/internal/reconcile/rollback_remediation_test.go
index 89c4c0717..65f5f39d3 100644
--- a/internal/reconcile/rollback_remediation_test.go
+++ b/internal/reconcile/rollback_remediation_test.go
@@ -25,10 +25,11 @@ import (
"time"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmreleaseutil "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmreleaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
@@ -89,14 +90,14 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: namespace,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Namespace: namespace,
}),
}
@@ -129,14 +130,14 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: namespace,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Namespace: namespace,
}),
}
@@ -163,14 +164,14 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithFailingHook()),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: namespace,
}, testutil.ReleaseWithFailingHook()),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Namespace: namespace,
}),
}
@@ -185,9 +186,9 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
},
expectConditions: []metav1.Condition{
*conditions.FalseCondition(meta.ReadyCondition, v2.RollbackFailedReason,
- "timed out waiting for the condition"),
+ "context deadline exceeded"),
*conditions.FalseCondition(v2.RemediatedCondition, v2.RollbackFailedReason,
- "timed out waiting for the condition"),
+ "context deadline exceeded"),
},
expectHistory: func(releases []*helmrelease.Release) v2.Snapshots {
return v2.Snapshots{
@@ -212,14 +213,14 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: namespace,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Namespace: namespace,
}),
}
@@ -261,14 +262,14 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Namespace: namespace,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Namespace: namespace,
}),
}
@@ -355,7 +356,7 @@ func TestRollbackRemediation_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
helmreleaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
@@ -431,7 +432,7 @@ func TestRollbackRemediation_failure(t *testing.T) {
eventRecorder: recorder,
}
req := &Request{Object: obj.DeepCopy()}
- r.failure(req, release.ObservedToSnapshot(release.ObserveRelease(prev)), mockLogBuffer(5, 10), err)
+ r.failure(req, release.ObservedToSnapshot(release.ObserveRelease(prev)), mockLogBuffer(), err)
expectSubStr := "Last Helm logs"
g.Expect(conditions.IsFalse(req.Object, v2.RemediatedCondition)).To(BeTrue())
@@ -456,7 +457,7 @@ func TestRollbackRemediation_success(t *testing.T) {
r := &RollbackRemediation{
eventRecorder: recorder,
}
- req := &Request{Object: &v2.HelmRelease{}, Values: map[string]interface{}{"foo": "bar"}}
+ req := &Request{Object: &v2.HelmRelease{}, Values: map[string]any{"foo": "bar"}}
r.success(req, release.ObservedToSnapshot(release.ObserveRelease(prev)))
expectMsg := fmt.Sprintf(fmtRollbackRemediationSuccess,
@@ -492,7 +493,7 @@ func Test_observeRollback(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusPendingRollback,
+ Status: helmreleasecommon.StatusPendingRollback,
})
observeRollback(obj)(rls)
expect := release.ObservedToSnapshot(release.ObserveRelease(rls))
@@ -509,7 +510,7 @@ func Test_observeRollback(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
}
obj := &v2.HelmRelease{
Status: v2.HelmReleaseStatus{
@@ -522,7 +523,7 @@ func Test_observeRollback(t *testing.T) {
Name: latest.Name,
Namespace: latest.Namespace,
Version: latest.Version + 1,
- Status: helmrelease.StatusPendingRollback,
+ Status: helmreleasecommon.StatusPendingRollback,
})
expect := release.ObservedToSnapshot(release.ObserveRelease(rls))
@@ -540,13 +541,13 @@ func Test_observeRollback(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
}
latest := &v2.Snapshot{
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 3,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
}
obj := &v2.HelmRelease{
@@ -561,7 +562,7 @@ func Test_observeRollback(t *testing.T) {
Name: previous.Name,
Namespace: previous.Namespace,
Version: previous.Version,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
})
expect := release.ObservedToSnapshot(release.ObserveRelease(rls))
@@ -579,7 +580,7 @@ func Test_observeRollback(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
TestHooks: &map[string]*v2.TestHookStatus{
"test-hook": {
Phase: helmrelease.HookPhaseSucceeded.String(),
@@ -590,7 +591,7 @@ func Test_observeRollback(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 3,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
}
obj := &v2.HelmRelease{
@@ -605,7 +606,7 @@ func Test_observeRollback(t *testing.T) {
Name: previous.Name,
Namespace: previous.Namespace,
Version: previous.Version,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
})
expect := release.ObservedToSnapshot(release.ObserveRelease(rls))
expect.SetTestHooks(previous.GetTestHooks())
@@ -624,14 +625,14 @@ func Test_observeRollback(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
OCIDigest: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
}
latest := &v2.Snapshot{
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 3,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
OCIDigest: "sha256:aedc2b0de1576a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
}
@@ -647,7 +648,7 @@ func Test_observeRollback(t *testing.T) {
Name: previous.Name,
Namespace: previous.Namespace,
Version: previous.Version,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
})
obs := release.ObserveRelease(rls)
obs.OCIDigest = "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6"
diff --git a/internal/reconcile/state.go b/internal/reconcile/state.go
index 8a6c8b21f..052e8b4a0 100644
--- a/internal/reconcile/state.go
+++ b/internal/reconcile/state.go
@@ -24,8 +24,9 @@ import (
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions"
"github.com/fluxcd/pkg/ssa/jsondiff"
- "helm.sh/helm/v3/pkg/kube"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v4/pkg/kube"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/helm-controller/internal/action"
@@ -106,7 +107,7 @@ func DetermineReleaseState(ctx context.Context, cfg *action.ConfigFactory, req *
// Confirm we have a release object to compare against.
if req.Object.Status.History.Len() == 0 {
- if rls.Info.Status == helmrelease.StatusUninstalled {
+ if rls.Info.Status == helmreleasecommon.StatusUninstalled {
return ReleaseState{Status: ReleaseStatusAbsent, Reason: "found uninstalled release in storage"}, nil
}
return ReleaseState{Status: ReleaseStatusUnmanaged, Reason: "found existing release in storage"}, err
@@ -130,11 +131,11 @@ func DetermineReleaseState(ctx context.Context, cfg *action.ConfigFactory, req *
// Further determine the state of the release based on the Helm release
// status, which can now be considered reliable.
switch rls.Info.Status {
- case helmrelease.StatusFailed:
+ case helmreleasecommon.StatusFailed:
return ReleaseState{Status: ReleaseStatusFailed}, nil
- case helmrelease.StatusUninstalled:
+ case helmreleasecommon.StatusUninstalled:
return ReleaseState{Status: ReleaseStatusAbsent, Reason: "found uninstalled release in storage"}, nil
- case helmrelease.StatusDeployed:
+ case helmreleasecommon.StatusDeployed:
// Verify the release is in sync with the desired configuration.
if err = action.VerifyRelease(rls, cur, req.Chart.Metadata, req.Values); err != nil {
switch err {
diff --git a/internal/reconcile/state_test.go b/internal/reconcile/state_test.go
index 3262067b8..8e7c9d415 100644
--- a/internal/reconcile/state_test.go
+++ b/internal/reconcile/state_test.go
@@ -22,11 +22,12 @@ import (
"testing"
. "github.com/onsi/gomega"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -62,9 +63,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
return v2.HelmReleaseStatus{
@@ -74,7 +75,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusInSync,
},
@@ -95,7 +96,7 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}))),
},
@@ -112,7 +113,7 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}),
},
@@ -127,9 +128,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
cur := release.ObservedToSnapshot(release.ObserveRelease(releases[0]))
@@ -141,7 +142,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusUnmanaged,
},
@@ -153,9 +154,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
cur := release.ObservedToSnapshot(release.ObserveRelease(releases[0]))
@@ -168,7 +169,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusUnmanaged,
},
@@ -180,12 +181,12 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusPendingInstall,
+ Status: helmreleasecommon.StatusPendingInstall,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusLocked,
},
@@ -197,9 +198,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
spec: func(spec *v2.HelmReleaseSpec) {
spec.Test = &v2.Test{
@@ -214,7 +215,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusUntested,
},
@@ -226,7 +227,7 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Chart: testutil.BuildChart(),
}),
testutil.BuildRelease(
@@ -234,10 +235,10 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
},
- testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"}),
+ testutil.ReleaseWithConfig(map[string]any{"foo": "bar"}),
testutil.ReleaseWithHookExecution("failure-tests", []helmrelease.HookEvent{helmrelease.HookTest},
helmrelease.HookPhaseFailed),
),
@@ -259,7 +260,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusFailed,
},
@@ -272,16 +273,16 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
},
- testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"}),
+ testutil.ReleaseWithConfig(map[string]any{"foo": "bar"}),
testutil.ReleaseWithHookExecution("failure-tests", []helmrelease.HookEvent{helmrelease.HookTest},
helmrelease.HookPhaseFailed),
),
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
spec: func(spec *v2.HelmReleaseSpec) {
spec.Test = &v2.Test{
Enable: true,
@@ -311,16 +312,16 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
},
- testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"}),
+ testutil.ReleaseWithConfig(map[string]any{"foo": "bar"}),
testutil.ReleaseWithHookExecution("failure-tests", []helmrelease.HookEvent{helmrelease.HookTest},
helmrelease.HookPhaseFailed),
),
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
spec: func(spec *v2.HelmReleaseSpec) {
spec.Test = &v2.Test{
Enable: true,
@@ -344,19 +345,19 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
Chart: testutil.BuildChart(),
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 2,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{},
+ values: map[string]any{},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
return v2.HelmReleaseStatus{
History: v2.Snapshots{
@@ -376,12 +377,12 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{},
+ values: map[string]any{},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
return v2.HelmReleaseStatus{
History: v2.Snapshots{
@@ -400,9 +401,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
want: ReleaseState{
Status: ReleaseStatusAbsent,
@@ -415,9 +416,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
return v2.HelmReleaseStatus{
@@ -427,7 +428,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(testutil.ChartWithName("other-name")),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusOutOfSync,
},
@@ -439,9 +440,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
status: func(releases []*helmrelease.Release) v2.HelmReleaseStatus {
return v2.HelmReleaseStatus{
@@ -451,7 +452,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"bar": "foo"},
+ values: map[string]any{"bar": "foo"},
want: ReleaseState{
Status: ReleaseStatusOutOfSync,
},
@@ -463,9 +464,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
spec: func(spec *v2.HelmReleaseSpec) {
spec.PostRenderers = postRenderers2
@@ -486,7 +487,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusOutOfSync,
},
@@ -498,9 +499,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
spec: func(spec *v2.HelmReleaseSpec) {
spec.PostRenderers = postRenderers2
@@ -521,7 +522,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusInSync,
},
@@ -533,9 +534,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
spec: func(spec *v2.HelmReleaseSpec) {
spec.CommonMetadata = commonMetadata2
@@ -556,7 +557,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusOutOfSync,
},
@@ -568,9 +569,9 @@ func Test_DetermineReleaseState(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})),
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})),
},
spec: func(spec *v2.HelmReleaseSpec) {
spec.CommonMetadata = commonMetadata2
@@ -591,7 +592,7 @@ func Test_DetermineReleaseState(t *testing.T) {
}
},
chart: testutil.BuildChart(),
- values: map[string]interface{}{"foo": "bar"},
+ values: map[string]any{"foo": "bar"},
want: ReleaseState{
Status: ReleaseStatusInSync,
},
@@ -667,17 +668,17 @@ func TestDetermineReleaseState_DriftDetection(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "fixture",
"namespace": namespace,
"creationTimestamp": nil,
- "labels": map[string]interface{}{
+ "labels": map[string]any{
"app.kubernetes.io/managed-by": "Helm",
},
- "annotations": map[string]interface{}{
+ "annotations": map[string]any{
"meta.helm.sh/release-name": mockReleaseName,
"meta.helm.sh/release-namespace": namespace,
},
@@ -707,17 +708,17 @@ func TestDetermineReleaseState_DriftDetection(t *testing.T) {
{
Type: jsondiff.DiffTypeCreate,
DesiredObject: &unstructured.Unstructured{
- Object: map[string]interface{}{
+ Object: map[string]any{
"apiVersion": "v1",
"kind": "Secret",
- "metadata": map[string]interface{}{
+ "metadata": map[string]any{
"name": "fixture",
"namespace": namespace,
"creationTimestamp": nil,
- "labels": map[string]interface{}{
+ "labels": map[string]any{
"app.kubernetes.io/managed-by": "Helm",
},
- "annotations": map[string]interface{}{
+ "annotations": map[string]any{
"meta.helm.sh/release-name": mockReleaseName,
"meta.helm.sh/release-namespace": namespace,
},
@@ -763,7 +764,7 @@ func TestDetermineReleaseState_DriftDetection(t *testing.T) {
Name: mockReleaseName,
Namespace: releaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: chart,
})
diff --git a/internal/reconcile/suite_test.go b/internal/reconcile/suite_test.go
index a724aa6b1..81fb8714f 100644
--- a/internal/reconcile/suite_test.go
+++ b/internal/reconcile/suite_test.go
@@ -22,7 +22,9 @@ import (
"path/filepath"
"testing"
- "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v4/pkg/kube"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/meta"
@@ -149,3 +151,15 @@ func (c namespaceClientConfig) Namespace() (string, bool, error) {
func (c namespaceClientConfig) ConfigAccess() clientcmd.ConfigAccess {
return nil
}
+
+func storeHistory(store *helmstorage.Storage, releaseName string) ([]*helmrelease.Release, error) {
+ releasers, err := store.History(releaseName)
+ if err != nil {
+ return nil, err
+ }
+ history := make([]*helmrelease.Release, 0, len(releasers))
+ for _, r := range releasers {
+ history = append(history, r.(*helmrelease.Release))
+ }
+ return history, nil
+}
diff --git a/internal/reconcile/test.go b/internal/reconcile/test.go
index db28a75e0..1f9d4e2fa 100644
--- a/internal/reconcile/test.go
+++ b/internal/reconcile/test.go
@@ -21,11 +21,10 @@ import (
"fmt"
"strings"
- "github.com/fluxcd/pkg/runtime/logger"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/runtime/conditions"
@@ -73,7 +72,7 @@ func NewTest(cfg *action.ConfigFactory, recorder record.EventRecorder) *Test {
func (r *Test) Reconcile(ctx context.Context, req *Request) error {
var (
cur = req.Object.Status.History.Latest().DeepCopy()
- cfg = r.configFactory.Build(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.DebugLevel)), observeTest(req.Object))
+ cfg = r.configFactory.Build(action.NewDebugLogBuffer(ctx), observeTest(req.Object))
)
defer summarize(req)
@@ -193,7 +192,11 @@ func (r *Test) success(req *Request) {
// It only accepts test results for the latest release and updates the
// latest snapshot with the observed test results.
func observeTest(obj *v2.HelmRelease) storage.ObserveFunc {
- return func(rls *helmrelease.Release) {
+ return func(rlsr helmrelease.Releaser) {
+ rls, ok := rlsr.(*helmreleasev1.Release)
+ if !ok {
+ return
+ }
// Only accept test results for the latest release.
if !obj.Status.History.Latest().Targets(rls.Name, rls.Namespace, rls.Version) {
return
diff --git a/internal/reconcile/test_test.go b/internal/reconcile/test_test.go
index 516a5027e..c458acc18 100644
--- a/internal/reconcile/test_test.go
+++ b/internal/reconcile/test_test.go
@@ -24,10 +24,11 @@ import (
"time"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmreleaseutil "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmreleaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
@@ -118,7 +119,7 @@ func TestTest_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithTestHook()),
}
},
@@ -149,7 +150,7 @@ func TestTest_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}),
}
@@ -181,7 +182,7 @@ func TestTest_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(testutil.ChartWithFailingTestHook()),
}, testutil.ReleaseWithFailingTestHook()),
}
@@ -197,9 +198,9 @@ func TestTest_Reconcile(t *testing.T) {
},
expectConditions: []metav1.Condition{
*conditions.FalseCondition(meta.ReadyCondition, v2.TestFailedReason,
- "timed out waiting for the condition"),
+ "ontext deadline exceeded"),
*conditions.FalseCondition(v2.TestSuccessCondition, v2.TestFailedReason,
- "timed out waiting for the condition"),
+ "ontext deadline exceeded"),
},
expectHistory: func(releases []*helmrelease.Release) v2.Snapshots {
withTests := release.ObservedToSnapshot(release.ObserveRelease(releases[0]))
@@ -218,7 +219,7 @@ func TestTest_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}, testutil.ReleaseWithTestHook()),
}
},
@@ -234,14 +235,14 @@ func TestTest_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}, testutil.ReleaseWithTestHook()),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -289,7 +290,7 @@ func TestTest_Reconcile(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
Test: &v2.Test{
Enable: true,
},
@@ -331,7 +332,7 @@ func TestTest_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
helmreleaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
diff --git a/internal/reconcile/uninstall.go b/internal/reconcile/uninstall.go
index 7c0a66e1b..add824f69 100644
--- a/internal/reconcile/uninstall.go
+++ b/internal/reconcile/uninstall.go
@@ -22,14 +22,14 @@ import (
"fmt"
"strings"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/logger"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
@@ -81,8 +81,8 @@ func NewUninstall(cfg *action.ConfigFactory, recorder record.EventRecorder) *Uni
func (r *Uninstall) Reconcile(ctx context.Context, req *Request) error {
var (
cur = req.Object.Status.History.Latest().DeepCopy()
- logBuf = action.NewLogBuffer(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.DebugLevel)), 10)
- cfg = r.configFactory.Build(logBuf.Log, observeUninstall(req.Object))
+ logBuf = action.NewDebugLogBuffer(ctx)
+ cfg = r.configFactory.Build(logBuf, observeUninstall(req.Object))
)
defer summarize(req)
@@ -119,9 +119,15 @@ func (r *Uninstall) Reconcile(ctx context.Context, req *Request) error {
// The Helm uninstall action does always target the latest release. Before
// accepting results, we need to confirm this is actually the release we
// have recorded as latest.
- if res != nil && !release.ObserveRelease(res.Release).Targets(cur.Name, cur.Namespace, cur.Version) {
- err = fmt.Errorf("%w: uninstalled release %s/%s.v%d != current release %s",
- ErrReleaseMismatch, res.Release.Namespace, res.Release.Name, res.Release.Version, cur.FullReleaseName())
+ if res != nil {
+ rls, ok := res.Release.(*helmreleasev1.Release)
+ if !ok {
+ return fmt.Errorf("only the Chart API v2 is supported")
+ }
+ if !release.ObserveRelease(rls).Targets(cur.Name, cur.Namespace, cur.Version) {
+ err = fmt.Errorf("%w: uninstalled release %s/%s.v%d != current release %s",
+ ErrReleaseMismatch, rls.Namespace, rls.Name, rls.Version, cur.FullReleaseName())
+ }
}
// The Helm uninstall action may return without an error while the update
@@ -130,7 +136,7 @@ func (r *Uninstall) Reconcile(ctx context.Context, req *Request) error {
// An exception is made for the case where the release was already marked
// as uninstalled, which would only result in the release object getting
// removed from the storage.
- if s := helmrelease.Status(cur.Status); s != helmrelease.StatusUninstalled {
+ if s := helmreleasecommon.Status(cur.Status); s != helmreleasecommon.StatusUninstalled {
err = fmt.Errorf("uninstall completed with error: %w", ErrNoStorageUpdate)
}
}
@@ -217,7 +223,11 @@ func observeUninstall(obj *v2.HelmRelease) storage.ObserveFunc {
// But like during rollback, Helm may supersede any previous releases.
// As such, we need to update all releases we have in our history.
// xref: https://github.com/helm/helm/pull/12564
- return func(rls *helmrelease.Release) {
+ return func(rlsr helmrelease.Releaser) {
+ rls, ok := rlsr.(*helmreleasev1.Release)
+ if !ok {
+ return
+ }
for i := range obj.Status.History {
snap := obj.Status.History[i]
if snap.Targets(rls.Name, rls.Namespace, rls.Version) {
diff --git a/internal/reconcile/uninstall_remediation.go b/internal/reconcile/uninstall_remediation.go
index c0a01e645..596abdc4c 100644
--- a/internal/reconcile/uninstall_remediation.go
+++ b/internal/reconcile/uninstall_remediation.go
@@ -22,12 +22,11 @@ import (
"fmt"
"strings"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/logger"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
@@ -81,8 +80,8 @@ func NewUninstallRemediation(cfg *action.ConfigFactory, recorder record.EventRec
func (r *UninstallRemediation) Reconcile(ctx context.Context, req *Request) error {
var (
cur = req.Object.Status.History.Latest().DeepCopy()
- logBuf = action.NewLogBuffer(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.DebugLevel)), 10)
- cfg = r.configFactory.Build(logBuf.Log, observeUninstall(req.Object))
+ logBuf = action.NewDebugLogBuffer(ctx)
+ cfg = r.configFactory.Build(logBuf, observeUninstall(req.Object))
)
// Require current to run uninstall.
@@ -96,9 +95,15 @@ func (r *UninstallRemediation) Reconcile(ctx context.Context, req *Request) erro
// The Helm uninstall action does always target the latest release. Before
// accepting results, we need to confirm this is actually the release we
// have recorded as latest.
- if res != nil && !release.ObserveRelease(res.Release).Targets(cur.Name, cur.Namespace, cur.Version) {
- err = fmt.Errorf("%w: uninstalled release %s/%s.v%d != current release %s",
- ErrReleaseMismatch, res.Release.Namespace, res.Release.Name, res.Release.Version, cur.FullReleaseName())
+ if res != nil {
+ rls, ok := res.Release.(*helmreleasev1.Release)
+ if !ok {
+ return fmt.Errorf("only the Chart API v2 is supported")
+ }
+ if !release.ObserveRelease(rls).Targets(cur.Name, cur.Namespace, cur.Version) {
+ err = fmt.Errorf("%w: uninstalled release %s/%s.v%d != current release %s",
+ ErrReleaseMismatch, rls.Namespace, rls.Name, rls.Version, cur.FullReleaseName())
+ }
}
// The Helm uninstall action may return without an error while the update
diff --git a/internal/reconcile/uninstall_remediation_test.go b/internal/reconcile/uninstall_remediation_test.go
index 3b782cf85..02e921b37 100644
--- a/internal/reconcile/uninstall_remediation_test.go
+++ b/internal/reconcile/uninstall_remediation_test.go
@@ -24,10 +24,11 @@ import (
"time"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
- "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
@@ -88,7 +89,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -122,7 +123,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(testutil.ChartWithFailingHook()),
}, testutil.ReleaseWithFailingHook()),
}
@@ -141,7 +142,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
},
expectConditions: []metav1.Condition{
*conditions.FalseCondition(v2.RemediatedCondition, v2.UninstallFailedReason,
- "uninstallation completed with 1 error(s): 1 error occurred:\n\t* timed out waiting for the condition"),
+ "context deadline exceeded"),
},
expectHistory: func(releases []*helmrelease.Release) v2.Snapshots {
return v2.Snapshots{
@@ -170,7 +171,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -217,7 +218,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}),
}
@@ -248,7 +249,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -264,14 +265,14 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}, testutil.ReleaseWithTestHook()),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -322,7 +323,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
if tt.spec != nil {
@@ -361,7 +362,7 @@ func TestUninstallRemediation_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
releaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
@@ -438,7 +439,7 @@ func TestUninstallRemediation_failure(t *testing.T) {
eventRecorder: recorder,
}
req := &Request{Object: obj.DeepCopy()}
- r.failure(req, mockLogBuffer(5, 10), err)
+ r.failure(req, mockLogBuffer(), err)
expectSubStr := "Last Helm logs"
g.Expect(conditions.IsFalse(req.Object, v2.RemediatedCondition)).To(BeTrue())
diff --git a/internal/reconcile/uninstall_test.go b/internal/reconcile/uninstall_test.go
index 88d030534..c88c757e3 100644
--- a/internal/reconcile/uninstall_test.go
+++ b/internal/reconcile/uninstall_test.go
@@ -24,10 +24,11 @@ import (
"time"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
- "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ releaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
@@ -90,7 +91,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -126,7 +127,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(testutil.ChartWithFailingHook()),
}, testutil.ReleaseWithFailingHook()),
}
@@ -145,9 +146,9 @@ func TestUninstall_Reconcile(t *testing.T) {
},
expectConditions: []metav1.Condition{
*conditions.FalseCondition(meta.ReadyCondition, v2.UninstallFailedReason,
- "uninstallation completed with 1 error(s): 1 error occurred:\n\t* timed out waiting for the condition"),
+ "context deadline exceeded"),
*conditions.FalseCondition(v2.ReleasedCondition, v2.UninstallFailedReason,
- "uninstallation completed with 1 error(s): 1 error occurred:\n\t* timed out waiting for the condition"),
+ "context deadline exceeded"),
},
expectHistory: func(namespace string, releases []*helmrelease.Release) v2.Snapshots {
return v2.Snapshots{
@@ -155,7 +156,7 @@ func TestUninstall_Reconcile(t *testing.T) {
}
},
expectFailures: 1,
- wantErrString: "timed out waiting",
+ wantErrString: "context deadline exceeded",
},
{
name: "uninstall failure without storage update",
@@ -177,7 +178,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -226,7 +227,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: namespace,
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
Chart: testutil.BuildChart(),
}),
}
@@ -261,7 +262,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -277,14 +278,14 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}, testutil.ReleaseWithTestHook()),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -334,7 +335,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -366,7 +367,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
}),
}
},
@@ -389,7 +390,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
})
return v2.Snapshots{
release.ObservedToSnapshot(release.ObserveRelease(rls)),
@@ -405,7 +406,7 @@ func TestUninstall_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
}),
}
},
@@ -456,7 +457,7 @@ func TestUninstall_Reconcile(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
if tt.spec != nil {
@@ -497,7 +498,7 @@ func TestUninstall_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
releaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
@@ -574,7 +575,7 @@ func TestUninstall_failure(t *testing.T) {
eventRecorder: recorder,
}
req := &Request{Object: obj.DeepCopy()}
- r.failure(req, mockLogBuffer(5, 10), err)
+ r.failure(req, mockLogBuffer(), err)
expectSubStr := "Last Helm logs"
g.Expect(conditions.IsFalse(req.Object, v2.ReleasedCondition)).To(BeTrue())
@@ -643,7 +644,7 @@ func Test_observeUninstall(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
}
obj := &v2.HelmRelease{
Status: v2.HelmReleaseStatus{
@@ -656,7 +657,7 @@ func Test_observeUninstall(t *testing.T) {
Name: current.Name,
Namespace: current.Namespace,
Version: current.Version,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
})
expect := release.ObservedToSnapshot(release.ObserveRelease(rls))
@@ -678,7 +679,7 @@ func Test_observeUninstall(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusUninstalling,
+ Status: helmreleasecommon.StatusUninstalling,
})
observeUninstall(obj)(rls)
@@ -692,7 +693,7 @@ func Test_observeUninstall(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
}
obj := &v2.HelmRelease{
Status: v2.HelmReleaseStatus{
@@ -705,7 +706,7 @@ func Test_observeUninstall(t *testing.T) {
Name: current.Name,
Namespace: current.Namespace,
Version: current.Version + 1,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
})
observeUninstall(obj)(rls)
@@ -720,7 +721,7 @@ func Test_observeUninstall(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
OCIDigest: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
}
obj := &v2.HelmRelease{
@@ -734,7 +735,7 @@ func Test_observeUninstall(t *testing.T) {
Name: current.Name,
Namespace: current.Namespace,
Version: current.Version,
- Status: helmrelease.StatusUninstalled,
+ Status: helmreleasecommon.StatusUninstalled,
})
obs := release.ObserveRelease(rls)
obs.OCIDigest = "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6"
diff --git a/internal/reconcile/unlock.go b/internal/reconcile/unlock.go
index af32724f6..a12ca26ec 100644
--- a/internal/reconcile/unlock.go
+++ b/internal/reconcile/unlock.go
@@ -22,7 +22,9 @@ import (
"fmt"
"strings"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
@@ -80,7 +82,7 @@ func (r *Unlock) Reconcile(_ context.Context, req *Request) error {
cur := processCurrentSnaphot(req.Object, rls)
if status := rls.Info.Status; status.IsPending() {
// Update pending status to failed and persist.
- rls.SetStatus(helmrelease.StatusFailed, fmt.Sprintf("Release unlocked from stale '%s' state", status.String()))
+ rls.SetStatus(helmreleasecommon.StatusFailed, fmt.Sprintf("Release unlocked from stale '%s' state", status.String()))
if err = cfg.Releases.Update(rls); err != nil {
r.failure(req, cur, status, err)
return err
@@ -108,7 +110,7 @@ const (
// failure records the failure of an unlock action in the status of the given
// Request.Object by marking ReleasedCondition=False and increasing the failure
// counter. In addition, it emits a warning event for the Request.Object.
-func (r *Unlock) failure(req *Request, cur *v2.Snapshot, status helmrelease.Status, err error) {
+func (r *Unlock) failure(req *Request, cur *v2.Snapshot, status helmreleasecommon.Status, err error) {
// Compose failure message.
msg := fmt.Sprintf(fmtUnlockFailure, cur.FullReleaseName(), cur.VersionedChartName(), status.String(), strings.TrimSpace(err.Error()))
@@ -128,7 +130,7 @@ func (r *Unlock) failure(req *Request, cur *v2.Snapshot, status helmrelease.Stat
// success records the success of an unlock action in the status of the given
// Request.Object by marking ReleasedCondition=False and emitting an event.
-func (r *Unlock) success(req *Request, cur *v2.Snapshot, status helmrelease.Status) {
+func (r *Unlock) success(req *Request, cur *v2.Snapshot, status helmreleasecommon.Status) {
// Compose success message.
msg := fmt.Sprintf(fmtUnlockSuccess, cur.FullReleaseName(), cur.VersionedChartName(), status.String())
@@ -150,7 +152,11 @@ func (r *Unlock) success(req *Request, cur *v2.Snapshot, status helmrelease.Stat
// It updates the snapshot of a release when an unlock action is observed for
// that release.
func observeUnlock(obj *v2.HelmRelease) storage.ObserveFunc {
- return func(rls *helmrelease.Release) {
+ return func(rlsr helmrelease.Releaser) {
+ rls, ok := rlsr.(*helmreleasev1.Release)
+ if !ok {
+ return
+ }
for i := range obj.Status.History {
snap := obj.Status.History[i]
if snap.Targets(rls.Name, rls.Namespace, rls.Version) {
@@ -164,7 +170,7 @@ func observeUnlock(obj *v2.HelmRelease) storage.ObserveFunc {
// processCurrentSnaphot processes the current snapshot based on a Helm release.
// It also looks for the OCIDigest in the corresponding v2.HelmRelease history and
// updates the current snapshot with the OCIDigest if found.
-func processCurrentSnaphot(obj *v2.HelmRelease, rls *helmrelease.Release) *v2.Snapshot {
+func processCurrentSnaphot(obj *v2.HelmRelease, rls *helmreleasev1.Release) *v2.Snapshot {
cur := release.ObservedToSnapshot(release.ObserveRelease(rls))
for i := range obj.Status.History {
snap := obj.Status.History[i]
diff --git a/internal/reconcile/unlock_test.go b/internal/reconcile/unlock_test.go
index 606c82890..1fb22bdaa 100644
--- a/internal/reconcile/unlock_test.go
+++ b/internal/reconcile/unlock_test.go
@@ -24,10 +24,11 @@ import (
"time"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmreleaseutil "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmreleaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
@@ -89,7 +90,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusPendingInstall,
+ Status: helmreleasecommon.StatusPendingInstall,
}),
}
},
@@ -125,7 +126,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusPendingRollback,
+ Status: helmreleasecommon.StatusPendingRollback,
}),
}
},
@@ -157,7 +158,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
}),
}
},
@@ -168,7 +169,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: releases[0].Namespace,
Version: 1,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
},
},
}
@@ -180,7 +181,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: releases[0].Namespace,
Version: 1,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
},
}
},
@@ -194,7 +195,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 2,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -205,7 +206,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: releases[0].Namespace,
Version: releases[0].Version - 1,
- Status: helmrelease.StatusPendingInstall.String(),
+ Status: helmreleasecommon.StatusPendingInstall.String(),
},
},
}
@@ -216,7 +217,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: releases[0].Namespace,
Version: releases[0].Version - 1,
- Status: helmrelease.StatusPendingInstall.String(),
+ Status: helmreleasecommon.StatusPendingInstall.String(),
},
}
},
@@ -229,7 +230,7 @@ func TestUnlock_Reconcile(t *testing.T) {
&v2.Snapshot{
Name: mockReleaseName,
Version: 1,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
},
},
}
@@ -240,7 +241,7 @@ func TestUnlock_Reconcile(t *testing.T) {
&v2.Snapshot{
Name: mockReleaseName,
Version: 1,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
},
}
},
@@ -260,7 +261,7 @@ func TestUnlock_Reconcile(t *testing.T) {
Namespace: namespace,
Version: 1,
Chart: testutil.BuildChart(),
- Status: helmrelease.StatusPendingInstall,
+ Status: helmreleasecommon.StatusPendingInstall,
}),
}
},
@@ -270,7 +271,7 @@ func TestUnlock_Reconcile(t *testing.T) {
&v2.Snapshot{
Name: mockReleaseName,
Version: 1,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
},
},
}
@@ -282,7 +283,7 @@ func TestUnlock_Reconcile(t *testing.T) {
&v2.Snapshot{
Name: mockReleaseName,
Version: 1,
- Status: helmrelease.StatusFailed.String(),
+ Status: helmreleasecommon.StatusFailed.String(),
},
}
},
@@ -349,7 +350,7 @@ func TestUnlock_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
helmreleaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
@@ -376,7 +377,7 @@ func TestUnlock_failure(t *testing.T) {
Version: 4,
})
obj = &v2.HelmRelease{}
- status = helmrelease.StatusPendingInstall
+ status = helmreleasecommon.StatusPendingInstall
err = fmt.Errorf("unlock error")
)
@@ -424,7 +425,7 @@ func TestUnlock_success(t *testing.T) {
Version: 4,
})
obj = &v2.HelmRelease{}
- status = helmrelease.StatusPendingInstall
+ status = helmreleasecommon.StatusPendingInstall
)
recorder := testutil.NewFakeRecorder(10, false)
@@ -475,7 +476,7 @@ func TestUnlock_withOCIDigest(t *testing.T) {
Namespace: releaseNamespace,
Chart: testutil.BuildChart(),
Version: 4,
- Status: helmrelease.StatusPendingInstall,
+ Status: helmreleasecommon.StatusPendingInstall,
})
obs := release.ObserveRelease(rls)
@@ -520,7 +521,7 @@ func TestUnlock_withOCIDigest(t *testing.T) {
*conditions.FalseCondition(v2.ReleasedCondition, "PendingRelease", "Unlocked Helm release"),
}))
- releases, _ := store.History(mockReleaseName)
+ releases, _ := storeHistory(store, mockReleaseName)
helmreleaseutil.SortByRevision(releases)
expected := release.ObserveRelease(releases[0])
expected.OCIDigest = "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6"
@@ -561,7 +562,7 @@ func Test_observeUnlock(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusPendingRollback.String(),
+ Status: helmreleasecommon.StatusPendingRollback.String(),
},
},
},
@@ -570,7 +571,7 @@ func Test_observeUnlock(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
})
expect := release.ObservedToSnapshot(release.ObserveRelease(rls))
observeUnlock(obj)(rls)
@@ -590,7 +591,7 @@ func Test_observeUnlock(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusPendingRollback.String(),
+ Status: helmreleasecommon.StatusPendingRollback.String(),
OCIDigest: "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6",
},
},
@@ -600,7 +601,7 @@ func Test_observeUnlock(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
})
obs := release.ObserveRelease(rls)
obs.OCIDigest = "sha256:fcdc2b0de1581a3633ada4afee3f918f6eaa5b5ab38c3fef03d5b48d3f85d9f6"
@@ -620,7 +621,7 @@ func Test_observeUnlock(t *testing.T) {
Name: mockReleaseName,
Namespace: mockReleaseNamespace,
Version: 1,
- Status: helmrelease.StatusFailed,
+ Status: helmreleasecommon.StatusFailed,
})
observeUnlock(obj)(rls)
diff --git a/internal/reconcile/upgrade.go b/internal/reconcile/upgrade.go
index ea4f580ef..7858011b2 100644
--- a/internal/reconcile/upgrade.go
+++ b/internal/reconcile/upgrade.go
@@ -25,11 +25,9 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/chartutil"
"github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/logger"
v2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/fluxcd/helm-controller/internal/action"
@@ -67,9 +65,9 @@ func NewUpgrade(cfg *action.ConfigFactory, recorder record.EventRecorder) *Upgra
func (r *Upgrade) Reconcile(ctx context.Context, req *Request) error {
var (
- logBuf = action.NewLogBuffer(action.NewDebugLog(ctrl.LoggerFrom(ctx).V(logger.DebugLevel)), 10)
+ logBuf = action.NewDebugLogBuffer(ctx)
obsReleases = make(observedReleases)
- cfg = r.configFactory.Build(logBuf.Log, observeRelease(obsReleases))
+ cfg = r.configFactory.Build(logBuf, observeRelease(obsReleases))
startTime = time.Now()
)
diff --git a/internal/reconcile/upgrade_test.go b/internal/reconcile/upgrade_test.go
index 47ab11f08..241d53a00 100644
--- a/internal/reconcile/upgrade_test.go
+++ b/internal/reconcile/upgrade_test.go
@@ -24,12 +24,13 @@ import (
"time"
. "github.com/onsi/gomega"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmreleaseutil "helm.sh/helm/v3/pkg/releaseutil"
- helmstorage "helm.sh/helm/v3/pkg/storage"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
+ helmreleaseutil "helm.sh/helm/v4/pkg/release/v1/util"
+ helmstorage "helm.sh/helm/v4/pkg/storage"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -96,7 +97,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -128,7 +129,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -142,9 +143,9 @@ func TestUpgrade_Reconcile(t *testing.T) {
},
expectConditions: []metav1.Condition{
*conditions.FalseCondition(meta.ReadyCondition, v2.UpgradeFailedReason,
- "post-upgrade hooks failed: 1 error occurred:\n\t* timed out waiting for the condition"),
+ "context deadline exceeded"),
*conditions.FalseCondition(v2.ReleasedCondition, v2.UpgradeFailedReason,
- "post-upgrade hooks failed: 1 error occurred:\n\t* timed out waiting for the condition"),
+ "context deadline exceeded"),
},
expectHistory: func(releases []*helmrelease.Release) v2.Snapshots {
return v2.Snapshots{
@@ -170,7 +171,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -212,7 +213,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -248,7 +249,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -277,14 +278,14 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 1,
- Status: helmrelease.StatusSuperseded,
+ Status: helmreleasecommon.StatusSuperseded,
}),
testutil.BuildRelease(&helmrelease.MockReleaseOptions{
Name: mockReleaseName,
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 2,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -296,7 +297,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: releases[0].Namespace,
Version: 1,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
},
},
}
@@ -314,7 +315,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Name: mockReleaseName,
Namespace: releases[0].Namespace,
Version: 1,
- Status: helmrelease.StatusDeployed.String(),
+ Status: helmreleasecommon.StatusDeployed.String(),
},
}
},
@@ -328,7 +329,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(),
Version: 2,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
},
@@ -376,7 +377,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
ReleaseName: mockReleaseName,
TargetNamespace: releaseNamespace,
StorageNamespace: releaseNamespace,
- Timeout: &metav1.Duration{Duration: 100 * time.Millisecond},
+ Timeout: &metav1.Duration{Duration: 200 * time.Millisecond},
},
}
if tt.spec != nil {
@@ -417,7 +418,7 @@ func TestUpgrade_Reconcile(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
helmreleaseutil.SortByRevision(releases)
if tt.expectHistory != nil {
@@ -447,7 +448,7 @@ func TestUpgrade_Reconcile_withSubchartWithCRDs(t *testing.T) {
Namespace: namespace,
Chart: testutil.BuildChart(testutil.ChartWithTestHook()),
Version: 1,
- Status: helmrelease.StatusDeployed,
+ Status: helmreleasecommon.StatusDeployed,
}),
}
}
@@ -559,7 +560,7 @@ func TestUpgrade_Reconcile_withSubchartWithCRDs(t *testing.T) {
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(expectConditions))
- releases, _ = store.History(mockReleaseName)
+ releases, _ = storeHistory(store, mockReleaseName)
helmreleaseutil.SortByRevision(releases)
g.Expect(obj.Status.History).To(testutil.Equal(expectHistory(releases)))
@@ -625,7 +626,7 @@ func TestUpgrade_failure(t *testing.T) {
eventRecorder: recorder,
}
- req := &Request{Object: obj.DeepCopy(), Chart: chrt, Values: map[string]interface{}{"foo": "bar"}}
+ req := &Request{Object: obj.DeepCopy(), Chart: chrt, Values: map[string]any{"foo": "bar"}}
r.failure(req, nil, err)
expectMsg := fmt.Sprintf(fmtUpgradeFailure, mockReleaseNamespace, mockReleaseName, chrt.Name(),
@@ -660,7 +661,7 @@ func TestUpgrade_failure(t *testing.T) {
eventRecorder: recorder,
}
req := &Request{Object: obj.DeepCopy(), Chart: chrt}
- r.failure(req, mockLogBuffer(5, 10), err)
+ r.failure(req, mockLogBuffer(), err)
expectSubStr := "Last Helm logs"
g.Expect(conditions.IsFalse(req.Object, v2.ReleasedCondition)).To(BeTrue())
diff --git a/internal/release/decode_test.go b/internal/release/decode_test.go
index 1ea41c237..c48267994 100644
--- a/internal/release/decode_test.go
+++ b/internal/release/decode_test.go
@@ -23,7 +23,7 @@ import (
"encoding/json"
"io"
- rspb "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
)
var (
@@ -38,7 +38,7 @@ var (
// It is copied over from the Helm project to be able to deal
// with encoded releases.
// Ref: https://github.com/helm/helm/blob/v3.9.0/pkg/storage/driver/util.go#L56
-func decodeRelease(data string) (*rspb.Release, error) {
+func decodeRelease(data string) (*helmrelease.Release, error) {
// base64 decode string
b, err := b64.DecodeString(data)
if err != nil {
@@ -61,7 +61,7 @@ func decodeRelease(data string) (*rspb.Release, error) {
b = b2
}
- var rls rspb.Release
+ var rls helmrelease.Release
// unmarshal release object bytes
if err := json.Unmarshal(b, &rls); err != nil {
return nil, err
diff --git a/internal/release/digest_test.go b/internal/release/digest_test.go
index c340ca965..a721ace09 100644
--- a/internal/release/digest_test.go
+++ b/internal/release/digest_test.go
@@ -37,7 +37,7 @@ func TestDigest(t *testing.T) {
rel: Observation{
Name: "foo",
},
- exp: "sha256:91b6773f7696d3eb405708a07e2daedc6e69664dabac8e10af7d570d09f947d5",
+ exp: "sha256:f1498f27a16a09cd7e1ee610d924df065c03d30035638babc95dd9a8d412ce4d",
},
}
for _, tt := range tests {
diff --git a/internal/release/observation.go b/internal/release/observation.go
index fa8088914..2ab658625 100644
--- a/internal/release/observation.go
+++ b/internal/release/observation.go
@@ -21,8 +21,8 @@ import (
"io"
"github.com/mitchellh/copystructure"
- "helm.sh/helm/v3/pkg/chart"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -71,7 +71,7 @@ type Observation struct {
ChartMetadata chart.Metadata `json:"chartMetadata"`
// Config is the set of extra Values added to the chart.
// These values override the default values inside the chart.
- Config map[string]interface{} `json:"config"`
+ Config map[string]any `json:"config"`
// Manifest is the string representation of the rendered template.
Manifest string `json:"manifest"`
// Hooks are all the hooks declared for this release, and the current
@@ -126,13 +126,15 @@ func ObserveRelease(rel *helmrelease.Release, filter ...DataFilter) Observation
if rel.Chart != nil && rel.Chart.Metadata != nil {
if v, err := copystructure.Copy(rel.Chart.Metadata); err == nil {
- obsRel.ChartMetadata = *v.(*chart.Metadata)
+ if vTyped, ok := v.(*chart.Metadata); ok {
+ obsRel.ChartMetadata = *vTyped
+ }
}
}
if len(rel.Config) > 0 {
if v, err := copystructure.Copy(rel.Config); err == nil {
- obsRel.Config = v.(map[string]interface{})
+ obsRel.Config = v.(map[string]any)
}
}
@@ -165,9 +167,9 @@ func ObservedToSnapshot(rls Observation) *v2.Snapshot {
ChartName: rls.ChartMetadata.Name,
ChartVersion: rls.ChartMetadata.Version,
ConfigDigest: chartutil.DigestValues(digest.Canonical, rls.Config).String(),
- FirstDeployed: metav1.NewTime(rls.Info.FirstDeployed.Time),
- LastDeployed: metav1.NewTime(rls.Info.LastDeployed.Time),
- Deleted: metav1.NewTime(rls.Info.Deleted.Time),
+ FirstDeployed: metav1.NewTime(rls.Info.FirstDeployed),
+ LastDeployed: metav1.NewTime(rls.Info.LastDeployed),
+ Deleted: metav1.NewTime(rls.Info.Deleted),
Status: rls.Info.Status.String(),
OCIDigest: rls.OCIDigest,
}
@@ -181,8 +183,8 @@ func TestHooksFromRelease(rls *helmrelease.Release) map[string]*v2.TestHookStatu
var h *v2.TestHookStatus
if v != nil {
h = &v2.TestHookStatus{
- LastStarted: metav1.NewTime(v.LastRun.StartedAt.Time),
- LastCompleted: metav1.NewTime(v.LastRun.CompletedAt.Time),
+ LastStarted: metav1.NewTime(v.LastRun.StartedAt),
+ LastCompleted: metav1.NewTime(v.LastRun.CompletedAt),
Phase: v.LastRun.Phase.String(),
}
}
diff --git a/internal/release/observation_test.go b/internal/release/observation_test.go
index afd3182c4..7671d0b50 100644
--- a/internal/release/observation_test.go
+++ b/internal/release/observation_test.go
@@ -22,7 +22,7 @@ import (
. "github.com/onsi/gomega"
"github.com/opencontainers/go-digest"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v2 "github.com/fluxcd/helm-controller/api/v2"
@@ -189,7 +189,7 @@ func TestObserveRelease(t *testing.T) {
Version: 1,
Chart: testutil.BuildChart(),
},
- testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"}),
+ testutil.ReleaseWithConfig(map[string]any{"foo": "bar"}),
)
testReleaseWithLabels = testutil.BuildRelease(
&helmrelease.MockReleaseOptions{
@@ -298,7 +298,7 @@ func TestObservedToSnapshot(t *testing.T) {
Namespace: "namespace",
Version: 1,
Chart: testutil.BuildChart(),
- }, testutil.ReleaseWithConfig(map[string]interface{}{"foo": "bar"})))
+ }, testutil.ReleaseWithConfig(map[string]any{"foo": "bar"})))
got := ObservedToSnapshot(obs)
@@ -309,9 +309,9 @@ func TestObservedToSnapshot(t *testing.T) {
g.Expect(got.ChartVersion).To(Equal(obs.ChartMetadata.Version))
g.Expect(got.Status).To(BeEquivalentTo(obs.Info.Status))
- g.Expect(obs.Info.FirstDeployed.Time.Equal(got.FirstDeployed.Time)).To(BeTrue())
- g.Expect(obs.Info.LastDeployed.Time.Equal(got.LastDeployed.Time)).To(BeTrue())
- g.Expect(obs.Info.Deleted.Time.Equal(got.Deleted.Time)).To(BeTrue())
+ g.Expect(obs.Info.FirstDeployed.Equal(got.FirstDeployed.Time)).To(BeTrue())
+ g.Expect(obs.Info.LastDeployed.Equal(got.LastDeployed.Time)).To(BeTrue())
+ g.Expect(obs.Info.Deleted.Equal(got.Deleted.Time)).To(BeTrue())
g.Expect(got.Digest).ToNot(BeEmpty())
g.Expect(digest.Digest(got.Digest).Validate()).To(Succeed())
@@ -360,13 +360,13 @@ func TestTestHooksFromRelease(t *testing.T) {
g.Expect(TestHooksFromRelease(rls)).To(testutil.Equal(map[string]*v2.TestHookStatus{
hooks[0].Name: {},
hooks[1].Name: {
- LastStarted: metav1.Time{Time: hooks[1].LastRun.StartedAt.Time},
- LastCompleted: metav1.Time{Time: hooks[1].LastRun.CompletedAt.Time},
+ LastStarted: metav1.Time{Time: hooks[1].LastRun.StartedAt},
+ LastCompleted: metav1.Time{Time: hooks[1].LastRun.CompletedAt},
Phase: hooks[1].LastRun.Phase.String(),
},
hooks[2].Name: {
- LastStarted: metav1.Time{Time: hooks[2].LastRun.StartedAt.Time},
- LastCompleted: metav1.Time{Time: hooks[2].LastRun.CompletedAt.Time},
+ LastStarted: metav1.Time{Time: hooks[2].LastRun.StartedAt},
+ LastCompleted: metav1.Time{Time: hooks[2].LastRun.CompletedAt},
Phase: hooks[2].LastRun.Phase.String(),
},
}))
diff --git a/internal/release/observed_bench_test.go b/internal/release/observed_bench_test.go
index 77ef9fd5e..ce2e80e30 100644
--- a/internal/release/observed_bench_test.go
+++ b/internal/release/observed_bench_test.go
@@ -20,7 +20,7 @@ import (
"testing"
"github.com/opencontainers/go-digest"
- "helm.sh/helm/v3/pkg/release"
+ release "helm.sh/helm/v4/pkg/release/v1"
intdigest "github.com/fluxcd/helm-controller/internal/digest"
)
diff --git a/internal/release/suite_test.go b/internal/release/suite_test.go
index 659b03f9e..1e8463384 100644
--- a/internal/release/suite_test.go
+++ b/internal/release/suite_test.go
@@ -22,7 +22,7 @@ import (
"os"
"testing"
- "helm.sh/helm/v3/pkg/release"
+ release "helm.sh/helm/v4/pkg/release/v1"
)
var (
diff --git a/internal/release/util.go b/internal/release/util.go
index 5ef10718a..8a396cd16 100644
--- a/internal/release/util.go
+++ b/internal/release/util.go
@@ -17,7 +17,7 @@ limitations under the License.
package release
import (
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
)
// GetTestHooks returns the list of test hooks for the given release, indexed
diff --git a/internal/release/util_test.go b/internal/release/util_test.go
index c4555379d..2083e6ad9 100644
--- a/internal/release/util_test.go
+++ b/internal/release/util_test.go
@@ -20,7 +20,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
"github.com/fluxcd/helm-controller/internal/testutil"
)
diff --git a/internal/storage/failing.go b/internal/storage/failing.go
index 3669fcece..c6454a74e 100644
--- a/internal/storage/failing.go
+++ b/internal/storage/failing.go
@@ -17,8 +17,8 @@ limitations under the License.
package storage
import (
- "helm.sh/helm/v3/pkg/release"
- "helm.sh/helm/v3/pkg/storage/driver"
+ "helm.sh/helm/v4/pkg/release"
+ "helm.sh/helm/v4/pkg/storage/driver"
)
const (
@@ -56,7 +56,7 @@ func (o *Failing) Name() string {
}
// Get returns GetErr, or the embedded driver result.
-func (o *Failing) Get(key string) (*release.Release, error) {
+func (o *Failing) Get(key string) (release.Releaser, error) {
if o.GetErr != nil {
return nil, o.GetErr
}
@@ -64,7 +64,7 @@ func (o *Failing) Get(key string) (*release.Release, error) {
}
// List returns ListErr, or the embedded driver result.
-func (o *Failing) List(filter func(*release.Release) bool) ([]*release.Release, error) {
+func (o *Failing) List(filter func(release.Releaser) bool) ([]release.Releaser, error) {
if o.ListErr != nil {
return nil, o.ListErr
}
@@ -72,7 +72,7 @@ func (o *Failing) List(filter func(*release.Release) bool) ([]*release.Release,
}
// Query returns QueryErr, or the embedded driver result.
-func (o *Failing) Query(keyvals map[string]string) ([]*release.Release, error) {
+func (o *Failing) Query(keyvals map[string]string) ([]release.Releaser, error) {
if o.QueryErr != nil {
return nil, o.QueryErr
}
@@ -80,7 +80,7 @@ func (o *Failing) Query(keyvals map[string]string) ([]*release.Release, error) {
}
// Create returns CreateErr, or the embedded driver result.
-func (o *Failing) Create(key string, rls *release.Release) error {
+func (o *Failing) Create(key string, rls release.Releaser) error {
if o.CreateErr != nil {
return o.CreateErr
}
@@ -88,7 +88,7 @@ func (o *Failing) Create(key string, rls *release.Release) error {
}
// Update returns UpdateErr, or the embedded driver result.
-func (o *Failing) Update(key string, rls *release.Release) error {
+func (o *Failing) Update(key string, rls release.Releaser) error {
if o.UpdateErr != nil {
return o.UpdateErr
}
@@ -96,7 +96,7 @@ func (o *Failing) Update(key string, rls *release.Release) error {
}
// Delete returns DeleteErr, or the embedded driver result.
-func (o *Failing) Delete(key string) (*release.Release, error) {
+func (o *Failing) Delete(key string) (release.Releaser, error) {
if o.DeleteErr != nil {
return nil, o.DeleteErr
}
diff --git a/internal/storage/observer.go b/internal/storage/observer.go
index a0885ad76..95de0196f 100644
--- a/internal/storage/observer.go
+++ b/internal/storage/observer.go
@@ -17,8 +17,8 @@ limitations under the License.
package storage
import (
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
)
// ObserverDriverName contains the string representation of Observer.
@@ -46,7 +46,7 @@ type Observer struct {
// storage.
// NOTE: while it takes a pointer, the caller is expected to perform a
// read-only operation.
-type ObserveFunc func(rel *helmrelease.Release)
+type ObserveFunc func(rel helmrelease.Releaser)
// NewObserver creates a new Observer for the given Helm storage driver.
func NewObserver(driver helmdriver.Driver, observers ...ObserveFunc) *Observer {
@@ -62,23 +62,23 @@ func (o *Observer) Name() string {
}
// Get returns the release named by key or returns ErrReleaseNotFound.
-func (o *Observer) Get(key string) (*helmrelease.Release, error) {
+func (o *Observer) Get(key string) (helmrelease.Releaser, error) {
return o.driver.Get(key)
}
// List returns the list of all releases such that filter(release) == true.
-func (o *Observer) List(filter func(*helmrelease.Release) bool) ([]*helmrelease.Release, error) {
+func (o *Observer) List(filter func(helmrelease.Releaser) bool) ([]helmrelease.Releaser, error) {
return o.driver.List(filter)
}
// Query returns the set of releases that match the provided set of labels.
-func (o *Observer) Query(keyvals map[string]string) ([]*helmrelease.Release, error) {
+func (o *Observer) Query(keyvals map[string]string) ([]helmrelease.Releaser, error) {
return o.driver.Query(keyvals)
}
// Create creates a new release or returns driver.ErrReleaseExists.
// It observes the release as provided after a successful creation.
-func (o *Observer) Create(key string, rls *helmrelease.Release) error {
+func (o *Observer) Create(key string, rls helmrelease.Releaser) error {
if err := o.driver.Create(key, rls); err != nil {
return err
}
@@ -90,7 +90,7 @@ func (o *Observer) Create(key string, rls *helmrelease.Release) error {
// Update updates a release or returns driver.ErrReleaseNotFound.
// After a successful update, it observes the release as provided.
-func (o *Observer) Update(key string, rls *helmrelease.Release) error {
+func (o *Observer) Update(key string, rls helmrelease.Releaser) error {
if err := o.driver.Update(key, rls); err != nil {
return err
}
@@ -103,7 +103,7 @@ func (o *Observer) Update(key string, rls *helmrelease.Release) error {
// Delete deletes a release or returns driver.ErrReleaseNotFound.
// After a successful deletion, it observes the release as returned by the
// embedded driver.Deletor.
-func (o *Observer) Delete(key string) (*helmrelease.Release, error) {
+func (o *Observer) Delete(key string) (helmrelease.Releaser, error) {
rls, err := o.driver.Delete(key)
if err != nil {
return nil, err
diff --git a/internal/storage/observer_test.go b/internal/storage/observer_test.go
index b8e055e27..5a42eb35e 100644
--- a/internal/storage/observer_test.go
+++ b/internal/storage/observer_test.go
@@ -14,22 +14,26 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package storage
+package storage_test
import (
"fmt"
"testing"
. "github.com/onsi/gomega"
- helmrelease "helm.sh/helm/v3/pkg/release"
- helmdriver "helm.sh/helm/v3/pkg/storage/driver"
+ helmrelease "helm.sh/helm/v4/pkg/release"
+ helmreleasecommon "helm.sh/helm/v4/pkg/release/common"
+ helmreleasev1 "helm.sh/helm/v4/pkg/release/v1"
+ helmdriver "helm.sh/helm/v4/pkg/storage/driver"
+
+ "github.com/fluxcd/helm-controller/internal/storage"
)
func TestObserver_Name(t *testing.T) {
g := NewWithT(t)
- o := NewObserver(helmdriver.NewMemory())
- g.Expect(o.Name()).To(Equal(ObserverDriverName))
+ o := storage.NewObserver(helmdriver.NewMemory())
+ g.Expect(o.Name()).To(Equal(storage.ObserverDriverName))
}
func TestObserver_Get(t *testing.T) {
@@ -37,12 +41,12 @@ func TestObserver_Get(t *testing.T) {
g := NewWithT(t)
ms := helmdriver.NewMemory()
- rel := releaseStub("success", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("success", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(ms.Create(key, rel)).To(Succeed())
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
@@ -58,15 +62,15 @@ func TestObserver_List(t *testing.T) {
g := NewWithT(t)
ms := helmdriver.NewMemory()
- rel := releaseStub("success", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("success", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(ms.Create(key, rel)).To(Succeed())
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
- got, err := o.List(func(r *helmrelease.Release) bool {
+ got, err := o.List(func(r helmrelease.Releaser) bool {
// Include everything
return true
})
@@ -82,12 +86,12 @@ func TestObserver_Query(t *testing.T) {
g := NewWithT(t)
ms := helmdriver.NewMemory()
- rel := releaseStub("success", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("success", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(ms.Create(key, rel)).To(Succeed())
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
@@ -104,11 +108,11 @@ func TestObserver_Create(t *testing.T) {
g := NewWithT(t)
ms := helmdriver.NewMemory()
- rel := releaseStub("success", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("success", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
@@ -121,16 +125,16 @@ func TestObserver_Create(t *testing.T) {
ms := helmdriver.NewMemory()
- rel := releaseStub("error", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("error", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(ms.Create(key, rel)).To(Succeed())
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
- rel2 := releaseStub("error", 1, "ns1", helmrelease.StatusFailed)
+ rel2 := releaseStub("error", 1, "ns1", helmreleasecommon.StatusFailed)
g.Expect(o.Create(key, rel2)).To(HaveOccurred())
g.Expect(called).To(BeFalse())
})
@@ -141,12 +145,12 @@ func TestObserver_Update(t *testing.T) {
g := NewWithT(t)
ms := helmdriver.NewMemory()
- rel := releaseStub("success", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("success", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(ms.Create(key, rel)).To(Succeed())
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
@@ -158,11 +162,11 @@ func TestObserver_Update(t *testing.T) {
g := NewWithT(t)
var called bool
- o := NewObserver(helmdriver.NewMemory(), func(rls *helmrelease.Release) {
+ o := storage.NewObserver(helmdriver.NewMemory(), func(rls helmrelease.Releaser) {
called = true
})
- rel := releaseStub("error", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("error", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(o.Update(key, rel)).To(HaveOccurred())
g.Expect(called).To(BeFalse())
@@ -174,12 +178,12 @@ func TestObserver_Delete(t *testing.T) {
g := NewWithT(t)
ms := helmdriver.NewMemory()
- rel := releaseStub("success", 1, "ns1", helmrelease.StatusDeployed)
+ rel := releaseStub("success", 1, "ns1", helmreleasecommon.StatusDeployed).(*helmreleasev1.Release)
key := testKey(rel.Name, rel.Version)
g.Expect(ms.Create(key, rel)).To(Succeed())
var called bool
- o := NewObserver(ms, func(rls *helmrelease.Release) {
+ o := storage.NewObserver(ms, func(rls helmrelease.Releaser) {
called = true
})
@@ -196,7 +200,7 @@ func TestObserver_Delete(t *testing.T) {
g := NewWithT(t)
var called bool
- o := NewObserver(helmdriver.NewMemory(), func(rls *helmrelease.Release) {
+ o := storage.NewObserver(helmdriver.NewMemory(), func(rls helmrelease.Releaser) {
called = true
})
@@ -208,12 +212,12 @@ func TestObserver_Delete(t *testing.T) {
})
}
-func releaseStub(name string, version int, namespace string, status helmrelease.Status) *helmrelease.Release {
- return &helmrelease.Release{
+func releaseStub(name string, version int, namespace string, status helmreleasecommon.Status) helmrelease.Releaser {
+ return &helmreleasev1.Release{
Name: name,
Version: version,
Namespace: namespace,
- Info: &helmrelease.Info{Status: status},
+ Info: &helmreleasev1.Info{Status: status},
}
}
diff --git a/internal/testutil/equal_cmp.go b/internal/testutil/equal_cmp.go
index a8ca1960c..c9314b53b 100644
--- a/internal/testutil/equal_cmp.go
+++ b/internal/testutil/equal_cmp.go
@@ -40,7 +40,7 @@ import (
// Equal uses go-cmp to compare actual with expected. Equal is strict about
// types when performing comparisons.
-func Equal(expected interface{}, options ...cmp.Option) types.GomegaMatcher {
+func Equal(expected any, options ...cmp.Option) types.GomegaMatcher {
return &equalCmpMatcher{
expected: expected,
options: options,
@@ -48,20 +48,20 @@ func Equal(expected interface{}, options ...cmp.Option) types.GomegaMatcher {
}
type equalCmpMatcher struct {
- expected interface{}
+ expected any
options cmp.Options
}
-func (matcher *equalCmpMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *equalCmpMatcher) Match(actual any) (success bool, err error) {
return cmp.Equal(actual, matcher.expected, matcher.options), nil
}
-func (matcher *equalCmpMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *equalCmpMatcher) FailureMessage(actual any) (message string) {
diff := cmp.Diff(matcher.expected, actual, matcher.options)
return "Mismatch (-want, +got):\n" + diff
}
-func (matcher *equalCmpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *equalCmpMatcher) NegatedFailureMessage(actual any) (message string) {
diff := cmp.Diff(matcher.expected, actual, matcher.options)
return "Mismatch (-want, +got):\n" + diff
}
diff --git a/internal/testutil/helm_time.go b/internal/testutil/helm_time.go
index 5c4b81639..53cd79f30 100644
--- a/internal/testutil/helm_time.go
+++ b/internal/testutil/helm_time.go
@@ -18,14 +18,12 @@ package testutil
import (
"time"
-
- helmtime "helm.sh/helm/v3/pkg/time"
)
// MustParseHelmTime parses a string into a Helm time.Time, panicking if it
// fails.
-func MustParseHelmTime(t string) helmtime.Time {
- res, err := helmtime.Parse(time.RFC3339, t)
+func MustParseHelmTime(t string) time.Time {
+ res, err := time.Parse(time.RFC3339, t)
if err != nil {
panic(err)
}
diff --git a/internal/testutil/mock_chart.go b/internal/testutil/mock_chart.go
index 29380474c..f648763f5 100644
--- a/internal/testutil/mock_chart.go
+++ b/internal/testutil/mock_chart.go
@@ -19,8 +19,8 @@ package testutil
import (
"fmt"
- helmchart "helm.sh/helm/v3/pkg/chart"
- helmchartutil "helm.sh/helm/v3/pkg/chartutil"
+ helmchartutil "helm.sh/helm/v4/pkg/chart/common"
+ helmchart "helm.sh/helm/v4/pkg/chart/v2"
)
var manifestTmpl = `apiVersion: v1
@@ -145,14 +145,14 @@ func BuildChart(opts ...ChartOption) *helmchart.Chart {
AppVersion: "1.2.3",
},
// This adds a basic template and hooks.
- Templates: []*helmchart.File{
+ Templates: []*helmchartutil.File{
{
Name: "templates/manifest",
- Data: []byte(fmt.Sprintf(manifestTmpl, "{{ default .Release.Namespace }}")),
+ Data: fmt.Appendf(nil, manifestTmpl, "{{ default .Release.Namespace }}"),
},
{
Name: "templates/hooks",
- Data: []byte(fmt.Sprintf(manifestWithHookTmpl, "{{ default .Release.Namespace }}")),
+ Data: fmt.Appendf(nil, manifestWithHookTmpl, "{{ default .Release.Namespace }}"),
},
},
},
@@ -182,9 +182,9 @@ func ChartWithVersion(version string) ChartOption {
// ChartWithFailingHook appends a failing hook to the chart.
func ChartWithFailingHook() ChartOption {
return func(opts *ChartOptions) {
- opts.Templates = append(opts.Templates, &helmchart.File{
+ opts.Templates = append(opts.Templates, &helmchartutil.File{
Name: "templates/failing-hook",
- Data: []byte(fmt.Sprintf(manifestWithFailingHookTmpl, "{{ default .Release.Namespace }}")),
+ Data: fmt.Appendf(nil, manifestWithFailingHookTmpl, "{{ default .Release.Namespace }}"),
})
}
}
@@ -192,9 +192,9 @@ func ChartWithFailingHook() ChartOption {
// ChartWithTestHook appends a test hook to the chart.
func ChartWithTestHook() ChartOption {
return func(opts *ChartOptions) {
- opts.Templates = append(opts.Templates, &helmchart.File{
+ opts.Templates = append(opts.Templates, &helmchartutil.File{
Name: "templates/test-hooks",
- Data: []byte(fmt.Sprintf(manifestWithTestHookTmpl, "{{ default .Release.Namespace }}")),
+ Data: fmt.Appendf(nil, manifestWithTestHookTmpl, "{{ default .Release.Namespace }}"),
})
}
}
@@ -202,9 +202,9 @@ func ChartWithTestHook() ChartOption {
// ChartWithFailingTestHook appends a failing test hook to the chart.
func ChartWithFailingTestHook() ChartOption {
return func(options *ChartOptions) {
- options.Templates = append(options.Templates, &helmchart.File{
+ options.Templates = append(options.Templates, &helmchartutil.File{
Name: "templates/test-hooks",
- Data: []byte(fmt.Sprintf(manifestWithFailingTestHookTmpl, "{{ default .Release.Namespace }}")),
+ Data: fmt.Appendf(nil, manifestWithFailingTestHookTmpl, "{{ default .Release.Namespace }}"),
})
}
}
@@ -212,10 +212,10 @@ func ChartWithFailingTestHook() ChartOption {
// ChartWithManifestWithCustomName sets the name of the manifest.
func ChartWithManifestWithCustomName(name string) ChartOption {
return func(opts *ChartOptions) {
- opts.Templates = []*helmchart.File{
+ opts.Templates = []*helmchartutil.File{
{
Name: "templates/manifest",
- Data: []byte(fmt.Sprintf(manifestWithCustomNameTmpl, name, "{{ default .Release.Namespace }}")),
+ Data: fmt.Appendf(nil, manifestWithCustomNameTmpl, name, "{{ default .Release.Namespace }}"),
},
}
}
@@ -224,7 +224,7 @@ func ChartWithManifestWithCustomName(name string) ChartOption {
// ChartWithCRD appends a CRD to the chart.
func ChartWithCRD() ChartOption {
return func(opts *ChartOptions) {
- opts.Files = []*helmchart.File{
+ opts.Files = []*helmchartutil.File{
{
Name: "crds/crd.yaml",
Data: []byte(crdManifest),
diff --git a/internal/testutil/mock_release.go b/internal/testutil/mock_release.go
index e37b3e762..2a73a41aa 100644
--- a/internal/testutil/mock_release.go
+++ b/internal/testutil/mock_release.go
@@ -19,7 +19,7 @@ package testutil
import (
"fmt"
- helmrelease "helm.sh/helm/v3/pkg/release"
+ helmrelease "helm.sh/helm/v4/pkg/release/v1"
)
// ReleaseOptions is a helper to build a Helm release mock.
@@ -44,7 +44,7 @@ func BuildRelease(mockOpts *helmrelease.MockReleaseOptions, opts ...ReleaseOptio
}
// ReleaseWithConfig sets the config on the release.
-func ReleaseWithConfig(config map[string]interface{}) ReleaseOption {
+func ReleaseWithConfig(config map[string]any) ReleaseOption {
return func(options *ReleaseOptions) {
options.Config = config
}
diff --git a/internal/testutil/mock_slog_handler.go b/internal/testutil/mock_slog_handler.go
new file mode 100644
index 000000000..d6149ab8a
--- /dev/null
+++ b/internal/testutil/mock_slog_handler.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2026 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testutil
+
+import (
+ "context"
+ "log/slog"
+)
+
+// MockSLogHandler lets callers know if Handle was called.
+type MockSLogHandler struct {
+ Called bool
+}
+
+// Enabled implements slog.Handler.
+func (m *MockSLogHandler) Enabled(context.Context, slog.Level) bool {
+ return true
+}
+
+// Handle implements slog.Handler.
+func (m *MockSLogHandler) Handle(context.Context, slog.Record) error {
+ m.Called = true
+ return nil
+}
+
+// WithAttrs implements slog.Handler.
+func (m *MockSLogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ return m
+}
+
+// WithGroup implements slog.Handler.
+func (m *MockSLogHandler) WithGroup(name string) slog.Handler {
+ return m
+}
diff --git a/internal/testutil/save_chart.go b/internal/testutil/save_chart.go
index 55da8cba3..0c812586f 100644
--- a/internal/testutil/save_chart.go
+++ b/internal/testutil/save_chart.go
@@ -23,8 +23,8 @@ import (
"strings"
"github.com/opencontainers/go-digest"
- "helm.sh/helm/v3/pkg/chart"
- "helm.sh/helm/v3/pkg/chartutil"
+ chart "helm.sh/helm/v4/pkg/chart/v2"
+ chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
diff --git a/main.go b/main.go
index 31b245f70..652b6286a 100644
--- a/main.go
+++ b/main.go
@@ -22,7 +22,7 @@ import (
"time"
flag "github.com/spf13/pflag"
- "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v4/pkg/kube"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -51,14 +51,13 @@ import (
"github.com/fluxcd/pkg/runtime/probes"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
- v2 "github.com/fluxcd/helm-controller/api/v2"
-
- intdigest "github.com/fluxcd/helm-controller/internal/digest"
-
// +kubebuilder:scaffold:imports
+ v2 "github.com/fluxcd/helm-controller/api/v2"
intacl "github.com/fluxcd/helm-controller/internal/acl"
+ "github.com/fluxcd/helm-controller/internal/action"
"github.com/fluxcd/helm-controller/internal/controller"
+ intdigest "github.com/fluxcd/helm-controller/internal/digest"
"github.com/fluxcd/helm-controller/internal/features"
intkube "github.com/fluxcd/helm-controller/internal/kube"
"github.com/fluxcd/helm-controller/internal/oomwatch"
@@ -172,6 +171,14 @@ func main() {
auth.EnableObjectLevelWorkloadIdentity()
}
+ switch enabled, err := features.Enabled(features.UseHelm3Defaults); {
+ case err != nil:
+ setupLog.Error(err, "unable to check feature gate "+features.UseHelm3Defaults)
+ os.Exit(1)
+ case enabled:
+ action.UseHelm3Defaults = enabled
+ }
+
if defaultKubeConfigServiceAccount != "" {
auth.SetDefaultKubeConfigServiceAccount(defaultKubeConfigServiceAccount)
}