diff --git a/.github/workflows/helm_vs_kustomize_verification.yaml b/.github/workflows/helm_vs_kustomize_verification.yaml
new file mode 100644
index 0000000000..2e10b68053
--- /dev/null
+++ b/.github/workflows/helm_vs_kustomize_verification.yaml
@@ -0,0 +1,47 @@
+name: Verify Helm vs Kustomize Manifests
+
+on:
+  pull_request:
+    paths:
+      - .github/workflows/helm_vs_kustomize_verification.yaml
+      - tests/install_KinD_create_KinD_cluster_install_kustomize.sh
+      - tests/helm_install.sh
+      - tests/helm_kustomize_compare_manifests.sh
+      - tests/helm_kustomize_compare_manifests.py
+      - applications/spark/spark-operator/**
+      - common/cert-manager/**
+      - experimental/helm/scripts/**
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    timeout-minutes: 15
+    strategy:
+      matrix:
+        component: [spark-operator, cert-manager]
+      fail-fast: false
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Install KinD, Create KinD cluster and Install kustomize
+        run: ./tests/install_KinD_create_KinD_cluster_install_kustomize.sh
+
+      - name: Install Helm
+        run: ./tests/helm_install.sh
+
+      - name: Setup Helm chart structure
+        run: ./experimental/helm/scripts/setup-test-chart.sh
+
+      - name: Generate Helm templates for ${{ matrix.component }}
+        run: ./experimental/helm/scripts/synchronize-${{ matrix.component }}.sh
+
+      - name: Run Helm vs Kustomize verification for ${{ matrix.component }}
+        run: ./tests/helm_kustomize_compare_manifests.sh ${{ matrix.component }}
+
+      - name: Cleanup generated Helm templates
+        if: always()
+        run: |
+          rm -rf experimental/helm/kubeflow/templates/external/${{ matrix.component }}
+          rm -f experimental/helm/kubeflow/crds/*-${{ matrix.component }}*.yaml
diff --git a/experimental/helm/.gitignore b/experimental/helm/.gitignore
new file mode 100644
index 0000000000..e0b5594428
--- /dev/null
+++ b/experimental/helm/.gitignore
@@ -0,0 +1,7 @@
+# Helm dependency charts
+**/charts/*.tgz
+**/Chart.lock
+
+# Helm temporary files
+*.tmp
+*.backup
\ No newline at end of file
diff --git a/experimental/helm/scripts/patch-templates.py b/experimental/helm/scripts/patch-templates.py
new file mode 100755
index 0000000000..80b6f0a2a1
--- /dev/null
+++ b/experimental/helm/scripts/patch-templates.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+import yaml
+import sys
+import os
+from pathlib import Path
+
+def patch_yaml_file(file_path, component):
+    """Patch a YAML file to add conditional rendering and namespace"""
+    with open(file_path, 'r') as f:
+        content = f.read()
+
+    component_map = {
+        'spark-operator': 'sparkOperator.enabled',
+        'cert-manager': 'certManager.enabled',
+    }
+
+    condition = component_map.get(component, f'{component}.enabled')
+    condition_check = f'{{{{- if .Values.{condition} }}}}'
+
+    # Idempotency guard: skip files already wrapped in the condition.
+    if condition_check in content:
+        return
+
+    try:
+        docs = list(yaml.safe_load_all(content))
+        patched_docs = []
+
+        for doc in docs:
+            if doc and isinstance(doc, dict):
+                if 'metadata' in doc and doc.get('kind') in [
+                    'Deployment', 'Service', 'ServiceAccount', 'Role', 'RoleBinding'
+                ]:
+                    # Only inject a namespace when none is templated yet.
+                    if not isinstance(doc['metadata'].get('namespace'), str) or '{{' not in doc['metadata'].get('namespace', ''):
+                        if component == 'cert-manager':
+                            if doc.get('kind') in ['Role', 'RoleBinding'] and 'leaderelection' in doc['metadata'].get('name', ''):
+                                doc['metadata']['namespace'] = 'kube-system'
+                            else:
+                                doc['metadata']['namespace'] = '{{ .Values.global.certManagerNamespace }}'
+                        else:
+                            doc['metadata']['namespace'] = '{{ include "kubeflow.namespace" . }}'
+
+                if doc.get('kind') == 'Deployment' and 'spec' in doc and component == 'spark-operator':
+                    if 'template' in doc['spec'] and 'metadata' in doc['spec']['template']:
+                        template_meta = doc['spec']['template']['metadata']
+                        if 'labels' not in template_meta:
+                            template_meta['labels'] = {}
+                        template_meta['labels']['sidecar.istio.io/inject'] = 'false'
+
+                patched_docs.append(doc)
+
+        with open(file_path, 'w') as f:
+            f.write(f'{condition_check}\n')
+            for doc in patched_docs:
+                f.write('---\n')
+                yaml.dump(doc, f, default_flow_style=False, sort_keys=False)
+            f.write('{{- end }}\n')
+
+    except Exception as e:
+        # Fallback: wrap the original content verbatim if it cannot be parsed.
+        print(f"Warning: Could not patch {file_path}: {e}")
+        with open(file_path, 'w') as f:
+            f.write(f'{condition_check}\n')
+            f.write(content)
+            f.write('{{- end }}\n')
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        print("Usage: patch-templates.py <templates_dir> <component>")
+        sys.exit(1)
+
+    templates_dir = sys.argv[1]
+    component = sys.argv[2]
+    for yaml_file in Path(templates_dir).rglob("*.yaml"):
+        patch_yaml_file(str(yaml_file), component)
\ No newline at end of file
diff --git a/experimental/helm/scripts/setup-test-chart.sh b/experimental/helm/scripts/setup-test-chart.sh
new file mode 100755
index 0000000000..5ad73c239e
--- /dev/null
+++ b/experimental/helm/scripts/setup-test-chart.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HELM_DIR="$(dirname "$SCRIPT_DIR")"
+CHART_DIR="$HELM_DIR/kubeflow"
+
+
+mkdir -p "$CHART_DIR/templates/external"
+mkdir -p "$CHART_DIR/templates/integrations"
+mkdir -p "$CHART_DIR/templates/_helpers"
+mkdir -p "$CHART_DIR/crds"
+
+cat > "$CHART_DIR/Chart.yaml" << 'EOF'
+apiVersion: v2
+name: kubeflow
+description: Kubeflow All-in-One Helm Chart (Test Version)
+type: application
+version: 0.1.0
+appVersion: "v1.10.0"
+EOF
+
+cat > "$CHART_DIR/values.yaml" << 'EOF'
+# Global configuration
+global:
+  kubeflowNamespace: kubeflow
+  certManagerNamespace: cert-manager
+
+# Component configurations
+sparkOperator:
+  enabled: false
+  kubeflowRBAC:
+    enabled: false
+  spark:
+    jobNamespaces: []
+  webhook:
+    enable: true
+    port: 9443
+
+certManager:
+  enabled: false
+  installCRDs: true
+  global:
+    leaderElection:
+      namespace: kube-system
+  startupapicheck:
+    enabled: false
+  kubeflowIssuer:
+    enabled: true
+    name: kubeflow-self-signing-issuer
+EOF
+
+cat > "$CHART_DIR/templates/_helpers.tpl" << 'EOF'
+{{/*
+Common labels
+*/}}
+{{- define "kubeflow.labels" -}}
+app.kubernetes.io/name: {{ include "kubeflow.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Chart name
+*/}}
+{{- define "kubeflow.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Kubeflow namespace
+*/}}
+{{- define "kubeflow.namespace" -}}
+{{- .Values.global.kubeflowNamespace | default "kubeflow" }}
+{{- end }}
+EOF
+
+cat > "$CHART_DIR/templates/integrations/spark-operator-rbac.yaml" << 'EOF'
+{{- if and .Values.sparkOperator.enabled .Values.sparkOperator.kubeflowRBAC.enabled }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubeflow-spark-admin
+  labels:
+    app: spark-operator
+    app.kubernetes.io/name: spark-operator
+    rbac.authorization.kubeflow.org/aggregate-to-kubeflow-admin: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubeflow-spark-edit
+  labels:
+    app: spark-operator
+    app.kubernetes.io/name: spark-operator
+    rbac.authorization.kubeflow.org/aggregate-to-kubeflow-edit: "true"
+    rbac.authorization.kubeflow.org/aggregate-to-kubeflow-admin: "true"
+rules:
+  - apiGroups:
+      - sparkoperator.k8s.io
+    resources:
+      - sparkapplications
+      - scheduledsparkapplications
+    verbs:
+      - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - sparkoperator.k8s.io
+    resources:
+      - sparkapplications/status
+      - scheduledsparkapplications/status
+    verbs:
+      - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubeflow-spark-view
+  labels:
+    app: spark-operator
+    app.kubernetes.io/name: spark-operator
+    rbac.authorization.kubeflow.org/aggregate-to-kubeflow-view: "true"
+rules:
+  - apiGroups:
+      - sparkoperator.k8s.io
+    resources:
+      - sparkapplications
+      - scheduledsparkapplications
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - sparkoperator.k8s.io
+    resources:
+      - sparkapplications/status
+      - scheduledsparkapplications/status
+    verbs:
+      - get
+{{- end }}
+EOF
+
diff --git a/experimental/helm/scripts/synchronize-all-charts.sh b/experimental/helm/scripts/synchronize-all-charts.sh
new file mode 100755
index 0000000000..89ce88ef48
--- /dev/null
+++ b/experimental/helm/scripts/synchronize-all-charts.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Master script to sync all upstream charts for Kubeflow AIO Helm chart
+
+set -euo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HELM_DIR="$(dirname "$SCRIPT_DIR")"
+CHART_DIR="$HELM_DIR/kubeflow"
+
+COMPONENTS=(
+  "spark-operator"
+  "cert-manager"
+  # Add more components as we implement them
+  # "training-operator"
+  # "istio"
+  # "oauth2-proxy"
+  # "dex"
+)
+
+for component in "${COMPONENTS[@]}"; do
+  sync_script="$SCRIPT_DIR/synchronize-${component}.sh"
+  if [ -f "$sync_script" ]; then
+    echo "Syncing $component..."
+    bash "$sync_script"
+  fi
+done
+
+cd "$CHART_DIR"
+helm template kubeflow . --debug --dry-run > /dev/null && echo "Success: AIO chart templates correctly!"
\ No newline at end of file
diff --git a/experimental/helm/scripts/synchronize-cert-manager.sh b/experimental/helm/scripts/synchronize-cert-manager.sh
new file mode 100755
index 0000000000..a2ff9f274c
--- /dev/null
+++ b/experimental/helm/scripts/synchronize-cert-manager.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+# Script to sync Cert Manager templates for AIO Helm chart
+
+set -euo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HELM_DIR="$(dirname "$SCRIPT_DIR")"
+CHART_DIR="$HELM_DIR/kubeflow"
+
+COMPONENT="cert-manager"
+VERSION="v1.16.1"
+REPO="https://charts.jetstack.io"
+TEMPLATES_DIR="$CHART_DIR/templates/external/${COMPONENT}"
+CRDS_DIR="$CHART_DIR/crds"
+NAMESPACE="cert-manager"
+
+rm -rf "$TEMPLATES_DIR"
+mkdir -p "$TEMPLATES_DIR"
+mkdir -p "$CRDS_DIR"
+
+TEMP_DIR=$(mktemp -d)
+cd "$TEMP_DIR"
+
+# Generate templates using same settings as existing Kustomize setup
+# Disable startupapicheck to match Kustomize manifests that don't include it
+helm template "$COMPONENT" "$COMPONENT" \
+  --version "$VERSION" \
+  --repo "$REPO" \
+  --namespace "$NAMESPACE" \
+  --include-crds \
+  --set installCRDs=true \
+  --set global.leaderElection.namespace="kube-system" \
+  --set startupapicheck.enabled=false \
+  --output-dir .
+
+for file in "$COMPONENT/templates/"*.yaml; do
+  if [ -f "$file" ] && [[ ! "$(basename "$file")" == crds.yaml ]]; then
+    cp "$file" "$TEMPLATES_DIR/"
+  fi
+done
+cat "$COMPONENT/templates/crds.yaml" > "$CRDS_DIR/cert-manager-crds.yaml"
+
+python3 "$SCRIPT_DIR/patch-templates.py" "$TEMPLATES_DIR" "$COMPONENT"
+
+# Add namespace template since cert-manager chart doesn't include it
+cat > "$TEMPLATES_DIR/namespace.yaml" << 'EOF'
+{{- if .Values.certManager.enabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ .Values.global.certManagerNamespace }}
+  labels:
+    pod-security.kubernetes.io/enforce: restricted
+{{- end }}
+EOF
+
+# Create kubeflow-issuer template
+mkdir -p "$TEMPLATES_DIR/kubeflow-issuer"
+cat > "$TEMPLATES_DIR/kubeflow-issuer/cluster-issuer.yaml" << 'EOF'
+{{- if .Values.certManager.enabled }}
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: kubeflow-self-signing-issuer
+  labels:
+    {{- include "kubeflow.labels" . | nindent 4 }}
+    app.kubernetes.io/component: cert-manager
+    app.kubernetes.io/name: cert-manager
+    kustomize.component: cert-manager
+spec:
+  selfSigned: {}
+{{- end }}
+EOF
+
+cd "$CHART_DIR"
+rm -rf "$TEMP_DIR"
+
+helm template kubeflow . --debug --dry-run > /dev/null
+
+echo "Cert Manager templates synchronized successfully"
\ No newline at end of file
diff --git a/experimental/helm/scripts/synchronize-spark-operator.sh b/experimental/helm/scripts/synchronize-spark-operator.sh
new file mode 100755
index 0000000000..c4bc21a05f
--- /dev/null
+++ b/experimental/helm/scripts/synchronize-spark-operator.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+# Script to sync Spark Operator templates for AIO Helm chart
+
+set -euo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HELM_DIR="$(dirname "$SCRIPT_DIR")"
+CHART_DIR="$HELM_DIR/kubeflow"
+
+COMPONENT="spark-operator"
+VERSION="2.2.0"
+REPO="https://kubeflow.github.io/spark-operator"
+TEMPLATES_DIR="$CHART_DIR/templates/external/${COMPONENT}"
+CRDS_DIR="$CHART_DIR/crds"
+NAMESPACE="kubeflow"
+
+rm -rf "$TEMPLATES_DIR"
+mkdir -p "$TEMPLATES_DIR"
+mkdir -p "$CRDS_DIR"
+
+TEMP_DIR=$(mktemp -d)
+cd "$TEMP_DIR"
+
+# Generate templates using same settings as existing Kustomize sync
+helm template "$COMPONENT" "$COMPONENT" \
+  --version "$VERSION" \
+  --repo "$REPO" \
+  --namespace "$NAMESPACE" \
+  --include-crds \
+  --set "spark.jobNamespaces={}" \
+  --set webhook.enable=true \
+  --set webhook.port=9443 \
+  --output-dir .
+
+cp -r "$COMPONENT/templates/"* "$TEMPLATES_DIR/"
+
+[ -d "$COMPONENT/crds" ] && {
+  cp -r "$COMPONENT/crds/"* "$CRDS_DIR/"
+}
+
+python3 "$SCRIPT_DIR/patch-templates.py" "$TEMPLATES_DIR" "$COMPONENT"
+
+cd "$CHART_DIR"
+rm -rf "$TEMP_DIR"
+
+helm template kubeflow . --debug --dry-run > /dev/null
\ No newline at end of file
diff --git a/tests/helm_install.sh b/tests/helm_install.sh
new file mode 100755
index 0000000000..1ef315c799
--- /dev/null
+++ b/tests/helm_install.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+HELM_VERSION="v3.16.4"
+
+echo "Install Helm..."
+{
+  curl -Lo ./helm.tar.gz https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz
+  tar -xzf helm.tar.gz linux-amd64/helm
+  chmod +x linux-amd64/helm
+  sudo mv linux-amd64/helm /usr/local/bin/helm
+  rm -rf helm.tar.gz linux-amd64
+} || { echo "Failed to install Helm"; exit 1; }
+
+echo "Helm ${HELM_VERSION} installed successfully"
\ No newline at end of file
diff --git a/tests/helm_kustomize_compare_manifests.py b/tests/helm_kustomize_compare_manifests.py
new file mode 100644
index 0000000000..5c431e149f
--- /dev/null
+++ b/tests/helm_kustomize_compare_manifests.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+import yaml
+import sys
+import json
+
+def clean_helm_metadata(obj):
+    """Remove ONLY helm-specific metadata from objects"""
+    if isinstance(obj, dict):
+        if 'metadata' in obj:
+            metadata = obj['metadata']
+
+            if 'labels' in metadata:
+                labels = metadata['labels']
+                helm_specific_labels = [
+                    'helm.sh/chart',
+                    'app.kubernetes.io/managed-by',
+                    'app.kubernetes.io/instance',
+                    'app.kubernetes.io/version'
+                ]
+                for label in helm_specific_labels:
+                    if label in labels:
+                        del labels[label]
+
+            if 'annotations' in metadata:
+                annotations = metadata['annotations']
+                helm_specific_annotations = [
+                    'meta.helm.sh/release-name',
+                    'meta.helm.sh/release-namespace'
+                ]
+                for annotation in helm_specific_annotations:
+                    if annotation in annotations:
+                        del annotations[annotation]
+
+        for key, value in list(obj.items()):
+            if isinstance(value, (dict, list)):
+                clean_helm_metadata(value)
+
+    elif isinstance(obj, list):
+        for item in obj:
+            clean_helm_metadata(item)
+
+    return obj
+
+def json_diff(obj1, obj2, path=""):
+    """Simple recursive comparison showing differences"""
+    differences = []
+
+    if type(obj1) != type(obj2):
+        differences.append(f"{path}: type mismatch ({type(obj1).__name__} vs {type(obj2).__name__})")
+        return differences
+
+    if isinstance(obj1, dict):
+        all_keys = set(obj1.keys()) | set(obj2.keys())
+        for key in sorted(all_keys):
+            key_path = f"{path}.{key}" if path else key
+            if key not in obj1:
+                differences.append(f"{key_path}: missing in kustomize")
+            elif key not in obj2:
+                differences.append(f"{key_path}: missing in helm")
+            else:
+                differences.extend(json_diff(obj1[key], obj2[key], key_path))
+
+    elif isinstance(obj1, list):
+        if len(obj1) != len(obj2):
+            differences.append(f"{path}: list length mismatch ({len(obj1)} vs {len(obj2)})")
+        else:
+            for i, (item1, item2) in enumerate(zip(obj1, obj2)):
+                differences.extend(json_diff(item1, item2, f"{path}[{i}]"))
+
+    elif obj1 != obj2:
+        differences.append(f"{path}: '{obj1}' != '{obj2}'")
+
+    return differences
+
+def load_and_index(file_path):
+    """Load YAML and index by kind/name"""
+    with open(file_path) as f:
+        content = f.read()
+        if not content.strip():
+            return {}
+        docs = list(yaml.safe_load_all(content))
+
+    indexed = {}
+    for doc in docs:
+        if doc and isinstance(doc, dict) and 'kind' in doc and 'metadata' in doc:
+            kind = doc['kind']
+            name = doc['metadata'].get('name', 'unnamed')
+            key = f"{kind}/{name}"
+
+            if kind == "ClusterIssuer" and 'webhooks' in doc:
+                del doc['webhooks']
+
+            indexed[key] = doc
+
+    return indexed
+
+def main():
+    if len(sys.argv) < 3:
+        print("Usage: compare-manifests.py <kustomize_file> <helm_file> [component] [namespace] [kubeflow-rbac-enabled]")
+        sys.exit(1)
+
+    kustomize_file = sys.argv[1]
+    helm_file = sys.argv[2]
+    component = sys.argv[3] if len(sys.argv) > 3 else "component"
+    namespace = sys.argv[4] if len(sys.argv) > 4 else "kubeflow"
+    kubeflow_rbac_enabled = sys.argv[5].lower() == 'true' if len(sys.argv) > 5 else False
+
+    if component not in ["spark-operator", "cert-manager"]:
+        print(f"ERROR: Unsupported component '{component}'. Only 'spark-operator' and 'cert-manager' are supported.")
+        sys.exit(1)
+
+    kustomize = load_and_index(kustomize_file)
+    helm = load_and_index(helm_file)
+
+    kustomize_keys = set(kustomize.keys())
+    helm_keys = set(helm.keys())
+
+    kustomize_crds = {key for key in kustomize_keys if key.startswith('CustomResourceDefinition/')}
+    helm_crds = {key for key in helm_keys if key.startswith('CustomResourceDefinition/')}
+    extra_helm_crds = helm_crds - kustomize_crds
+
+    expected_extra_helm = set()
+    if component == "spark-operator" and kubeflow_rbac_enabled:
+        expected_extra_helm = {
+            'ClusterRole/kubeflow-spark-admin',
+            'ClusterRole/kubeflow-spark-edit',
+            'ClusterRole/kubeflow-spark-view'
+        }
+    elif component == "cert-manager":
+        expected_extra_helm = {
+            'ValidatingWebhookConfiguration/cert-manager-webhook'
+        }
+
+    expected_extra_helm.update(extra_helm_crds)
+
+    extra_kustomize = kustomize_keys - helm_keys
+    extra_helm = helm_keys - kustomize_keys
+    unexpected_extra_helm = extra_helm - expected_extra_helm
+
+    success = True
+
+    if extra_kustomize:
+        print(f"CRITICAL: Resources missing in Helm output: {', '.join(sorted(extra_kustomize))}")
+        success = False
+
+    if unexpected_extra_helm:
+        print(f"WARNING: Unexpected extra resources in Helm: {', '.join(sorted(unexpected_extra_helm))}")
+
+    print(f"\nCOMPARISON SUMMARY:")
+    print(f"  Component: {component}")
+    print(f"  Namespace: {namespace}")
+    print(f"  Kustomize resources: {len(kustomize_keys)}")
+    print(f"  Helm resources: {len(helm_keys)}")
+    print(f"  Common resources: {len(kustomize_keys & helm_keys)}")
+    print(f"  Resources only in Kustomize: {len(extra_kustomize)}")
+    print(f"  Resources only in Helm: {len(extra_helm)}")
+    print(f"  Expected extra Helm resources: {len(expected_extra_helm)}")
+
+    common = kustomize_keys & helm_keys
+    resources_with_differences = []
+
+    for key in sorted(common):
+        kustomize_doc = json.loads(json.dumps(kustomize[key]))
+        helm_doc = json.loads(json.dumps(helm[key]))
+
+        clean_helm_metadata(kustomize_doc)
+        clean_helm_metadata(helm_doc)
+
+        differences = json_diff(kustomize_doc, helm_doc)
+
+        if differences:
+            print(f"\nDifferences in {key}:")
+            for diff in differences[:10]:
+                print(f"  {diff}")
+            if len(differences) > 10:
+                print(f"  ... and {len(differences) - 10} more differences")
+            resources_with_differences.append(key)
+
+    if not success or resources_with_differences:
+        if resources_with_differences:
+            print(f"\nFAILED: Found differences in {len(resources_with_differences)} resources:")
+            for resource in resources_with_differences:
+                print(f"  - {resource}")
+        if extra_kustomize:
+            print(f"\nFAILED: {len(extra_kustomize)} resources are missing in Helm output.")
+            print("Helm templates need to be implemented for:")
+            for resource in sorted(extra_kustomize):
+                print(f"  - {resource}")
+        print("\nREASON: Manifests are NOT equivalent!")
+        sys.exit(1)
+    else:
+        print(f"\nSUCCESS: All {len(common)} common resources match!")
+        print("Manifests are equivalent (ignoring only helm-specific metadata)!")
+        sys.exit(0)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/tests/helm_kustomize_compare_manifests.sh b/tests/helm_kustomize_compare_manifests.sh
new file mode 100755
index 0000000000..a0ed3b20f6
--- /dev/null
+++ b/tests/helm_kustomize_compare_manifests.sh
@@ -0,0 +1,175 @@
+#!/usr/bin/env bash
+# Compare Helm vs Kustomize manifests for Kubeflow components
+
+set -euo pipefail
+
+COMPONENT=${1:-"cert-manager"}
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT_DIR="$(dirname "$SCRIPT_DIR")"
+HELM_DIR="$ROOT_DIR/experimental/helm"
+
+case $COMPONENT in
+  "spark-operator")
+    KUSTOMIZE_PATH="applications/spark/spark-operator/base"
+    NAMESPACE="kubeflow"
+    ;;
+  "cert-manager")
+    KUSTOMIZE_PATH="common/cert-manager/base"
+    NAMESPACE="cert-manager"
+    ;;
+  *)
+    echo "ERROR: Unsupported component: $COMPONENT"
+    echo "Only 'spark-operator' and 'cert-manager' are supported."
+    echo "Centraldashboard support has been removed in favor of dynamic generation."
+    exit 1
+    ;;
+esac
+
+cd "$ROOT_DIR"
+
+# Check if we're running in an environment where Helm templates should be generated
+# (like GitHub Actions) vs using existing templates
+HELM_CHART_DIR="$HELM_DIR/kubeflow"
+if [ ! -d "$HELM_CHART_DIR" ]; then
+  echo "ERROR: Helm chart directory not found: $HELM_CHART_DIR"
+  echo "This script expects Helm templates to be generated first."
+  echo "Run the appropriate synchronization script from experimental/helm/scripts/ first."
+  exit 1
+fi
+
+HELM_TEMPLATE_PATH="$HELM_CHART_DIR/templates/external/$COMPONENT"
+if [ ! -d "$HELM_TEMPLATE_PATH" ]; then
+  echo "ERROR: Helm template directory does not exist: $HELM_TEMPLATE_PATH"
+  echo "Run: experimental/helm/scripts/synchronize-${COMPONENT}.sh"
+  exit 1
+fi
+
+TEMPLATE_FILES=$(find "$HELM_TEMPLATE_PATH" -name "*.yaml" -o -name "*.yml" -o -name "*.tpl" | wc -l)
+if [ "$TEMPLATE_FILES" -eq 0 ]; then
+  echo "ERROR: No Helm template files found in $HELM_TEMPLATE_PATH"
+  echo "Please run the synchronization script: experimental/helm/scripts/synchronize-${COMPONENT}.sh"
+  exit 1
+fi
+
+echo "Generating Kustomize manifests for $COMPONENT..."
+KUSTOMIZE_OUTPUT="/tmp/kustomize-${COMPONENT}.yaml"
+case $COMPONENT in
+  "cert-manager")
+    {
+      kustomize build "$KUSTOMIZE_PATH"
+      kustomize build "common/cert-manager/kubeflow-issuer/base"
+    } > "$KUSTOMIZE_OUTPUT"
+    ;;
+  *)
+    kustomize build "$KUSTOMIZE_PATH" > "$KUSTOMIZE_OUTPUT"
+    ;;
+esac
+
+echo "Generating Helm manifests for $COMPONENT..."
+cd "$HELM_CHART_DIR"
+HELM_OUTPUT="/tmp/helm-aio-${COMPONENT}.yaml"
+TEMP_VALUES_FILE="/tmp/test-values-${COMPONENT}.yaml"
+
+# Create values file with only the target component enabled
+create_values_for_component() {
+  local component=$1
+
+  cat > "$TEMP_VALUES_FILE" << EOF
+# Global settings
+global:
+  kubeflowNamespace: kubeflow
+  certManagerNamespace: cert-manager
+
+# Disable all components by default
+sparkOperator:
+  enabled: false
+certManager:
+  enabled: false
+trainingOperator:
+  enabled: false
+istio:
+  enabled: false
+oauth2Proxy:
+  enabled: false
+dex:
+  enabled: false
+centraldashboard:
+  enabled: false
+profiles:
+  enabled: false
+jupyter:
+  enabled: false
+pipelines:
+  enabled: false
+kserve:
+  enabled: false
+katib:
+  enabled: false
+tensorboard:
+  enabled: false
+volumesWebApp:
+  enabled: false
+admissionWebhook:
+  enabled: false
+pvcviewerController:
+  enabled: false
+modelRegistry:
+  enabled: false
+EOF
+
+  case $component in
+    "cert-manager")
+      cat >> "$TEMP_VALUES_FILE" << EOF
+
+# Enable cert-manager
+certManager:
+  enabled: true
+  installCRDs: true
+  global:
+    leaderElection:
+      namespace: kube-system
+  startupapicheck:
+    enabled: false
+  kubeflowIssuer:
+    enabled: true
+    name: kubeflow-self-signing-issuer
+EOF
+      ;;
+    "spark-operator")
+      cat >> "$TEMP_VALUES_FILE" << EOF
+
+# Enable spark-operator
+sparkOperator:
+  enabled: true
+  spark:
+    jobNamespaces: []
+  webhook:
+    enable: true
+    port: 9443
+  kubeflowRBAC:
+    enabled: true
+EOF
+      ;;
+  esac
+}
+
+create_values_for_component "$COMPONENT"
+
+helm template kubeflow . --namespace "$NAMESPACE" --include-crds --values "$TEMP_VALUES_FILE" > "$HELM_OUTPUT"
+rm -f "$TEMP_VALUES_FILE"
+
+KUBEFLOW_RBAC_ENABLED="false"
+if [ "$COMPONENT" = "spark-operator" ]; then
+  INTEGRATIONS_FILE="$HELM_CHART_DIR/templates/integrations/spark-operator-rbac.yaml"
+  if [ -f "$INTEGRATIONS_FILE" ]; then
+    KUBEFLOW_RBAC_ENABLED="true"
+  fi
+fi
+
+echo "Comparing manifests for $COMPONENT..."
+cd "$ROOT_DIR"
+python3 "$ROOT_DIR/tests/helm_kustomize_compare_manifests.py" "$KUSTOMIZE_OUTPUT" "$HELM_OUTPUT" "$COMPONENT" "$NAMESPACE" "$KUBEFLOW_RBAC_ENABLED"
+
+rm -f "$KUSTOMIZE_OUTPUT" "$HELM_OUTPUT"
+
+echo "Comparison completed for $COMPONENT"
\ No newline at end of file