diff --git a/.github/workflows/aws-snp-launchmeasurements-requirements.txt b/.github/workflows/aws-snp-launchmeasurements-requirements.txt index 6d4195056..bbf8e9779 100644 --- a/.github/workflows/aws-snp-launchmeasurements-requirements.txt +++ b/.github/workflows/aws-snp-launchmeasurements-requirements.txt @@ -58,39 +58,34 @@ cffi==1.16.0 \ --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography -cryptography==42.0.4 \ - --hash=sha256:01911714117642a3f1792c7f376db572aadadbafcd8d75bb527166009c9f1d1b \ - --hash=sha256:0e89f7b84f421c56e7ff69f11c441ebda73b8a8e6488d322ef71746224c20fce \ - --hash=sha256:12d341bd42cdb7d4937b0cabbdf2a94f949413ac4504904d0cdbdce4a22cbf88 \ - --hash=sha256:15a1fb843c48b4a604663fa30af60818cd28f895572386e5f9b8a665874c26e7 \ - --hash=sha256:1cdcdbd117681c88d717437ada72bdd5be9de117f96e3f4d50dab3f59fd9ab20 \ - --hash=sha256:1df6fcbf60560d2113b5ed90f072dc0b108d64750d4cbd46a21ec882c7aefce9 \ - --hash=sha256:3c6048f217533d89f2f8f4f0fe3044bf0b2090453b7b73d0b77db47b80af8dff \ - --hash=sha256:3e970a2119507d0b104f0a8e281521ad28fc26f2820687b3436b8c9a5fcf20d1 \ - --hash=sha256:44a64043f743485925d3bcac548d05df0f9bb445c5fcca6681889c7c3ab12764 \ - --hash=sha256:4e36685cb634af55e0677d435d425043967ac2f3790ec652b2b88ad03b85c27b \ - --hash=sha256:5f8907fcf57392cd917892ae83708761c6ff3c37a8e835d7246ff0ad251d9298 \ - --hash=sha256:69b22ab6506a3fe483d67d1ed878e1602bdd5912a134e6202c1ec672233241c1 \ - --hash=sha256:6bfadd884e7280df24d26f2186e4e07556a05d37393b0f220a840b083dc6a824 \ - --hash=sha256:6d0fbe73728c44ca3a241eff9aefe6496ab2656d6e7a4ea2459865f2e8613257 \ - --hash=sha256:6ffb03d419edcab93b4b19c22ee80c007fb2d708429cecebf1dd3258956a563a \ - --hash=sha256:810bcf151caefc03e51a3d61e53335cd5c7316c0a105cc695f0959f2c638b129 \ - --hash=sha256:831a4b37accef30cccd34fcb916a5d7b5be3cbbe27268a02832c3e450aea39cb \ - --hash=sha256:887623fe0d70f48ab3f5e4dbf234986b1329a64c066d719432d0698522749929 \ - --hash=sha256:a0298bdc6e98ca21382afe914c642620370ce0470a01e1bef6dd9b5354c36854 \ - --hash=sha256:a1327f280c824ff7885bdeef8578f74690e9079267c1c8bd7dc5cc5aa065ae52 \ - --hash=sha256:c1f25b252d2c87088abc8bbc4f1ecbf7c919e05508a7e8628e6875c40bc70923 \ - --hash=sha256:c3a5cbc620e1e17009f30dd34cb0d85c987afd21c41a74352d1719be33380885 \ - --hash=sha256:ce8613beaffc7c14f091497346ef117c1798c202b01153a8cc7b8e2ebaaf41c0 \ - --hash=sha256:d2a27aca5597c8a71abbe10209184e1a8e91c1fd470b5070a2ea60cafec35bcd \ - --hash=sha256:dad9c385ba8ee025bb0d856714f71d7840020fe176ae0229de618f14dae7a6e2 \ - --hash=sha256:db4b65b02f59035037fde0998974d84244a64c3265bdef32a827ab9b63d61b18 \ - --hash=sha256:e09469a2cec88fb7b078e16d4adec594414397e8879a4341c6ace96013463d5b \ - --hash=sha256:e53dc41cda40b248ebc40b83b31516487f7db95ab8ceac1f042626bc43a2f992 \ - --hash=sha256:f1e85a178384bf19e36779d91ff35c7617c885da487d689b05c1366f9933ad74 \ - --hash=sha256:f47be41843200f7faec0683ad751e5ef11b9a56a220d57f300376cd8aba81660 \ - --hash=sha256:fb0cef872d8193e487fc6bdb08559c3aa41b659a7d9be48b2e10747f47863925 \ - --hash=sha256:ffc73996c4fca3d2b6c1c8c12bfd3ad00def8621da24f547626bf06441400449 +cryptography==43.0.1 \ + --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ + --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ + --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ + --hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 
\ + --hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ + --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ + --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ + --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ + --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ + --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ + --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ + --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ + --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ + --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ + --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ + --hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ + --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ + --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ + --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ + --hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ + --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ + --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ + --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ + --hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ + --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ + --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ + --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 # via sev-snp-measure pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yml index ca28f2030..910e734b5 100644 --- a/.github/workflows/build-binaries.yml +++ b/.github/workflows/build-binaries.yml @@ -19,7 +19,7 @@ on: jobs: build-binaries: - runs-on: [arc-runner-set] + runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 @@ -28,9 +28,6 @@ jobs: - name: Setup bazel uses: ./.github/actions/setup_bazel_nix - with: - useCache: "rbe" - rbePlatform: "ubuntu-22.04" - name: Build all shell: bash diff --git a/.github/workflows/build-tf.yml b/.github/workflows/build-tf.yml new file mode 100644 index 000000000..699832c87 --- /dev/null +++ b/.github/workflows/build-tf.yml @@ -0,0 +1,64 @@ +name: Build Terraform Provider + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + build-terraform-provider: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - arch: amd64 + os: linux + - arch: amd64 + os: darwin + - arch: arm64 + os: linux + - arch: arm64 + os: darwin + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Cache Bazel + uses: actions/cache@v4 + with: + path: | + ~/.cache/bazel + ~/.cache/bazelisk + key: ${{ runner.os }}-bazel-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'WORKSPACE') }} + restore-keys: | + ${{ runner.os }}-bazel- + + - name: Setup bazel + uses: ./.github/actions/setup_bazel_nix + with: + useCache: "false" + + - name: Cache Go dependencies + 
uses: actions/cache@v4 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Build Terraform Provider Binary + uses: ./.github/actions/build_tf_provider + with: + targetOS: ${{ matrix.os }} + targetArch: ${{ matrix.arch }} + + - name: Upload Terraform Provider Binary as artifact + uses: actions/upload-artifact@v4 + with: + name: terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }} + path: | + build/terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }} diff --git a/terraform-provider-constellation/internal/provider/cluster_resource.go b/terraform-provider-constellation/internal/provider/cluster_resource.go index 096621af6..16ff297b0 100644 --- a/terraform-provider-constellation/internal/provider/cluster_resource.go +++ b/terraform-provider-constellation/internal/provider/cluster_resource.go @@ -589,21 +589,36 @@ func (r *ClusterResource) ModifyPlan(ctx context.Context, req resource.ModifyPla // Create is called when the resource is created. func (r *ClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // Read data supplied by Terraform runtime into the model + r.logWithContext(ctx, "INFO", "Starting cluster creation") + var data ClusterResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { + r.logWithContext(ctx, "ERROR", "Failed to read plan data", map[string]interface{}{"error": resp.Diagnostics.Errors()}) return } // Apply changes to the cluster, including the init RPC and skipping the node upgrade. + r.logWithContext(ctx, "DEBUG", "Applying cluster changes", map[string]interface{}{ + "csp": data.CSP.ValueString(), + "name": data.Name.ValueString(), + }) diags := r.apply(ctx, &data, false, true) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { + r.logWithContext(ctx, "ERROR", "Failed to apply cluster changes", map[string]interface{}{"error": resp.Diagnostics.Errors()}) return } // Save data into Terraform state + r.logWithContext(ctx, "DEBUG", "Saving cluster data to state") resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + if resp.Diagnostics.HasError() { + r.logWithContext(ctx, "ERROR", "Failed to save cluster data to state", map[string]interface{}{"error": resp.Diagnostics.Errors()}) + return + } + + r.logWithContext(ctx, "INFO", "Cluster creation completed successfully") } // Read is called when the resource is read or refreshed. @@ -772,78 +787,103 @@ func (r *ClusterResource) validateGCPNetworkConfig(ctx context.Context, data *Cl // apply applies changes to a cluster. It can be used for both creating and updating a cluster. // This implements the core part of the Create and Update methods. func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, skipInitRPC, skipNodeUpgrade bool) diag.Diagnostics { + r.logWithContext(ctx, "INFO", "Starting cluster apply", map[string]interface{}{ + "skipInitRPC": skipInitRPC, + "skipNodeUpgrade": skipNodeUpgrade, + }) + diags := diag.Diagnostics{} // Parse and convert values from the Terraform state // to formats the Constellation library can work with. + r.logWithContext(ctx, "DEBUG", "Validating GCP network config") convertDiags := r.validateGCPNetworkConfig(ctx, data) diags.Append(convertDiags...) 
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to validate GCP network config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	csp := cloudprovider.FromString(data.CSP.ValueString())
+	r.logWithContext(ctx, "DEBUG", "Parsed CSP", map[string]interface{}{"csp": csp.String()})
 
 	// parse attestation config
+	r.logWithContext(ctx, "DEBUG", "Converting attestation config")
 	att, convertDiags := r.convertAttestationConfig(ctx, *data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to convert attestation config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse secrets (i.e. measurement salt, master secret, etc.)
+	r.logWithContext(ctx, "DEBUG", "Converting secrets")
 	secrets, convertDiags := r.convertSecrets(*data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to convert secrets", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse API server certificate SANs
+	r.logWithContext(ctx, "DEBUG", "Getting API server cert SANs")
 	apiServerCertSANs, convertDiags := r.getAPIServerCertSANs(ctx, data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get API server cert SANs", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse network config
+	r.logWithContext(ctx, "DEBUG", "Getting network config")
 	networkCfg, getDiags := r.getNetworkConfig(ctx, data)
 	diags.Append(getDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get network config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse Constellation microservice config
+	r.logWithContext(ctx, "DEBUG", "Parsing microservice config")
 	var microserviceCfg extraMicroservicesAttribute
 	convertDiags = data.ExtraMicroservices.As(ctx, &microserviceCfg, basetypes.ObjectAsOptions{
 		UnhandledNullAsEmpty: true, // we want to allow null values, as the CSIDriver field is optional
 	})
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to parse microservice config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse Constellation microservice version
+	r.logWithContext(ctx, "DEBUG", "Getting microservice version")
 	microserviceVersion, convertDiags := r.getMicroserviceVersion(data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get microservice version", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse Kubernetes version
+	r.logWithContext(ctx, "DEBUG", "Getting Kubernetes version")
 	k8sVersion, getDiags := r.getK8sVersion(data)
 	diags.Append(getDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get Kubernetes version", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse OS image version
+	r.logWithContext(ctx, "DEBUG", "Getting OS image version")
 	image, imageSemver, convertDiags := r.getImageVersion(ctx, data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get OS image version", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse license ID
+	r.logWithContext(ctx, "DEBUG", "Parsing license ID")
 	licenseID := data.LicenseID.ValueString()
 	switch {
 	case image.MarketplaceImage != nil && *image.MarketplaceImage:
@@ -859,6 +899,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	}
 
 	// Parse in-cluster service account info.
+ r.logWithContext(ctx, "DEBUG", "Parsing service account info") serviceAccPayload := constellation.ServiceAccountPayload{} var gcpConfig gcpAttribute var azureConfig azureAttribute @@ -868,6 +909,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, convertDiags = data.GCP.As(ctx, &gcpConfig, basetypes.ObjectAsOptions{}) diags.Append(convertDiags...) if diags.HasError() { + r.logWithContext(ctx, "ERROR", "Failed to parse GCP config", map[string]interface{}{"error": diags.Errors()}) return diags } @@ -891,6 +933,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, convertDiags = data.Azure.As(ctx, &azureConfig, basetypes.ObjectAsOptions{}) diags.Append(convertDiags...) if diags.HasError() { + r.logWithContext(ctx, "ERROR", "Failed to parse Azure config", map[string]interface{}{"error": diags.Errors()}) return diags } serviceAccPayload.Azure = azureshared.ApplicationCredentials{ @@ -903,6 +946,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, convertDiags = data.OpenStack.As(ctx, &openStackConfig, basetypes.ObjectAsOptions{}) diags.Append(convertDiags...) if diags.HasError() { + r.logWithContext(ctx, "ERROR", "Failed to parse OpenStack config", map[string]interface{}{"error": diags.Errors()}) return diags } cloudsYAML, err := clouds.ReadCloudsYAML(file.NewHandler(afero.NewOsFs()), openStackConfig.CloudsYAMLPath) @@ -927,8 +971,11 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, } } + + r.logWithContext(ctx, "DEBUG", "Marshalling service account URI") serviceAccURI, err := constellation.MarshalServiceAccountURI(csp, serviceAccPayload) if err != nil { + r.logWithContext(ctx, "ERROR", "Failed to marshal service account URI", map[string]interface{}{"error": err.Error()}) diags.AddError("Marshalling service account URI", err.Error()) return diags } @@ -940,14 +987,17 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, } // setup clients + r.logWithContext(ctx, "DEBUG", "Setting up clients") validator, err := choose.Validator(att.config, &tfContextLogger{ctx: ctx}) if err != nil { + r.logWithContext(ctx, "ERROR", "Failed to choose validator", map[string]interface{}{"error": err.Error()}) diags.AddError("Choosing validator", err.Error()) return diags } applier := r.newApplier(ctx, validator) // Construct in-memory state file + r.logWithContext(ctx, "DEBUG", "Constructing in-memory state file") stateFile := state.New().SetInfrastructure(state.Infrastructure{ UID: data.UID.ValueString(), ClusterEndpoint: data.OutOfClusterEndpoint.ValueString(), @@ -980,21 +1030,24 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, } // Check license + r.logWithContext(ctx, "DEBUG", "Checking license") quota, err := applier.CheckLicense(ctx, csp, !skipInitRPC, licenseID) if err != nil { + r.logWithContext(ctx, "WARN", "Unable to contact license server", map[string]interface{}{"error": err.Error()}) diags.AddWarning("Unable to contact license server.", "Please keep your vCPU quota in mind.") } else if licenseID == license.CommunityLicense { + r.logWithContext(ctx, "WARN", "Using community license") diags.AddWarning("Using community license.", "For details, see https://docs.edgeless.systems/constellation/overview/license") } else { - tflog.Info(ctx, fmt.Sprintf("Please keep your vCPU quota (%d) in mind.", quota)) + r.logWithContext(ctx, "INFO", "License check completed", map[string]interface{}{"vCPU_quota": quota}) } // 
Now, we perform the actual applying. // Run init RPC - var initDiags diag.Diagnostics if !skipInitRPC { // run the init RPC and retrieve the post-init state + r.logWithContext(ctx, "INFO", "Running init RPC") initRPCPayload := initRPCPayload{ csp: csp, masterSecret: secrets.masterSecret, @@ -1007,9 +1060,10 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, k8sVersion: k8sVersion, inClusterEndpoint: inClusterEndpoint, } - initDiags = r.runInitRPC(ctx, applier, initRPCPayload, data, validator, stateFile) + initDiags := r.runInitRPC(ctx, applier, initRPCPayload, data, validator, stateFile) diags.Append(initDiags...) if diags.HasError() { + r.logWithContext(ctx, "ERROR", "Init RPC failed", map[string]interface{}{"error": diags.Errors()}) return diags } } @@ -1017,6 +1071,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, // Here, we either have the post-init values from the actual init RPC // or, if performing an upgrade and skipping the init RPC, we have the // values from the Terraform state. + r.logWithContext(ctx, "DEBUG", "Setting cluster values") stateFile.SetClusterValues(state.ClusterValues{ ClusterID: data.ClusterID.ValueString(), OwnerID: data.OwnerID.ValueString(), @@ -1025,25 +1080,32 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, // Kubeconfig is in the state by now. Either through the init RPC or through // already being in the state. + r.logWithContext(ctx, "DEBUG", "Setting kubeconfig") if err := applier.SetKubeConfig([]byte(data.KubeConfig.ValueString())); err != nil { + r.logWithContext(ctx, "ERROR", "Failed to set kubeconfig", map[string]interface{}{"error": err.Error()}) diags.AddError("Setting kubeconfig", err.Error()) return diags } // Apply attestation config + r.logWithContext(ctx, "DEBUG", "Applying attestation config") if err := applier.ApplyJoinConfig(ctx, att.config, secrets.measurementSalt); err != nil { + r.logWithContext(ctx, "ERROR", "Failed to apply attestation config", map[string]interface{}{"error": err.Error()}) diags.AddError("Applying attestation config", err.Error()) return diags } // Extend API Server Certificate SANs + r.logWithContext(ctx, "DEBUG", "Extending API server certificate SANs") if err := applier.ExtendClusterConfigCertSANs(ctx, data.OutOfClusterEndpoint.ValueString(), "", apiServerCertSANs); err != nil { + r.logWithContext(ctx, "ERROR", "Failed to extend API server certificate SANs", map[string]interface{}{"error": err.Error()}) diags.AddError("Extending API server certificate SANs", err.Error()) return diags } // Apply Helm Charts + r.logWithContext(ctx, "INFO", "Applying Helm charts") payload := applyHelmChartsPayload{ csp: cloudprovider.FromString(data.CSP.ValueString()), attestationVariant: att.variant, @@ -1064,11 +1126,13 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, helmDiags := r.applyHelmCharts(ctx, applier, payload, stateFile) diags.Append(helmDiags...) 
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to apply Helm charts", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	if !skipNodeUpgrade {
 		// Upgrade node image
+		r.logWithContext(ctx, "INFO", "Upgrading node image")
 		err = applier.UpgradeNodeImage(ctx,
 			imageSemver,
 			image.Reference,
@@ -1076,26 +1140,33 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 		var upgradeImageErr *compatibility.InvalidUpgradeError
 		switch {
 		case errors.Is(err, kubecmd.ErrInProgress):
+			r.logWithContext(ctx, "WARN", "Skipping OS image upgrade: Another upgrade is already in progress")
 			diags.AddWarning("Skipping OS image upgrade", "Another upgrade is already in progress.")
 		case errors.As(err, &upgradeImageErr):
+			r.logWithContext(ctx, "WARN", "Ignoring invalid OS image upgrade", map[string]interface{}{"error": err.Error()})
 			diags.AddWarning("Ignoring invalid OS image upgrade", err.Error())
 		case err != nil:
+			r.logWithContext(ctx, "ERROR", "Failed to upgrade OS image", map[string]interface{}{"error": err.Error()})
 			diags.AddError("Upgrading OS image", err.Error())
 			return diags
 		}
 
 		// Upgrade Kubernetes components
+		r.logWithContext(ctx, "INFO", "Upgrading Kubernetes components")
 		err = applier.UpgradeKubernetesVersion(ctx, k8sVersion, false)
 		var upgradeK8sErr *compatibility.InvalidUpgradeError
 		switch {
 		case errors.As(err, &upgradeK8sErr):
+			r.logWithContext(ctx, "WARN", "Ignoring invalid Kubernetes components upgrade", map[string]interface{}{"error": err.Error()})
 			diags.AddWarning("Ignoring invalid Kubernetes components upgrade", err.Error())
 		case err != nil:
+			r.logWithContext(ctx, "ERROR", "Failed to upgrade Kubernetes components", map[string]interface{}{"error": err.Error()})
 			diags.AddError("Upgrading Kubernetes components", err.Error())
 			return diags
 		}
 	}
 
+	r.logWithContext(ctx, "INFO", "Cluster apply completed successfully")
 	return diags
 }
 
@@ -1401,3 +1472,29 @@ type nopSpinner struct{ io.Writer }
 func (s *nopSpinner) Start(string, bool) {}
 func (s *nopSpinner) Stop() {}
 func (s *nopSpinner) Write([]byte) (n int, err error) { return 1, nil }
+
+// logWithContext logs msg at the given level via tflog, tagging every entry
+// with the resource name and merging in any additional structured fields.
+func (r *ClusterResource) logWithContext(ctx context.Context, level string, msg string, additionalFields ...map[string]interface{}) {
+	fields := map[string]interface{}{
+		"resource": "ClusterResource",
+	}
+	for _, af := range additionalFields {
+		for k, v := range af {
+			fields[k] = v
+		}
+	}
+	switch level {
+	case "DEBUG":
+		tflog.Debug(ctx, msg, fields)
+	case "INFO":
+		tflog.Info(ctx, msg, fields)
+	case "WARN":
+		tflog.Warn(ctx, msg, fields)
+	case "ERROR":
+		tflog.Error(ctx, msg, fields)
+	default:
+		// Fall back to INFO so entries with an unexpected level are not silently dropped.
+		tflog.Info(ctx, msg, fields)
+	}
+}
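
A quick way to sanity-check the new logWithContext helper is a unit test against captured tflog output. The sketch below is not part of the diff above; it assumes the tflogtest package shipped with github.com/hashicorp/terraform-plugin-log (tflogtest.RootLogger redirects tflog output for a context to an io.Writer as one JSON object per entry), and the test name, field values, and exact JSON rendering are illustrative only:

package provider

import (
	"bytes"
	"context"
	"strings"
	"testing"

	"github.com/hashicorp/terraform-plugin-log/tflogtest"
)

func TestLogWithContext(t *testing.T) {
	var buf bytes.Buffer
	// Route tflog output for this context into buf.
	ctx := tflogtest.RootLogger(context.Background(), &buf)

	r := &ClusterResource{}
	r.logWithContext(ctx, "INFO", "creating cluster", map[string]interface{}{"csp": "aws"})

	out := buf.String()
	// Every entry should carry the injected resource field plus any extras.
	for _, want := range []string{`"resource":"ClusterResource"`, `"csp":"aws"`, "creating cluster"} {
		if !strings.Contains(out, want) {
			t.Errorf("log output missing %s, got: %s", want, out)
		}
	}
}

Because the helper routes everything through tflog, these entries also surface at runtime when the provider runs with TF_LOG=DEBUG or TF_LOG_PROVIDER=DEBUG set.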