diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yml
index ca28f2030..910e734b5 100644
--- a/.github/workflows/build-binaries.yml
+++ b/.github/workflows/build-binaries.yml
@@ -19,7 +19,7 @@ on:
 
 jobs:
   build-binaries:
-    runs-on: [arc-runner-set]
+    runs-on: ubuntu-latest
    steps:
       - name: Checkout
         uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
@@ -28,9 +28,6 @@ jobs:
 
       - name: Setup bazel
         uses: ./.github/actions/setup_bazel_nix
-        with:
-          useCache: "rbe"
-          rbePlatform: "ubuntu-22.04"
 
       - name: Build all
         shell: bash
diff --git a/.github/workflows/build-tf.yml b/.github/workflows/build-tf.yml
new file mode 100644
index 000000000..699832c87
--- /dev/null
+++ b/.github/workflows/build-tf.yml
@@ -0,0 +1,64 @@
+name: Build Terraform Provider
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  build-terraform-provider:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - arch: amd64
+            os: linux
+          - arch: amd64
+            os: darwin
+          - arch: arm64
+            os: linux
+          - arch: arm64
+            os: darwin
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Cache Bazel
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/bazel
+            ~/.cache/bazelisk
+          key: ${{ runner.os }}-bazel-${{ hashFiles('**/BUILD.bazel', '**/*.bzl', 'WORKSPACE') }}
+          restore-keys: |
+            ${{ runner.os }}-bazel-
+
+      - name: Setup bazel
+        uses: ./.github/actions/setup_bazel_nix
+        with:
+          useCache: "false"
+
+      - name: Cache Go dependencies
+        uses: actions/cache@v4
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Build Terraform Provider Binary
+        uses: ./.github/actions/build_tf_provider
+        with:
+          targetOS: ${{ matrix.os }}
+          targetArch: ${{ matrix.arch }}
+
+      - name: Upload Terraform Provider Binary as artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }}
+          path: |
+            build/terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }}
diff --git a/go.mod b/go.mod
index f4b66e794..4402e39f7 100644
--- a/go.mod
+++ b/go.mod
@@ -237,7 +237,7 @@ require (
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gofrs/uuid/v5 v5.2.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/btree v1.1.2 // indirect
diff --git a/go.sum b/go.sum
index 8386c801e..52e7523c8 100644
--- a/go.sum
+++ b/go.sum
@@ -408,8 +408,9 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
diff --git a/terraform-provider-constellation/internal/provider/cluster_resource.go b/terraform-provider-constellation/internal/provider/cluster_resource.go
index 096621af6..16ff297b0 100644
--- a/terraform-provider-constellation/internal/provider/cluster_resource.go
+++ b/terraform-provider-constellation/internal/provider/cluster_resource.go
@@ -589,21 +589,36 @@ func (r *ClusterResource) ModifyPlan(ctx context.Context, req resource.ModifyPla
 // Create is called when the resource is created.
 func (r *ClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
 	// Read data supplied by Terraform runtime into the model
+	r.logWithContext(ctx, "INFO", "Starting cluster creation")
+
 	var data ClusterResourceModel
 	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
 	if resp.Diagnostics.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to read plan data", map[string]interface{}{"error": resp.Diagnostics.Errors()})
 		return
 	}
 
 	// Apply changes to the cluster, including the init RPC and skipping the node upgrade.
+	r.logWithContext(ctx, "DEBUG", "Applying cluster changes", map[string]interface{}{
+		"csp":  data.CSP.ValueString(),
+		"name": data.Name.ValueString(),
+	})
 	diags := r.apply(ctx, &data, false, true)
 	resp.Diagnostics.Append(diags...)
 	if resp.Diagnostics.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to apply cluster changes", map[string]interface{}{"error": resp.Diagnostics.Errors()})
 		return
 	}
 
 	// Save data into Terraform state
+	r.logWithContext(ctx, "DEBUG", "Saving cluster data to state")
 	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+	if resp.Diagnostics.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to save cluster data to state", map[string]interface{}{"error": resp.Diagnostics.Errors()})
+		return
+	}
+
+	r.logWithContext(ctx, "INFO", "Cluster creation completed successfully")
 }
 
 // Read is called when the resource is read or refreshed.
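Note: the Create path above (and the apply function in the next hunk) repeats the same append, check, log, return sequence after every step. A minimal sketch of a helper that folds this into one call; the name `appendAndLog` and its signature are illustrative, not part of this change:

```go
// Hypothetical helper, assuming this file's existing imports (context,
// diag) and the logWithContext method added at the end of this diff.
// It appends new diagnostics, logs on error, and tells the caller
// whether to bail out.
func (r *ClusterResource) appendAndLog(ctx context.Context, diags *diag.Diagnostics, newDiags diag.Diagnostics, step string) bool {
	diags.Append(newDiags...)
	if diags.HasError() {
		r.logWithContext(ctx, "ERROR", "Failed to "+step, map[string]interface{}{"error": diags.Errors()})
		return true
	}
	return false
}
```

Each `diags.Append(...)` / `if diags.HasError()` pair below could then shrink to `if r.appendAndLog(ctx, &diags, convertDiags, "convert attestation config") { return diags }`.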
@@ -772,78 +787,103 @@ func (r *ClusterResource) validateGCPNetworkConfig(ctx context.Context, data *Cl
 // apply applies changes to a cluster. It can be used for both creating and updating a cluster.
 // This implements the core part of the Create and Update methods.
 func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, skipInitRPC, skipNodeUpgrade bool) diag.Diagnostics {
+	r.logWithContext(ctx, "INFO", "Starting cluster apply", map[string]interface{}{
+		"skipInitRPC":     skipInitRPC,
+		"skipNodeUpgrade": skipNodeUpgrade,
+	})
+
 	diags := diag.Diagnostics{}
 
 	// Parse and convert values from the Terraform state
 	// to formats the Constellation library can work with.
+	r.logWithContext(ctx, "DEBUG", "Validating GCP network config")
 	convertDiags := r.validateGCPNetworkConfig(ctx, data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to validate GCP network config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	csp := cloudprovider.FromString(data.CSP.ValueString())
+	r.logWithContext(ctx, "DEBUG", "Parsed CSP", map[string]interface{}{"csp": csp.String()})
 
 	// parse attestation config
+	r.logWithContext(ctx, "DEBUG", "Converting attestation config")
 	att, convertDiags := r.convertAttestationConfig(ctx, *data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to convert attestation config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse secrets (i.e. measurement salt, master secret, etc.)
+	r.logWithContext(ctx, "DEBUG", "Converting secrets")
 	secrets, convertDiags := r.convertSecrets(*data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to convert secrets", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse API server certificate SANs
+	r.logWithContext(ctx, "DEBUG", "Getting API server cert SANs")
 	apiServerCertSANs, convertDiags := r.getAPIServerCertSANs(ctx, data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get API server cert SANs", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse network config
+	r.logWithContext(ctx, "DEBUG", "Getting network config")
 	networkCfg, getDiags := r.getNetworkConfig(ctx, data)
 	diags.Append(getDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get network config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse Constellation microservice config
+	r.logWithContext(ctx, "DEBUG", "Parsing microservice config")
 	var microserviceCfg extraMicroservicesAttribute
 	convertDiags = data.ExtraMicroservices.As(ctx, &microserviceCfg, basetypes.ObjectAsOptions{
 		UnhandledNullAsEmpty: true, // we want to allow null values, as the CSIDriver field is optional
 	})
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to parse microservice config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse Constellation microservice version
+	r.logWithContext(ctx, "DEBUG", "Getting microservice version")
 	microserviceVersion, convertDiags := r.getMicroserviceVersion(data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get microservice version", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse Kubernetes version
+	r.logWithContext(ctx, "DEBUG", "Getting Kubernetes version")
 	k8sVersion, getDiags := r.getK8sVersion(data)
 	diags.Append(getDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get Kubernetes version", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse OS image version
+	r.logWithContext(ctx, "DEBUG", "Getting OS image version")
 	image, imageSemver, convertDiags := r.getImageVersion(ctx, data)
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to get OS image version", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	// parse license ID
+	r.logWithContext(ctx, "DEBUG", "Parsing license ID")
 	licenseID := data.LicenseID.ValueString()
 	switch {
 	case image.MarketplaceImage != nil && *image.MarketplaceImage:
@@ -859,6 +899,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	}
 
 	// Parse in-cluster service account info.
+	r.logWithContext(ctx, "DEBUG", "Parsing service account info")
 	serviceAccPayload := constellation.ServiceAccountPayload{}
 	var gcpConfig gcpAttribute
 	var azureConfig azureAttribute
@@ -868,6 +909,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	convertDiags = data.GCP.As(ctx, &gcpConfig, basetypes.ObjectAsOptions{})
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to parse GCP config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
@@ -891,6 +933,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	convertDiags = data.Azure.As(ctx, &azureConfig, basetypes.ObjectAsOptions{})
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to parse Azure config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 	serviceAccPayload.Azure = azureshared.ApplicationCredentials{
@@ -903,6 +946,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	convertDiags = data.OpenStack.As(ctx, &openStackConfig, basetypes.ObjectAsOptions{})
 	diags.Append(convertDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to parse OpenStack config", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 	cloudsYAML, err := clouds.ReadCloudsYAML(file.NewHandler(afero.NewOsFs()), openStackConfig.CloudsYAMLPath)
@@ -927,8 +971,11 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 		}
 	}
+
+	r.logWithContext(ctx, "DEBUG", "Marshalling service account URI")
 	serviceAccURI, err := constellation.MarshalServiceAccountURI(csp, serviceAccPayload)
 	if err != nil {
+		r.logWithContext(ctx, "ERROR", "Failed to marshal service account URI", map[string]interface{}{"error": err.Error()})
 		diags.AddError("Marshalling service account URI", err.Error())
 		return diags
 	}
@@ -940,14 +987,17 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	}
 
 	// setup clients
+	r.logWithContext(ctx, "DEBUG", "Setting up clients")
 	validator, err := choose.Validator(att.config, &tfContextLogger{ctx: ctx})
 	if err != nil {
+		r.logWithContext(ctx, "ERROR", "Failed to choose validator", map[string]interface{}{"error": err.Error()})
 		diags.AddError("Choosing validator", err.Error())
 		return diags
 	}
 	applier := r.newApplier(ctx, validator)
 
 	// Construct in-memory state file
+	r.logWithContext(ctx, "DEBUG", "Constructing in-memory state file")
 	stateFile := state.New().SetInfrastructure(state.Infrastructure{
 		UID:             data.UID.ValueString(),
 		ClusterEndpoint: data.OutOfClusterEndpoint.ValueString(),
@@ -980,21 +1030,24 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	}
 
 	// Check license
+	r.logWithContext(ctx, "DEBUG", "Checking license")
 	quota, err := applier.CheckLicense(ctx, csp, !skipInitRPC, licenseID)
 	if err != nil {
+		r.logWithContext(ctx, "WARN", "Unable to contact license server", map[string]interface{}{"error": err.Error()})
 		diags.AddWarning("Unable to contact license server.", "Please keep your vCPU quota in mind.")
 	} else if licenseID == license.CommunityLicense {
+		r.logWithContext(ctx, "WARN", "Using community license")
 		diags.AddWarning("Using community license.", "For details, see https://docs.edgeless.systems/constellation/overview/license")
 	} else {
-		tflog.Info(ctx, fmt.Sprintf("Please keep your vCPU quota (%d) in mind.", quota))
+		r.logWithContext(ctx, "INFO", "License check completed", map[string]interface{}{"vCPU_quota": quota})
 	}
 
 	// Now, we perform the actual applying.
 	// Run init RPC
-	var initDiags diag.Diagnostics
 	if !skipInitRPC {
 		// run the init RPC and retrieve the post-init state
+		r.logWithContext(ctx, "INFO", "Running init RPC")
 		initRPCPayload := initRPCPayload{
 			csp:          csp,
 			masterSecret: secrets.masterSecret,
@@ -1007,9 +1060,10 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 			k8sVersion:        k8sVersion,
 			inClusterEndpoint: inClusterEndpoint,
 		}
-		initDiags = r.runInitRPC(ctx, applier, initRPCPayload, data, validator, stateFile)
+		initDiags := r.runInitRPC(ctx, applier, initRPCPayload, data, validator, stateFile)
 		diags.Append(initDiags...)
 		if diags.HasError() {
+			r.logWithContext(ctx, "ERROR", "Init RPC failed", map[string]interface{}{"error": diags.Errors()})
 			return diags
 		}
 	}
@@ -1017,6 +1071,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	// Here, we either have the post-init values from the actual init RPC
 	// or, if performing an upgrade and skipping the init RPC, we have the
 	// values from the Terraform state.
+	r.logWithContext(ctx, "DEBUG", "Setting cluster values")
 	stateFile.SetClusterValues(state.ClusterValues{
 		ClusterID: data.ClusterID.ValueString(),
 		OwnerID:   data.OwnerID.ValueString(),
@@ -1025,25 +1080,32 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 
 	// Kubeconfig is in the state by now. Either through the init RPC or through
 	// already being in the state.
+	r.logWithContext(ctx, "DEBUG", "Setting kubeconfig")
 	if err := applier.SetKubeConfig([]byte(data.KubeConfig.ValueString())); err != nil {
+		r.logWithContext(ctx, "ERROR", "Failed to set kubeconfig", map[string]interface{}{"error": err.Error()})
 		diags.AddError("Setting kubeconfig", err.Error())
 		return diags
 	}
 
 	// Apply attestation config
+	r.logWithContext(ctx, "DEBUG", "Applying attestation config")
 	if err := applier.ApplyJoinConfig(ctx, att.config, secrets.measurementSalt); err != nil {
+		r.logWithContext(ctx, "ERROR", "Failed to apply attestation config", map[string]interface{}{"error": err.Error()})
 		diags.AddError("Applying attestation config", err.Error())
 		return diags
 	}
 
 	// Extend API Server Certificate SANs
+	r.logWithContext(ctx, "DEBUG", "Extending API server certificate SANs")
 	if err := applier.ExtendClusterConfigCertSANs(ctx, data.OutOfClusterEndpoint.ValueString(), "", apiServerCertSANs); err != nil {
+		r.logWithContext(ctx, "ERROR", "Failed to extend API server certificate SANs", map[string]interface{}{"error": err.Error()})
 		diags.AddError("Extending API server certificate SANs", err.Error())
 		return diags
 	}
 
 	// Apply Helm Charts
+	r.logWithContext(ctx, "INFO", "Applying Helm charts")
 	payload := applyHelmChartsPayload{
 		csp:                cloudprovider.FromString(data.CSP.ValueString()),
 		attestationVariant: att.variant,
@@ -1064,11 +1126,13 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 	helmDiags := r.applyHelmCharts(ctx, applier, payload, stateFile)
 	diags.Append(helmDiags...)
 	if diags.HasError() {
+		r.logWithContext(ctx, "ERROR", "Failed to apply Helm charts", map[string]interface{}{"error": diags.Errors()})
 		return diags
 	}
 
 	if !skipNodeUpgrade {
 		// Upgrade node image
+		r.logWithContext(ctx, "INFO", "Upgrading node image")
 		err = applier.UpgradeNodeImage(ctx,
 			imageSemver,
 			image.Reference,
@@ -1076,26 +1140,33 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
 		var upgradeImageErr *compatibility.InvalidUpgradeError
 		switch {
 		case errors.Is(err, kubecmd.ErrInProgress):
+			r.logWithContext(ctx, "WARN", "Skipping OS image upgrade: Another upgrade is already in progress")
 			diags.AddWarning("Skipping OS image upgrade", "Another upgrade is already in progress.")
 		case errors.As(err, &upgradeImageErr):
+			r.logWithContext(ctx, "WARN", "Ignoring invalid OS image upgrade", map[string]interface{}{"error": err.Error()})
 			diags.AddWarning("Ignoring invalid OS image upgrade", err.Error())
 		case err != nil:
+			r.logWithContext(ctx, "ERROR", "Failed to upgrade OS image", map[string]interface{}{"error": err.Error()})
 			diags.AddError("Upgrading OS image", err.Error())
 			return diags
 		}
 
 		// Upgrade Kubernetes components
+		r.logWithContext(ctx, "INFO", "Upgrading Kubernetes components")
 		err = applier.UpgradeKubernetesVersion(ctx, k8sVersion, false)
 		var upgradeK8sErr *compatibility.InvalidUpgradeError
 		switch {
 		case errors.As(err, &upgradeK8sErr):
+			r.logWithContext(ctx, "WARN", "Ignoring invalid Kubernetes components upgrade", map[string]interface{}{"error": err.Error()})
 			diags.AddWarning("Ignoring invalid Kubernetes components upgrade", err.Error())
 		case err != nil:
+			r.logWithContext(ctx, "ERROR", "Failed to upgrade Kubernetes components", map[string]interface{}{"error": err.Error()})
 			diags.AddError("Upgrading Kubernetes components", err.Error())
 			return diags
 		}
 	}
 
+	r.logWithContext(ctx, "INFO", "Cluster apply completed successfully")
 	return diags
 }
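The two upgrade switches above triage errors into warnings (another upgrade already in progress, invalid upgrade request) versus hard failures. A standalone sketch of the same triage, to make the precedence explicit; `kubecmd.ErrInProgress` and `compatibility.InvalidUpgradeError` are the real types used above, while the function name and return values are illustrative only:

```go
// classifyUpgradeErr mirrors the error triage in apply: nil is fine,
// in-progress and invalid upgrades only warn, everything else fails.
func classifyUpgradeErr(err error) string {
	var invalidUpgrade *compatibility.InvalidUpgradeError
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, kubecmd.ErrInProgress):
		return "warn: another upgrade is already in progress"
	case errors.As(err, &invalidUpgrade):
		return "warn: invalid upgrade request, ignored"
	default:
		return "error: upgrade failed"
	}
}
```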
@@ -1401,3 +1472,24 @@ type nopSpinner struct{ io.Writer }
 func (s *nopSpinner) Start(string, bool) {}
 func (s *nopSpinner) Stop()              {}
 func (s *nopSpinner) Write([]byte) (n int, err error) { return 1, nil }
+
+func (r *ClusterResource) logWithContext(ctx context.Context, level string, msg string, additionalFields ...map[string]interface{}) {
+	fields := map[string]interface{}{
+		"resource": "ClusterResource",
+	}
+	for _, af := range additionalFields {
+		for k, v := range af {
+			fields[k] = v
+		}
+	}
+	switch level {
+	case "DEBUG":
+		tflog.Debug(ctx, msg, fields)
+	case "INFO":
+		tflog.Info(ctx, msg, fields)
+	case "WARN":
+		tflog.Warn(ctx, msg, fields)
+	case "ERROR":
+		tflog.Error(ctx, msg, fields)
+	}
+}
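The wrapper always injects a static `resource` field and merges any extra field maps on top, later maps overwriting earlier keys. A sketch of a unit test for that behavior, assuming terraform-plugin-log's `tflogtest` package; the test name and assertions are illustrative:

```go
package provider

import (
	"bytes"
	"context"
	"encoding/json"
	"testing"

	"github.com/hashicorp/terraform-plugin-log/tflogtest"
)

func TestLogWithContext(t *testing.T) {
	var buf bytes.Buffer
	// tflogtest.RootLogger returns a context whose tflog output is
	// written to buf as JSON, one object per line.
	ctx := tflogtest.RootLogger(context.Background(), &buf)

	r := &ClusterResource{}
	r.logWithContext(ctx, "INFO", "checking field merge", map[string]interface{}{"csp": "gcp"})

	var entry map[string]interface{}
	if err := json.Unmarshal(buf.Bytes(), &entry); err != nil {
		t.Fatalf("parsing log line: %v", err)
	}
	// Both the injected resource field and the extra field should be present.
	if entry["resource"] != "ClusterResource" || entry["csp"] != "gcp" {
		t.Errorf("unexpected log fields: %v", entry)
	}
}
```

One caveat worth noting: the switch silently drops messages with unrecognized level strings, so a typo like "WARNING" would log nothing; a default case falling back to tflog.Info would make the helper fail loud instead of quiet.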