Commit 00c269d

final work

1 parent 8b7ddd4 commit 00c269d

File tree

7 files changed: +355 −132 lines changed

build/int.cloudbuild.yaml

Lines changed: 281 additions & 5 deletions
Large diffs are not rendered by default.

test/fixtures/node_pool/outputs.tf

Lines changed: 4 additions & 0 deletions
@@ -87,3 +87,7 @@ output "registry_project_ids" {
 output "random_string" {
   value = random_string.suffix.result
 }
+
+output "compute_engine_service_account" {
+  value = var.compute_engine_service_accounts[0]
+}

test/integration/go.mod

Lines changed: 2 additions & 2 deletions
@@ -9,6 +9,8 @@ require (
   github.com/gruntwork-io/terratest v0.48.1
   github.com/hashicorp/terraform-json v0.24.0
   github.com/stretchr/testify v1.10.0
+  github.com/tidwall/gjson v1.18.0
+  golang.org/x/sync v0.10.0
 )

 require (
@@ -103,7 +105,6 @@ require (
   github.com/pquerna/otp v1.4.0 // indirect
   github.com/russross/blackfriday/v2 v2.1.0 // indirect
   github.com/spf13/pflag v1.0.5 // indirect
-  github.com/tidwall/gjson v1.18.0 // indirect
   github.com/tidwall/match v1.1.1 // indirect
   github.com/tidwall/pretty v1.2.1 // indirect
   github.com/tidwall/sjson v1.2.5 // indirect
@@ -116,7 +117,6 @@ require (
   golang.org/x/mod v0.22.0 // indirect
   golang.org/x/net v0.31.0 // indirect
   golang.org/x/oauth2 v0.24.0 // indirect
-  golang.org/x/sync v0.10.0 // indirect
   golang.org/x/sys v0.28.0 // indirect
   golang.org/x/term v0.27.0 // indirect
   golang.org/x/text v0.21.0 // indirect

test/integration/node_pool/node_pool_test.go

Lines changed: 11 additions & 114 deletions
@@ -15,16 +15,12 @@ package node_pool

 import (
   "fmt"
-  "slices"
   "testing"
   "time"

   "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai"
-  "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud"
   "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden"
   "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft"
-  "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils"
-  "github.com/gruntwork-io/terratest/modules/k8s"
   "github.com/stretchr/testify/assert"
   "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils"
 )
@@ -44,128 +40,29 @@ func TestNodePool(t *testing.T) {
     clusterName := bpt.GetStringOutput("cluster_name")
     randomString := bpt.GetStringOutput("random_string")
     kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint")
+    nodeServiceAccount := bpt.GetStringOutput("compute_engine_service_account")

     // Retrieve Project CAI
     projectCAI := cai.GetProjectResources(t, projectId, cai.WithAssetTypes([]string{"container.googleapis.com/Cluster", "k8s.io/Node"}))
-    t.Log(projectCAI.Raw)
-    // Retrieve Cluster from CAI
-    clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName)
-
-    if !projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data").Exists() {
-      t.Fatalf("Cluster not found: %s", clusterResourceName)
-    }

-    cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data")
-    t.Log(cluster.Raw)
+    // Retrieve Cluster from CAI
     // Equivalent gcloud describe command (classic)
     // cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId)
+    clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName)
+    cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data")

-    // Cluster Assertions (classic)
-    assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running")
-    assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type")
-    assert.Equal("https://www.googleapis.com/auth/cloud-platform", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes.0").String(), "has the expected oauth scopes")
-    assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account")
-    assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile")
-    assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning")
-    assert.JSONEq(`[
-      {
-        "maximum": "20",
-        "minimum": "5",
-        "resourceType": "cpu"
-      },
-      {
-        "maximum": "30",
-        "minimum": "10",
-        "resourceType": "memory"
-      }
-    ]`,
-      cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits")
-
-    // Cluster Assertions using golden image (TestNodePool.json) with sanitizer
+    // Setup golden image with sanitizers
     g := golden.NewOrUpdate(t, cluster.String(),
+      golden.WithSanitizer(golden.StringSanitizer(nodeServiceAccount, "NODE_SERVICE_ACCOUNT")),
      golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")),
      golden.WithSanitizer(golden.StringSanitizer(randomString, "RANDOM_STRING")),
      golden.WithSanitizer(golden.StringSanitizer(kubernetesEndpoint, "KUBERNETES_ENDPOINT")),
     )
-    checkPaths := utils.GetTerminalJSONPaths(g.GetJSON())
-
-    exemptPaths := []string{"nodePools"}
-    checkPaths = slices.DeleteFunc(checkPaths, func(s string) bool {
-      return slices.Contains(exemptPaths, s)
-    })
-    g.JSONPathEqs(assert, cluster, checkPaths)
-
-    // NodePool Assertions
-    nodePools := []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"}
-    for _, nodePool := range nodePools {
-      g.JSONPathEqs(assert, cluster.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)), utils.GetTerminalJSONPaths(g.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool))))
-    }
-
-    // nodePool-01 Assertions
-    assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists")
-    assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type")
-    assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.enabled").Bool(), "has autoscaling enabled")
-    assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.minNodeCount").Int(), "has the expected minimum node count")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoRepair").Bool(), "has autorepair enabled")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoUpgrade").Bool(), "has automatic upgrades enabled")
-    assert.Equal("kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.shutdown-script").String(), "pool-2 exists")
-    assert.Equal("false", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.disable-legacy-endpoints").String(), "pool-2 exists")
-    assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "pool-01-example": "true", "cluster_name": "%s", "node_pool": "pool-01"}`, clusterName),
-      cluster.Get("nodePools.#(name==\"pool-01\").config.labels").String(), "has the expected labels")
-    assert.ElementsMatch([]string{"all-node-example", "pool-01-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-01", clusterName)},
-      cluster.Get("nodePools.#(name==\"pool-01\").config.tags").Value().([]interface{}), "has the expected network tags")
-    assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl")
-    assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl")
-
-    // nodePool-02 Assertions
-    assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists")
-    assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled")
-    assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.minNodeCount").Int(), "has the expected minimum node count")
-    assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.maxNodeCount").Int(), "has the expected maximum node count")
-    assert.Equal(int64(30), cluster.Get("nodePools.#(name==\"pool-02\").config.diskSizeGb").Int(), "has the expected disk size")
-    assert.Equal("pd-standard", cluster.Get("nodePools.#(name==\"pool-02\").config.diskType").String(), "has the expected disk type")
-    assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-02\").config.imageType").String(), "has the expected image")
-    assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-02"}`, clusterName),
-      cluster.Get("nodePools.#(name==\"pool-02\").config.labels").String(), "has the expected labels")
-    assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-02", clusterName)},
-      cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags")
-    assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls")
-
-    // nodwPool-03 Assertions
-    assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists")
-    assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations")
-    assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type")
-    assert.False(cluster.Get("nodePools.#(name==\"pool-03\").autoscaling.enabled").Bool(), "has autoscaling enabled")
-    assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-03\").initialNodeCount").Int(), "has the expected inital node count")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoRepair").Bool(), "has autorepair enabled")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoUpgrade").Bool(), "has automatic upgrades enabled")
-    assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-03", "sandbox.gke.io/runtime": "gvisor"}`, clusterName),
-      cluster.Get("nodePools.#(name==\"pool-03\").config.labels").String(), "has the expected labels")
-    assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-03", clusterName)},
-      cluster.Get("nodePools.#(name==\"pool-03\").config.tags").Value().([]interface{}), "has the expected network tags")
-    assert.Equal("172.16.0.0/18", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podIpv4CidrBlock").String(), "has the expected pod range")
-    assert.Equal("test", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podRange").String(), "has the expected pod range")
-    assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-03\").config.imageType").String(), "has the expected image")
-    assert.Equal("static", cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuManagerPolicy").String(), "has the expected cpuManagerPolicy kubelet config")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config")
-    assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls")
-
-    // nodePool-04 Assertions
-    assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists")
-    assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled")

-    // nodePool-05 Assertions
-    assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists")
-    assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled")
+    // Cluster (and listed node pools) Assertions
+    testutils.TGKEAssertGolden(assert, g, &cluster, []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"}, []string{"monitoringConfig.componentConfig.enableComponents"}) // TODO: enableComponents is UL

     // K8s Assertions
-    gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId)
-    k8sOpts := k8s.KubectlOptions{}
-    clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json")
-    assert.NoError(err)
-    clusterNodes := utils.ParseKubectlJSONResult(t, clusterNodesOp)
     assert.JSONEq(`[
       {
         "effect": "PreferNoSchedule",
@@ -178,7 +75,7 @@ func TestNodePool(t *testing.T) {
         "value": "true"
       }
     ]`,
-      clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-01\").spec.taints").String(), "has the expected taints")
+      projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-01\").resource.data.spec.taints").String(), "has the expected taints")
     assert.JSONEq(`[
       {
         "effect": "PreferNoSchedule",
@@ -191,7 +88,7 @@ func TestNodePool(t *testing.T) {
         "value": "present"
       }
     ]`,
-      clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint")
+      projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-02\").resource.data.spec.taints").String(), "has the expected all-pools-example taint")
     assert.JSONEq(`[
       {
         "effect": "PreferNoSchedule",
@@ -204,7 +101,7 @@ func TestNodePool(t *testing.T) {
         "value": "gvisor"
      }
     ]`,
-      clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint")
+      projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-03\").resource.data.spec.taints").String(), "has the expected all-pools-example taint")
   })

   bpt.Test()

test/integration/node_pool/testdata/TestNodePool.json

Lines changed: 6 additions & 6 deletions
@@ -221,7 +221,7 @@
         "https://www.googleapis.com/auth/userinfo.email",
         "https://www.googleapis.com/auth/cloud-platform"
       ],
-      "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
+      "serviceAccount": "NODE_SERVICE_ACCOUNT",
       "shieldedInstanceConfig": {
         "enableIntegrityMonitoring": true
       },
@@ -354,7 +354,7 @@
       "oauthScopes": [
         "https://www.googleapis.com/auth/cloud-platform"
       ],
-      "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
+      "serviceAccount": "NODE_SERVICE_ACCOUNT",
       "shieldedInstanceConfig": {
         "enableIntegrityMonitoring": true
       },
@@ -451,7 +451,7 @@
       "oauthScopes": [
         "https://www.googleapis.com/auth/cloud-platform"
       ],
-      "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
+      "serviceAccount": "NODE_SERVICE_ACCOUNT",
       "shieldedInstanceConfig": {
         "enableIntegrityMonitoring": true
       },
@@ -541,7 +541,7 @@
       "sandboxConfig": {
         "type": "GVISOR"
       },
-      "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
+      "serviceAccount": "NODE_SERVICE_ACCOUNT",
       "shieldedInstanceConfig": {
         "enableIntegrityMonitoring": true
       },
@@ -632,7 +632,7 @@
       "reservationAffinity": {
         "consumeReservationType": "NO_RESERVATION"
       },
-      "serviceAccount": "default",
+      "serviceAccount": "NODE_SERVICE_ACCOUNT",
       "shieldedInstanceConfig": {
         "enableIntegrityMonitoring": true
       },
@@ -724,7 +724,7 @@
       "oauthScopes": [
         "https://www.googleapis.com/auth/cloud-platform"
       ],
-      "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
+      "serviceAccount": "default",
       "shieldedInstanceConfig": {
         "enableIntegrityMonitoring": true
       },

test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go

Lines changed: 5 additions & 5 deletions
@@ -35,20 +35,20 @@ func TestSaferClusterIapBastion(t *testing.T) {
     // bpt.DefaultVerify(assert)
     testutils.TGKEVerify(t, bpt, assert) // Verify Resources

-    test_command, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ")
+    testCommand, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ")

     // pre run ssh command so that ssh-keygen can run
-    gcloud.RunCmd(t, test_command,
+    gcloud.RunCmd(t, testCommand,
       gcloud.WithCommonArgs([]string{}),
     )

-    cluster_version := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version"))
+    clusterVersion := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version"))

-    op := gcloud.Run(t, test_command,
+    op := gcloud.Run(t, testCommand,
       gcloud.WithCommonArgs([]string{}),
     )

-    assert.Equal(cluster_version, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE")
+    assert.Equal(clusterVersion, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE")
   })

   bpt.Test()

test/integration/testutils/utils.go

Lines changed: 46 additions & 0 deletions
@@ -15,14 +15,19 @@
 package testutils

 import (
+  "fmt"
   "slices"
   "strings"
   "testing"
   "time"

+  "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden"
   "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft"
+  "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils"
   tfjson "github.com/hashicorp/terraform-json"
   "github.com/stretchr/testify/assert"
+  "github.com/tidwall/gjson"
+  "golang.org/x/sync/errgroup"
 )

 var (
@@ -36,6 +41,8 @@ var (
   // API Rate limit exceeded errors can be retried.
   ".*rateLimitExceeded.*": "Rate limit exceeded.",
   }
+
+  ClusterAlwaysExemptPaths = []string{"nodePools"} // node pools are separately checked by name
 )

 func GetTestProjectFromSetup(t *testing.T, idx int) string {
@@ -67,3 +74,42 @@ func TGKEVerifyExemptResources(t *testing.T, b *tft.TFBlueprintTest, assert *ass
   assert.Equal(tfjson.Actions{tfjson.ActionNoop}, r.Change.Actions, "Plan must be no-op for resource: %s", r.Address)
  }
 }
+
+// TGKEAssertGolden asserts a cluster and listed node pools against paths in golden image
+func TGKEAssertGolden(assert *assert.Assertions, golden *golden.GoldenFile, clusterJson *gjson.Result, nodePools []string, exemptClusterPaths []string) {
+  // Retrieve golden paths
+  clusterCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON())
+
+  // Remove exempt cluster paths
+  exemptPaths := slices.Concat(exemptClusterPaths, ClusterAlwaysExemptPaths)
+  clusterCheckPaths = slices.DeleteFunc(clusterCheckPaths, func(s string) bool {
+    for _, exempPath := range exemptPaths {
+      if strings.HasPrefix(s, exempPath) {
+        return true
+      }
+    }
+    return false
+  })
+
+  // Cluster assertions
+  golden.JSONPathEqs(assert, *clusterJson, clusterCheckPaths)
+
+  // NodePool assertions
+  for _, nodePool := range nodePools {
+    assert.Truef(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)).Exists(), "NodePool not found: %s", nodePool)
+
+    nodeCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)))
+
+    syncGroup := new(errgroup.Group)
+    syncGroup.SetLimit(24)
+    for _, nodeCheckPath := range nodeCheckPaths {
+      nodeCheckPath := nodeCheckPath
+      syncGroup.Go(func() error {
+        gotData := golden.ApplySanitizers(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String())
+        gfData := golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String()
+        assert.Equalf(gfData, gotData, "For node %s path %q expected %q to match fixture %q", nodePool, nodeCheckPath, gotData, gfData)
+        return nil
+      })
+    }
+  }
+}
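
For reference, a condensed sketch of how the new testutils.TGKEAssertGolden helper is driven from a blueprint test. The call shape mirrors the node_pool test in this commit; the cluster gjson.Result, the sanitizer values, and the single pool name here are illustrative placeholders rather than a complete test:

    // Build (or update) the golden file, sanitizing environment-specific
    // values so the committed fixture stays stable across test projects
    // and node service accounts.
    g := golden.NewOrUpdate(t, cluster.String(),
      golden.WithSanitizer(golden.StringSanitizer(nodeServiceAccount, "NODE_SERVICE_ACCOUNT")),
      golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")),
    )

    // Compare every terminal JSON path in the golden file against the live
    // cluster, skipping "nodePools" (always exempt) plus any caller-supplied
    // path prefixes (nil here), then check each listed node pool path-by-path.
    testutils.TGKEAssertGolden(assert, g, &cluster, []string{"pool-01"}, nil)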
