@@ -18,12 +18,14 @@ import (
 	"testing"
 	"time"
 
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai"
 	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud"
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden"
 	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft"
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils"
 	"github.com/gruntwork-io/terratest/modules/k8s"
 	"github.com/stretchr/testify/assert"
 	"github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils"
-	gkeutils "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/utils"
 )
 
 func TestNodePool(t *testing.T) {
@@ -34,20 +36,24 @@ func TestNodePool(t *testing.T) {
 	bpt.DefineVerify(func(assert *assert.Assertions) {
 		// Skipping Default Verify as the Verify Stage fails due to change in Client Cert Token
 		// bpt.DefaultVerify(assert)
-		gkeutils.TGKEVerify(t, bpt, assert) // Verify Resources
+		testutils.TGKEVerify(t, bpt, assert) // Verify Resources
 
 		projectId := bpt.GetStringOutput("project_id")
 		location := bpt.GetStringOutput("location")
 		clusterName := bpt.GetStringOutput("cluster_name")
+		randomString := bpt.GetStringOutput("random_string")
+		kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint")
 
-		//cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId)
+		// Retrieve Cluster using CAI
 		clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName)
-		cluster := gkeutils.GetProjectResources(t, projectId, gkeutils.WithAssetType("container.googleapis.com/Cluster")).Get("#(name=\"" + clusterResourceName + "\").resource.data")
+		cluster := cai.GetProjectResources(t, projectId, cai.WithAssetTypes([]string{"container.googleapis.com/Cluster"})).Get("#(name=\"" + clusterResourceName + "\").resource.data")
+		// Equivalent gcloud describe command (classic)
+		// cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId)
 
-		// Cluster
+		// Cluster Assertions (classic)
 		assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running")
 		assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type")
-		assert.Equal("[\n  \"https://www.googleapis.com/auth/cloud-platform\"\n]", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes").String(), "has the expected oauth scopes")
+		assert.Equal("https://www.googleapis.com/auth/cloud-platform", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes.0").String(), "has the expected oauth scopes")
 		assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account")
 		assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile")
 		assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning")
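Note for reviewers: the CAI lookup above replaces a per-cluster "gcloud container clusters describe" with a single asset query filtered by a gjson name match. A minimal sketch of the pattern, assuming the imports already in this file plus github.com/tidwall/gjson; the helper name getClusterFromCAI is illustrative, not part of this module:

    // Sketch: fetch a GKE cluster's data from Cloud Asset Inventory.
    // One CAI call lists every Cluster asset in the project; the gjson query
    // #(name="...") selects the single matching asset, and .resource.data is
    // the cluster object the assertions operate on.
    func getClusterFromCAI(t *testing.T, projectID, location, clusterName string) gjson.Result {
    	name := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectID, location, clusterName)
    	return cai.GetProjectResources(t, projectID,
    		cai.WithAssetTypes([]string{"container.googleapis.com/Cluster"}),
    	).Get("#(name=\"" + name + "\").resource.data")
    }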
@@ -65,7 +71,19 @@ func TestNodePool(t *testing.T) {
 		]`,
 			cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits")
 
-		// Pool-01
+		// Cluster Assertions using golden image (TestNodePool.json) with sanitizer
+		g := golden.NewOrUpdate(t, cluster.String(),
+			golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")),
+			golden.WithSanitizer(golden.StringSanitizer(randomString, "RANDOM_STRING")),
+			golden.WithSanitizer(golden.StringSanitizer(kubernetesEndpoint, "KUBERNETES_ENDPOINT")),
+		)
+
+		// TODO: wrap as single function in golden pkg
+		// Retrieve all paths from golden image
+		jsonPaths := utils.GetTerminalJSONPaths(g.GetJSON())
+		g.JSONPathEqs(assert, cluster, jsonPaths)
+
+		// nodePool-01 Assertions
 		assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists")
 		assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type")
 		assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image")
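Note on the TODO above: a single-function wrapper in the golden package could take the shape sketched below. JSONPathEqs, GetJSON, and GetTerminalJSONPaths are the calls already used in this diff; the wrapper name and the exported GoldenFile type are assumptions, so treat this as an illustration rather than the package's API:

    // Illustrative wrapper for the TODO; not part of the golden package today.
    func verifyGoldenPaths(a *assert.Assertions, g *golden.GoldenFile, got gjson.Result) {
    	// Extract every terminal path from the golden image, then assert that
    	// the live data matches the golden value at each of those paths.
    	g.JSONPathEqs(a, got, utils.GetTerminalJSONPaths(g.GetJSON()))
    }

With such a helper, the two lines above would collapse to verifyGoldenPaths(assert, g, cluster).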
@@ -82,7 +100,7 @@ func TestNodePool(t *testing.T) {
 		assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl")
 		assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl")
 
-		// Pool-02
+		// nodePool-02 Assertions
 		assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists")
 		assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type")
 		assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled")
@@ -97,7 +115,7 @@ func TestNodePool(t *testing.T) {
 			cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags")
 		assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls")
 
-		// Pool-03
+		// nodePool-03 Assertions
 		assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists")
 		assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations")
 		assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type")
@@ -116,20 +134,20 @@ func TestNodePool(t *testing.T) {
 		assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config")
 		assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls")
 
-		// Pool-04
+		// nodePool-04 Assertions
 		assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists")
 		assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled")
 
-		// Pool-05
+		// nodePool-05 Assertions
 		assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists")
 		assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled")
 
-		// K8s
+		// K8s Assertions
 		gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId)
 		k8sOpts := k8s.KubectlOptions{}
 		clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json")
 		assert.NoError(err)
-		clusterNodes := testutils.ParseKubectlJSONResult(t, clusterNodesOp)
+		clusterNodes := utils.ParseKubectlJSONResult(t, clusterNodesOp)
 		assert.JSONEq(`[
 			{
 				"effect": "PreferNoSchedule",
@@ -148,6 +166,11 @@ func TestNodePool(t *testing.T) {
 				"effect": "PreferNoSchedule",
 				"key": "all-pools-example",
 				"value": "true"
+			},
+			{
+				"effect": "NoSchedule",
+				"key": "nvidia.com/gpu",
+				"value": "present"
 			}
 		]`,
 			clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint")
@@ -156,6 +179,11 @@ func TestNodePool(t *testing.T) {
 				"effect": "PreferNoSchedule",
 				"key": "all-pools-example",
 				"value": "true"
+			},
+			{
+				"effect": "NoSchedule",
+				"key": "sandbox.gke.io/runtime",
+				"value": "gvisor"
 			}
 		]`,
 			clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint")
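Note: the two taint assertions above pin the entire taints array as a JSON literal, which is why each newly expected taint (nvidia.com/gpu, sandbox.gke.io/runtime) has to be added to the expected output here. When only one taint matters, a narrower gjson query avoids rewriting the literal; a sketch, assuming the same kubectl get nodes -o json output parsed above:

    // Sketch: assert only the gvisor taint on pool-03 nodes instead of
    // matching the whole taints array.
    taint := clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints.#(key==\"sandbox.gke.io/runtime\")")
    assert.Equal("NoSchedule", taint.Get("effect").String(), "gvisor taint has the NoSchedule effect")
    assert.Equal("gvisor", taint.Get("value").String(), "gvisor taint has the expected value")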