@@ -159,6 +159,8 @@ func schemaNodeConfig() *schema.Schema {
 				Description: `Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd`,
 			},
 
+			"boot_disk": schemaBootDiskConfig(),
+
 			"guest_accelerator": {
 				Type:     schema.TypeList,
 				Optional: true,
@@ -869,6 +871,45 @@ func schemaNodeConfig() *schema.Schema {
 	}
 }
 
+func schemaBootDiskConfig() *schema.Schema {
+	return &schema.Schema{
+		Type:        schema.TypeList,
+		Optional:    true,
+		Computed:    true,
+		MaxItems:    1,
+		Description: `Boot disk configuration for node pool nodes.`,
+		Elem: &schema.Resource{
+			Schema: map[string]*schema.Schema{
+				"disk_type": {
+					Type:        schema.TypeString,
+					Optional:    true,
+					Computed:    true,
+					Description: `Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd`,
+				},
+				"size_gb": {
+					Type:         schema.TypeInt,
+					Optional:     true,
+					Computed:     true,
+					ValidateFunc: validation.IntAtLeast(10),
+					Description:  `Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.`,
+				},
+				"provisioned_iops": {
+					Type:        schema.TypeInt,
+					Optional:    true,
+					Computed:    true,
+					Description: `Configured IOPS provisioning. Only valid with disk type hyperdisk-balanced.`,
+				},
+				"provisioned_throughput": {
+					Type:        schema.TypeInt,
+					Optional:    true,
+					Computed:    true,
+					Description: `Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.`,
+				},
+			},
+		},
+	}
+}
+
 // Separate since this currently only supports a single value -- a subset of
 // the overall NodeKubeletConfig
 func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema {
@@ -1004,6 +1045,10 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
 		nc.DiskType = v.(string)
 	}
 
+	if v, ok := nodeConfig["boot_disk"]; ok {
+		nc.BootDisk = expandBootDiskConfig(v)
+	}
+
 	if v, ok := nodeConfig["local_ssd_count"]; ok {
 		nc.LocalSsdCount = int64(v.(int))
 	}
@@ -1255,6 +1300,36 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
 	return nc
 }
 
+func expandBootDiskConfig(v interface{}) *container.BootDisk {
+	if v == nil {
+		return nil
+	}
+	ls := v.([]interface{})
+	if len(ls) == 0 {
+		return nil
+	}
+	cfg := ls[0].(map[string]interface{})
+	bd := &container.BootDisk{}
+
+	if v, ok := cfg["disk_type"]; ok {
+		bd.DiskType = v.(string)
+	}
+
+	if v, ok := cfg["size_gb"]; ok {
+		bd.SizeGb = int64(v.(int))
+	}
+
+	if v, ok := cfg["provisioned_iops"]; ok {
+		bd.ProvisionedIops = int64(v.(int))
+	}
+
+	if v, ok := cfg["provisioned_throughput"]; ok {
+		bd.ProvisionedThroughput = int64(v.(int))
+	}
+
+	return bd
+}
+
 func expandResourceManagerTags(v interface{}) *container.ResourceManagerTags {
 	if v == nil {
 		return nil
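
Not part of the diff: a minimal test-style sketch of the empty-input behavior the expand path above relies on, assuming it sits in the same package as expandBootDiskConfig so only the standard "testing" import is needed. Both a nil value and an empty list yield a nil *container.BootDisk, so no boot disk is sent to the API.

func TestExpandBootDiskConfigEmptyInput(t *testing.T) {
	// nil input: the boot_disk block was never set in the configuration.
	if bd := expandBootDiskConfig(nil); bd != nil {
		t.Fatalf("expected nil BootDisk for nil input, got %+v", bd)
	}
	// empty list: the block key exists but carries no elements.
	if bd := expandBootDiskConfig([]interface{}{}); bd != nil {
		t.Fatalf("expected nil BootDisk for empty list, got %+v", bd)
	}
}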
@@ -1617,6 +1692,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte
 		"containerd_config":      flattenContainerdConfig(c.ContainerdConfig),
 		"disk_size_gb":           c.DiskSizeGb,
 		"disk_type":              c.DiskType,
+		"boot_disk":              flattenBootDiskConfig(c.BootDisk),
 		"guest_accelerator":      flattenContainerGuestAccelerators(c.Accelerators),
 		"local_ssd_count":        c.LocalSsdCount,
 		"logging_variant":        flattenLoggingVariant(c.LoggingConfig),
@@ -1663,6 +1739,23 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte
 	return config
 }
 
+func flattenBootDiskConfig(c *container.BootDisk) []map[string]interface{} {
+	config := []map[string]interface{}{}
+
+	if c == nil {
+		return config
+	}
+
+	config = append(config, map[string]interface{}{
+		"disk_type":              c.DiskType,
+		"size_gb":                c.SizeGb,
+		"provisioned_iops":       c.ProvisionedIops,
+		"provisioned_throughput": c.ProvisionedThroughput,
+	})
+
+	return config
+}
+
 func flattenResourceManagerTags(c *container.ResourceManagerTags) map[string]interface{} {
 	if c == nil {
 		return nil
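
Not part of the diff: a round-trip sketch of expandBootDiskConfig and flattenBootDiskConfig, again assuming the same package; the field values are made up. It illustrates that the schema's list-of-one-map shape maps onto container.BootDisk and back, with the size fields carried as int64 on the flattened side.

func TestBootDiskConfigRoundTrip(t *testing.T) {
	raw := []interface{}{
		map[string]interface{}{
			"disk_type":              "hyperdisk-balanced",
			"size_gb":                100,
			"provisioned_iops":       3000,
			"provisioned_throughput": 140,
		},
	}

	// Expand the schema-shaped data into the API struct.
	bd := expandBootDiskConfig(raw)
	if bd == nil || bd.DiskType != "hyperdisk-balanced" || bd.SizeGb != 100 {
		t.Fatalf("unexpected expand result: %+v", bd)
	}

	// Flatten it back and check the list-of-one-map shape.
	flattened := flattenBootDiskConfig(bd)
	if len(flattened) != 1 || flattened[0]["size_gb"].(int64) != 100 {
		t.Fatalf("unexpected flatten result: %+v", flattened)
	}
}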
@@ -2171,7 +2264,9 @@ func nodePoolNodeConfigUpdate(d *schema.ResourceData, config *transport_tpg.Conf
 	if d.HasChange("node_config.0.disk_size_gb") ||
 		d.HasChange("node_config.0.disk_type") ||
 		d.HasChange("node_config.0.machine_type") ||
-		d.HasChange("node_config.0.storage_pools") {
+		d.HasChange("node_config.0.storage_pools") ||
+		d.HasChange("node_config.0.boot_disk") {
+
 		req := &container.UpdateNodePoolRequest{
 			Name:       name,
 			DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)),
@@ -2189,6 +2284,34 @@ func nodePoolNodeConfigUpdate(d *schema.ResourceData, config *transport_tpg.Conf
 			req.StoragePools = storagePools
 		}
 
+		if v, ok := d.GetOk("node_config.0.boot_disk"); ok {
+			bd := expandBootDiskConfig(v)
+			req.BootDisk = bd
+
+			// The following checks ensure that the migrating fields are handled properly.
+			// Migrating fields: disk_type -> boot_disk.disk_type and disk_size_gb -> boot_disk.size_gb.
+			// If the legacy (top-level) disk_type field is not changing, nil it out to allow the API to fill it in.
+			legacyDiskTypeOld, legacyDiskTypeNew := d.GetChange("node_config.0.disk_type")
+			if legacyDiskTypeOld == legacyDiskTypeNew {
+				req.DiskType = ""
+			}
+			// If the new boot disk configuration disk_type field is not changing, nil it out to allow the API to fill it in.
+			bootDiskTypeOld, bootDiskTypeNew := d.GetChange("node_config.0.boot_disk.0.disk_type")
+			if bootDiskTypeOld == bootDiskTypeNew {
+				req.BootDisk.DiskType = ""
+			}
+			// If the legacy (top-level) disk_size_gb field is not changing, nil it out to allow the API to fill it in.
+			legacyDiskSizeGbOld, legacyDiskSizeGbNew := d.GetChange("node_config.0.disk_size_gb")
+			if legacyDiskSizeGbOld == legacyDiskSizeGbNew {
+				req.DiskSizeGb = 0
+			}
+			// If the new boot disk configuration size_gb field is not changing, nil it out to allow the API to fill it in.
+			bootDiskSizeGbOld, bootDiskSizeGbNew := d.GetChange("node_config.0.boot_disk.0.size_gb")
+			if bootDiskSizeGbOld == bootDiskSizeGbNew {
+				req.BootDisk.SizeGb = 0
+			}
+		}
+
 		updateF := func() error {
 			clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
 			if config.UserProjectOverride {
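
Not part of the diff: a small sketch of the intended effect of the field-migration gating above, with made-up old/new values standing in for what d.GetChange would report when only the nested boot_disk.0.size_gb is edited. Unchanged legacy fields drop back to their zero values so the API keeps the node pool's current settings, and only BootDisk.SizeGb travels in the request. The container types are the same ones the update code above uses.

func TestBootDiskMigrationGating(t *testing.T) {
	// Hypothetical old/new pairs for an update that only changes boot_disk.0.size_gb.
	legacyTypeOld, legacyTypeNew := "pd-balanced", "pd-balanced"
	legacySizeOld, legacySizeNew := int64(100), int64(100)

	req := &container.UpdateNodePoolRequest{
		DiskType:   legacyTypeNew,
		DiskSizeGb: legacySizeNew,
		BootDisk:   &container.BootDisk{SizeGb: 200},
	}

	// Mirror of the gating in the update path: unchanged fields are reset to
	// their zero value so the server fills them in from the existing node pool.
	if legacyTypeOld == legacyTypeNew {
		req.DiskType = ""
	}
	if legacySizeOld == legacySizeNew {
		req.DiskSizeGb = 0
	}

	if req.DiskType != "" || req.DiskSizeGb != 0 || req.BootDisk.SizeGb != 200 {
		t.Fatalf("unexpected request after gating: %+v", req)
	}
}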
@@ -2203,14 +2326,14 @@ func nodePoolNodeConfigUpdate(d *schema.ResourceData, config *transport_tpg.Conf
 			return ContainerOperationWait(config, op,
 				nodePoolInfo.project,
 				nodePoolInfo.location,
-				"updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent,
+				"updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools/boot_disk", userAgent,
 				timeout)
 		}
 
 		if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
 			return err
 		}
-		log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id())
+		log.Printf("[INFO] Updated disk_size_gb/disk_type/machine_type/storage_pools/boot_disk for Node Pool %s", d.Id())
 	}
 
 	if d.HasChange(prefix + "node_config.0.taint") {