
Commit d66a242

Add support for topology manager and memory manager (#14754) (#10681)
[upstream:70c707704a4a783329a02020090902d3c08a0c48] Signed-off-by: Modular Magician <[email protected]>
1 parent 99972a7 commit d66a242


5 files changed: +320, -7 lines changed

.changelog/14754.txt

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+```release-note:enhancement
+container: added new fields `memory_manager` and `topology_manager` to `node_kubelet_config` block
+```

google-beta/services/container/node_config.go

Lines changed: 120 additions & 0 deletions
@@ -622,6 +622,47 @@ func schemaNodeConfig() *schema.Schema {
 				ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false),
 				Description:  `Control the CPU management policy on the node.`,
 			},
+			"memory_manager": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"policy": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Computed:     true,
+							Description:  `The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.`,
+							ValidateFunc: validation.StringInSlice([]string{"None", "Static", ""}, false),
+						},
+					},
+				},
+			},
+			"topology_manager": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"policy": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Computed:     true,
+							Description:  `The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.`,
+							ValidateFunc: validation.StringInSlice([]string{"none", "restricted", "single-numa-node", "best-effort", ""}, false),
+						},
+						"scope": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Computed:     true,
+							Description:  `The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).`,
+							ValidateFunc: validation.StringInSlice([]string{"container", "pod", ""}, false),
+						},
+					},
+				},
+			},
 			"cpu_cfs_quota": {
 				Type:     schema.TypeBool,
 				Optional: true,
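
As a quick illustration of how the ValidateFunc entries above behave, here is a minimal standalone sketch, illustrative only and not part of this change, assuming the stock terraform-plugin-sdk/v2 validation helper: an out-of-range policy value is rejected at plan time, before any API call is made.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// Same validator shape as the topology_manager "policy" field above.
	validPolicy := validation.StringInSlice([]string{"none", "restricted", "single-numa-node", "best-effort", ""}, false)

	// Accepted value: no errors returned.
	_, errs := validPolicy("single-numa-node", "policy")
	fmt.Println("single-numa-node:", len(errs), "error(s)") // 0 error(s)

	// Rejected value: an error is returned and surfaced during plan.
	_, errs = validPolicy("numa-aware", "policy")
	fmt.Println("numa-aware:", len(errs), "error(s)") // 1 error(s)
}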
@@ -1667,6 +1708,14 @@ func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig {
 		}
 		kConfig.EvictionSoft = evictionSoft
 	}
+
+	if v, ok := cfg["memory_manager"]; ok {
+		kConfig.MemoryManager = expandMemoryManager(v)
+	}
+	if v, ok := cfg["topology_manager"]; ok {
+		kConfig.TopologyManager = expandTopologyManager(v)
+	}
+
 	if v, ok := cfg["eviction_soft_grace_period"]; ok && len(v.([]interface{})) > 0 {
 		es := v.([]interface{})[0].(map[string]interface{})
 		periods := &container.EvictionGracePeriod{}
@@ -1716,6 +1765,54 @@ func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig {
 	return kConfig
 }
 
+func expandTopologyManager(v interface{}) *container.TopologyManager {
+	if v == nil {
+		return nil
+	}
+	ls := v.([]interface{})
+	if len(ls) == 0 {
+		return nil
+	}
+	if ls[0] == nil {
+		return &container.TopologyManager{}
+	}
+	cfg := ls[0].(map[string]interface{})
+
+	topologyManager := &container.TopologyManager{}
+
+	if v, ok := cfg["policy"]; ok {
+		topologyManager.Policy = v.(string)
+	}
+
+	if v, ok := cfg["scope"]; ok {
+		topologyManager.Scope = v.(string)
+	}
+
+	return topologyManager
+}
+
+func expandMemoryManager(v interface{}) *container.MemoryManager {
+	if v == nil {
+		return nil
+	}
+	ls := v.([]interface{})
+	if len(ls) == 0 {
+		return nil
+	}
+	if ls[0] == nil {
+		return &container.MemoryManager{}
+	}
+	cfg := ls[0].(map[string]interface{})
+
+	memoryManager := &container.MemoryManager{}
+
+	if v, ok := cfg["policy"]; ok {
+		memoryManager.Policy = v.(string)
+	}
+
+	return memoryManager
+}
+
 func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig {
 	if v == nil {
 		return nil
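
The two expanders above follow the provider's usual pattern for MaxItems:1 blocks: Terraform hands the block over as a one-element []interface{} wrapping a map of its fields, and the expander unwraps it into the GKE API struct. A minimal, self-contained sketch of that shape, using an illustrative stand-in type rather than code from this change:

package main

import "fmt"

// topologyManager is a local stand-in for container.TopologyManager, used only
// to keep this sketch self-contained.
type topologyManager struct {
	Policy string
	Scope  string
}

// expandTopologyManagerSketch mirrors the expander above: the MaxItems:1 block
// arrives as a one-element []interface{} wrapping a map of its fields.
func expandTopologyManagerSketch(v interface{}) *topologyManager {
	ls, ok := v.([]interface{})
	if !ok || len(ls) == 0 {
		return nil
	}
	if ls[0] == nil {
		return &topologyManager{}
	}
	cfg := ls[0].(map[string]interface{})
	tm := &topologyManager{}
	if p, ok := cfg["policy"]; ok {
		tm.Policy = p.(string)
	}
	if s, ok := cfg["scope"]; ok {
		tm.Scope = s.(string)
	}
	return tm
}

func main() {
	raw := []interface{}{map[string]interface{}{"policy": "best-effort", "scope": "pod"}}
	fmt.Printf("%+v\n", expandTopologyManagerSketch(raw)) // &{Policy:best-effort Scope:pod}
}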
@@ -2354,6 +2451,8 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface
 		"cpu_cfs_quota":        c.CpuCfsQuota,
 		"cpu_cfs_quota_period": c.CpuCfsQuotaPeriod,
 		"cpu_manager_policy":   c.CpuManagerPolicy,
+		"memory_manager":       flattenMemoryManager(c.MemoryManager),
+		"topology_manager":     flattenTopologyManager(c.TopologyManager),
 		"insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c),
 		"pod_pids_limit":                         c.PodPidsLimit,
 		"container_log_max_size":                 c.ContainerLogMaxSize,
@@ -2374,6 +2473,27 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface
 	return result
 }
 
+func flattenTopologyManager(c *container.TopologyManager) []map[string]interface{} {
+	result := []map[string]interface{}{}
+	if c != nil {
+		result = append(result, map[string]interface{}{
+			"policy": c.Policy,
+			"scope":  c.Scope,
+		})
+	}
+	return result
+}
+
+func flattenMemoryManager(c *container.MemoryManager) []map[string]interface{} {
+	result := []map[string]interface{}{}
+	if c != nil {
+		result = append(result, map[string]interface{}{
+			"policy": c.Policy,
+		})
+	}
+	return result
+}
+
 func flattenNodePoolAutoConfigNodeKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} {
 	result := []map[string]interface{}{}
 	if c != nil {
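
Flattening is the inverse direction: a non-nil API struct becomes a one-element []map[string]interface{} written back to state, while nil yields an empty slice so the block stays unset. A self-contained sketch of that behaviour, again with an illustrative stand-in type rather than the provider's own:

package main

import "fmt"

// memoryManager stands in for container.MemoryManager so the sketch compiles on its own.
type memoryManager struct {
	Policy string
}

// flattenMemoryManagerSketch mirrors the flattener above: non-nil input becomes a
// one-element slice of maps; nil input becomes an empty slice.
func flattenMemoryManagerSketch(c *memoryManager) []map[string]interface{} {
	result := []map[string]interface{}{}
	if c != nil {
		result = append(result, map[string]interface{}{
			"policy": c.Policy,
		})
	}
	return result
}

func main() {
	fmt.Println(flattenMemoryManagerSketch(&memoryManager{Policy: "Static"})) // [map[policy:Static]]
	fmt.Println(flattenMemoryManagerSketch(nil))                              // []
}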

google-beta/services/container/resource_container_cluster_test.go

Lines changed: 151 additions & 1 deletion
@@ -2068,6 +2068,68 @@ func TestAccContainerCluster_withNodeConfigLinuxNodeConfig(t *testing.T) {
 	})
 }
 
+func TestAccContainerCluster_withKubeletConfig(t *testing.T) {
+	t.Parallel()
+
+	clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "none", "None", "best-effort", "pod"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.cpu_manager_policy", "none"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.memory_manager.0.policy", "None"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.policy", "best-effort"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.scope", "pod"),
+				),
+			},
+			{
+				ResourceName:            "google_container_cluster.with_kubelet_config",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+			{
+				Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "static", "Static", "single-numa-node", "pod"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.cpu_manager_policy", "static"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.memory_manager.0.policy", "Static"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.policy", "single-numa-node"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.scope", "pod"),
+				),
+			},
+			{
+				ResourceName:            "google_container_cluster.with_kubelet_config",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+		},
+	})
+}
+
 func TestAccContainerCluster_withNodeConfigFastSocket(t *testing.T) {
 	t.Parallel()
 
@@ -8516,7 +8578,6 @@ resource "google_container_cluster" "with_node_config_kubelet_config_settings_in
   node_pool {
     name               = "%s"
     initial_node_count = 1
-
     node_config {
       machine_type = "n1-standard-1"
       kubelet_config {
@@ -14756,3 +14817,92 @@ resource "google_container_cluster" "primary" {
 }
 `, clusterName, networkName, subnetworkName, unauthenticated, authenticated)
 }
+
+func TestAccContainerCluster_withKubeletResourceManagerConfig(t *testing.T) {
+	t.Parallel()
+
+	clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "none", "None", "best-effort", "container"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.cpu_manager_policy", "none"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.memory_manager.0.policy", "None"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.policy", "best-effort"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.scope", "container"),
+				),
+			},
+			{
+				ResourceName:            "google_container_cluster.with_kubelet_config",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+			{
+				Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "static", "Static", "single-numa-node", "container"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.cpu_manager_policy", "static"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.memory_manager.0.policy", "Static"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.policy", "single-numa-node"),
+					resource.TestCheckResourceAttr(
+						"google_container_cluster.with_kubelet_config",
+						"node_config.0.kubelet_config.0.topology_manager.0.scope", "container"),
+				),
+			},
+			{
+				ResourceName:            "google_container_cluster.with_kubelet_config",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+		},
+	})
+}
+
+func testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, cpuManagerPolicy, memoryManagerPolicy, topologyManagerPolicy, topologyManagerScope string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "with_kubelet_config" {
+  name                = %q
+  location            = "us-central1-a"
+  initial_node_count  = 1
+  network             = %q
+  subnetwork          = %q
+  deletion_protection = false
+
+  node_config {
+    machine_type = "c4-standard-2"
+    kubelet_config {
+      cpu_manager_policy = %q
+      memory_manager {
+        policy = %q
+      }
+      topology_manager {
+        policy = %q
+        scope  = %q
+      }
+    }
+  }
+}
+`, clusterName, networkName, subnetworkName, cpuManagerPolicy, memoryManagerPolicy, topologyManagerPolicy, topologyManagerScope)
+}
