Commit 70c7077

Add support for topology manager and memory manager (#14754)
1 parent 44ced68 commit 70c7077

File tree

4 files changed: +318 -7 lines

mmv1/third_party/terraform/services/container/node_config.go.tmpl

Lines changed: 121 additions & 0 deletions
@@ -615,6 +615,48 @@ func schemaNodeConfig() *schema.Schema {
     ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false),
     Description:  `Control the CPU management policy on the node.`,
 },
+"memory_manager": {
+    Type:        schema.TypeList,
+    Optional:    true,
+    MaxItems:    1,
+    Description: `Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.`,
+    Elem: &schema.Resource{
+        Schema: map[string]*schema.Schema{
+            "policy": {
+                Type:         schema.TypeString,
+                Optional:     true,
+                Computed:     true,
+                Description:  `The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.`,
+                ValidateFunc: validation.StringInSlice([]string{"None", "Static", ""}, false),
+            },
+        },
+    },
+},
+"topology_manager": {
+    Type:        schema.TypeList,
+    Optional:    true,
+    MaxItems:    1,
+    Description: `Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.`,
+    Elem: &schema.Resource{
+        Schema: map[string]*schema.Schema{
+            "policy": {
+                Type:         schema.TypeString,
+                Optional:     true,
+                Computed:     true,
+                Description:  `The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.`,
+                ValidateFunc: validation.StringInSlice([]string{"none", "restricted", "single-numa-node", "best-effort", ""}, false),
+
+            },
+            "scope": {
+                Type:         schema.TypeString,
+                Optional:     true,
+                Computed:     true,
+                Description:  `The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).`,
+                ValidateFunc: validation.StringInSlice([]string{"container", "pod", ""}, false),
+            },
+        },
+    },
+},
 "cpu_cfs_quota": {
     Type:     schema.TypeBool,
     Optional: true,
@@ -1667,6 +1709,14 @@ func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig {
         }
         kConfig.EvictionSoft = evictionSoft
     }
+
+    if v, ok := cfg["memory_manager"]; ok {
+        kConfig.MemoryManager = expandMemoryManager(v)
+    }
+    if v, ok := cfg["topology_manager"]; ok {
+        kConfig.TopologyManager = expandTopologyManager(v)
+    }
+
     if v, ok := cfg["eviction_soft_grace_period"]; ok && len(v.([]interface{})) > 0 {
         es := v.([]interface{})[0].(map[string]interface{})
         periods := &container.EvictionGracePeriod{}
@@ -1716,6 +1766,54 @@ func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig {
     return kConfig
 }
 
+func expandTopologyManager(v interface{}) *container.TopologyManager {
+    if v == nil {
+        return nil
+    }
+    ls := v.([]interface{})
+    if len(ls) == 0 {
+        return nil
+    }
+    if ls[0] == nil {
+        return &container.TopologyManager{}
+    }
+    cfg := ls[0].(map[string]interface{})
+
+    topologyManager := &container.TopologyManager{}
+
+    if v, ok := cfg["policy"]; ok {
+        topologyManager.Policy = v.(string)
+    }
+
+    if v, ok := cfg["scope"]; ok {
+        topologyManager.Scope = v.(string)
+    }
+
+    return topologyManager
+}
+
+func expandMemoryManager(v interface{}) *container.MemoryManager {
+    if v == nil {
+        return nil
+    }
+    ls := v.([]interface{})
+    if len(ls) == 0 {
+        return nil
+    }
+    if ls[0] == nil {
+        return &container.MemoryManager{}
+    }
+    cfg := ls[0].(map[string]interface{})
+
+    memoryManager := &container.MemoryManager{}
+
+    if v, ok := cfg["policy"]; ok {
+        memoryManager.Policy = v.(string)
+    }
+
+    return memoryManager
+}
+
 func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig {
     if v == nil {
         return nil
@@ -2364,6 +2462,8 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface
     "cpu_cfs_quota":        c.CpuCfsQuota,
     "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod,
     "cpu_manager_policy":   c.CpuManagerPolicy,
+    "memory_manager":       flattenMemoryManager(c.MemoryManager),
+    "topology_manager":     flattenTopologyManager(c.TopologyManager),
     "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c),
     "pod_pids_limit":          c.PodPidsLimit,
     "container_log_max_size":  c.ContainerLogMaxSize,
@@ -2384,6 +2484,27 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface
     return result
 }
 
+func flattenTopologyManager(c *container.TopologyManager) []map[string]interface{} {
+    result := []map[string]interface{}{}
+    if c != nil {
+        result = append(result, map[string]interface{}{
+            "policy": c.Policy,
+            "scope":  c.Scope,
+        })
+    }
+    return result
+}
+
+func flattenMemoryManager(c *container.MemoryManager) []map[string]interface{} {
+    result := []map[string]interface{}{}
+    if c != nil {
+        result = append(result, map[string]interface{}{
+            "policy": c.Policy,
+        })
+    }
+    return result
+}
+
 func flattenNodePoolAutoConfigNodeKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} {
     result := []map[string]interface{}{}
     if c != nil {
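
Both new blocks reuse the provider's usual MaxItems: 1 list pattern: Terraform hands the block to the expander as a []interface{} holding at most one map, and a nil API object flattens back to an empty list so the block disappears from state. Below is a minimal, self-contained sketch of that round trip; TopologyManager here is a local stand-in mirroring the container API type's two string fields, not the provider's actual import, and expand/flatten are simplified copies of the helpers in the diff above.

package main

import "fmt"

// TopologyManager is a stand-in for the container API type used by the
// provider; it only mirrors the Policy and Scope string fields.
type TopologyManager struct {
    Policy string
    Scope  string
}

// expand mirrors expandTopologyManager: a MaxItems: 1 schema.TypeList
// arrives as a []interface{} containing at most one map[string]interface{}.
func expand(v interface{}) *TopologyManager {
    ls, ok := v.([]interface{})
    if !ok || len(ls) == 0 {
        return nil
    }
    if ls[0] == nil {
        return &TopologyManager{}
    }
    cfg := ls[0].(map[string]interface{})
    tm := &TopologyManager{}
    if p, ok := cfg["policy"]; ok {
        tm.Policy = p.(string)
    }
    if s, ok := cfg["scope"]; ok {
        tm.Scope = s.(string)
    }
    return tm
}

// flatten mirrors flattenTopologyManager: a nil API object becomes an empty
// list, so an unset block and an absent API response look identical in state.
func flatten(tm *TopologyManager) []map[string]interface{} {
    result := []map[string]interface{}{}
    if tm != nil {
        result = append(result, map[string]interface{}{
            "policy": tm.Policy,
            "scope":  tm.Scope,
        })
    }
    return result
}

func main() {
    raw := []interface{}{map[string]interface{}{
        "policy": "best-effort",
        "scope":  "pod",
    }}
    fmt.Println(flatten(expand(raw))) // [map[policy:best-effort scope:pod]]
}

expandMemoryManager/flattenMemoryManager follow the same shape, minus the scope field.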

mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.tmpl

Lines changed: 151 additions & 1 deletion
@@ -2057,6 +2057,68 @@ func TestAccContainerCluster_withNodeConfigLinuxNodeConfig(t *testing.T) {
     })
 }
 
+func TestAccContainerCluster_withKubeletConfig(t *testing.T) {
+    t.Parallel()
+
+    clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+    networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+    subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+
+    acctest.VcrTest(t, resource.TestCase{
+        PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+        CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
+        Steps: []resource.TestStep{
+            {
+                Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "none", "None", "best-effort", "pod"),
+                Check: resource.ComposeTestCheckFunc(
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.cpu_manager_policy", "none"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.memory_manager.0.policy", "None"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.policy", "best-effort"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.scope", "pod"),
+                ),
+            },
+            {
+                ResourceName:            "google_container_cluster.with_kubelet_config",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"deletion_protection"},
+            },
+            {
+                Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "static", "Static", "single-numa-node", "pod"),
+                Check: resource.ComposeTestCheckFunc(
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.cpu_manager_policy", "static"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.memory_manager.0.policy", "Static"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.policy", "single-numa-node"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.scope", "pod"),
+                ),
+            },
+            {
+                ResourceName:            "google_container_cluster.with_kubelet_config",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"deletion_protection"},
+            },
+        },
+    })
+}
+
 func TestAccContainerCluster_withNodeConfigFastSocket(t *testing.T) {
     t.Parallel()
 

@@ -8537,7 +8599,6 @@ resource "google_container_cluster" "with_node_config_kubelet_config_settings_in
   node_pool {
     name               = "%s"
     initial_node_count = 1
-
     node_config {
       machine_type = "n1-standard-1"
      kubelet_config {
@@ -14795,3 +14856,92 @@ resource "google_container_cluster" "primary" {
 }
 `, clusterName, networkName, subnetworkName, unauthenticated, authenticated)
 }
+
+func TestAccContainerCluster_withKubeletResourceManagerConfig(t *testing.T) {
+    t.Parallel()
+
+    clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+    networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+    subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+
+    acctest.VcrTest(t, resource.TestCase{
+        PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+        CheckDestroy:             testAccCheckContainerClusterDestroyProducer(t),
+        Steps: []resource.TestStep{
+            {
+                Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "none", "None", "best-effort", "container"),
+                Check: resource.ComposeTestCheckFunc(
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.cpu_manager_policy", "none"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.memory_manager.0.policy", "None"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.policy", "best-effort"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.scope", "container"),
+                ),
+            },
+            {
+                ResourceName:            "google_container_cluster.with_kubelet_config",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"deletion_protection"},
+            },
+            {
+                Config: testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, "static", "Static", "single-numa-node", "container"),
+                Check: resource.ComposeTestCheckFunc(
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.cpu_manager_policy", "static"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.memory_manager.0.policy", "Static"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.policy", "single-numa-node"),
+                    resource.TestCheckResourceAttr(
+                        "google_container_cluster.with_kubelet_config",
+                        "node_config.0.kubelet_config.0.topology_manager.0.scope", "container"),
+                ),
+            },
+            {
+                ResourceName:            "google_container_cluster.with_kubelet_config",
+                ImportState:             true,
+                ImportStateVerify:       true,
+                ImportStateVerifyIgnore: []string{"deletion_protection"},
+            },
+        },
+    })
+}
+
+func testAccContainerCluster_withKubeletConfig(clusterName, networkName, subnetworkName, cpuManagerPolicy, memoryManagerPolicy, topologyManagerPolicy, topologyManagerScope string) string {
+    return fmt.Sprintf(`
+resource "google_container_cluster" "with_kubelet_config" {
+  name                = %q
+  location            = "us-central1-a"
+  initial_node_count  = 1
+  network             = %q
+  subnetwork          = %q
+  deletion_protection = false
+
+  node_config {
+    machine_type = "c4-standard-2"
+    kubelet_config {
+      cpu_manager_policy = %q
+      memory_manager {
+        policy = %q
+      }
+      topology_manager {
+        policy = %q
+        scope  = %q
+      }
+    }
+  }
+}
+`, clusterName, networkName, subnetworkName, cpuManagerPolicy, memoryManagerPolicy, topologyManagerPolicy, topologyManagerScope)
+}
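
Note the deliberate case split in the test values: memory_manager takes the capitalized "None"/"Static", while cpu_manager_policy and topology_manager take lowercase values ("none", "static", "best-effort", "single-numa-node"). The ValidateFuncs in the schema enforce this exactly, because StringInSlice is called with ignoreCase set to false. A small sketch of that behavior using the SDK helper directly; the field paths and printed counts are illustrative, not provider output.

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
    // The topology manager policy validator from the schema above:
    // case-sensitive because the second argument (ignoreCase) is false.
    policyValidator := validation.StringInSlice(
        []string{"none", "restricted", "single-numa-node", "best-effort", ""}, false)

    // A correctly cased value produces no errors.
    _, errs := policyValidator("best-effort", "topology_manager.0.policy")
    fmt.Println(len(errs)) // 0

    // The memory manager validator accepts only the capitalized spellings,
    // so a lowercase "static" is rejected.
    memValidator := validation.StringInSlice([]string{"None", "Static", ""}, false)
    _, errs = memValidator("static", "memory_manager.0.policy")
    fmt.Println(len(errs)) // 1
}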
