@@ -15,6 +15,12 @@ module "label" {
   context = module.this.context
 }
 
+data "aws_caller_identity" "current" {}
+
+data "aws_iam_session_context" "current" {
+  arn = data.aws_caller_identity.current.arn
+}
+
 locals {
   # The usage of the specific kubernetes.io/cluster/* resource tags below are required
   # for EKS and Kubernetes to discover and manage networking resources
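The aws_iam_session_context data source resolves the caller's ARN to its underlying IAM principal: for an assumed-role session it returns the source role's ARN as issuer_arn, while for a plain IAM user it echoes the user ARN back. That makes issuer_arn a stable key for the access entry regardless of session name. To confirm what it resolves to in your environment, a throwaway output works (a debugging sketch, not part of this change):

output "debug_issuer_arn" {
  description = "IAM principal that will be granted cluster admin access"
  value       = data.aws_iam_session_context.current.issuer_arn
}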
@@ -45,18 +51,34 @@ locals {
   }
 
   extra_policy_arn = "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"
+
+  # Enable the IAM user creating the cluster to administer it,
+  # without using the bootstrap_cluster_creator_admin_permissions option,
+  # as a way to test the access_entry_map feature.
+  # In general, this is not recommended. Instead, you should
+  # create the access_entry_map statically, with the ARNs you want to
+  # have access to the cluster. We do it dynamically here just for testing purposes.
+  # See the original PR for more information:
+  # https://github.com/cloudposse/terraform-aws-eks-cluster/pull/206
+  access_entry_map = {
+    (data.aws_iam_session_context.current.issuer_arn) = {
+      access_policy_associations = {
+        ClusterAdmin = {}
+      }
+    }
+  }
 }
 
 module "vpc" {
   source  = "cloudposse/vpc/aws"
-  version = "2.1.0"
+  version = "2.2.0"
   ipv4_primary_cidr_block = var.vpc_cidr_block
   context                 = module.this.context
 }
 
 module "subnets" {
   source  = "cloudposse/dynamic-subnets/aws"
-  version = "2.4.1"
+  version = "2.4.2"
   availability_zones = var.availability_zones
   vpc_id             = module.vpc.vpc_id
   igw_id             = [module.vpc.igw_id]
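As the inline comment stresses, deriving the admin principal dynamically is only for exercising the feature in tests; a production configuration would pin access_entry_map to known principals. A minimal static sketch, following the same shape as the dynamic version above (the account ID and role name are placeholders):

locals {
  access_entry_map = {
    # Placeholder ARN; use the role or user that should administer the cluster.
    "arn:aws:iam::111111111111:role/eks-admins" = {
      access_policy_associations = {
        ClusterAdmin = {}
      }
    }
  }
}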
@@ -68,11 +90,10 @@ module "subnets" {
 
 module "ssh_source_access" {
   source  = "cloudposse/security-group/aws"
-  version = "0.4.3"
+  version = "2.2.0"
 
   attributes                 = ["ssh", "source"]
   security_group_description = "Test source security group ssh access only"
-  create_before_destroy      = true
   allow_all_egress           = true
 
   rules = [local.allow_all_ingress_rule]
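Note the dropped create_before_destroy = true: presumably the 2.x security-group module creates replacement groups before destroying old ones by default, making the explicit setting redundant (an assumption about the 2.x defaults, not stated in this diff). If the old destroy-first behavior were ever needed, it could still be requested inside the module block:

  # Override the assumed 2.x default; variable name assumed unchanged from 0.4.x.
  create_before_destroy = false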
@@ -85,11 +106,10 @@ module "ssh_source_access" {
 
 module "https_sg" {
   source  = "cloudposse/security-group/aws"
-  version = "0.4.3"
+  version = "2.2.0"
 
   attributes                 = ["http"]
   security_group_description = "Allow http access"
-  create_before_destroy      = true
   allow_all_egress           = true
 
   rules = [local.allow_http_ingress_rule]
@@ -101,21 +121,21 @@ module "https_sg" {
 
 module "eks_cluster" {
   source  = "cloudposse/eks-cluster/aws"
-  version = "2.9.0"
+  version = "4.1.0"
   region                       = var.region
-  vpc_id                       = module.vpc.vpc_id
   subnet_ids                   = module.subnets.public_subnet_ids
   kubernetes_version           = var.kubernetes_version
-  local_exec_interpreter       = var.local_exec_interpreter
   oidc_provider_enabled        = var.oidc_provider_enabled
   enabled_cluster_log_types    = var.enabled_cluster_log_types
   cluster_log_retention_period = var.cluster_log_retention_period
 
-  # data auth has problems destroying the auth-map
-  kube_data_auth_enabled = false
-  kube_exec_auth_enabled = true
+  access_config = {
+    authentication_mode                         = "API"
+    bootstrap_cluster_creator_admin_permissions = false
+  }
 
-  context = module.this.context
+  access_entry_map = local.access_entry_map
+  context          = module.this.context
 }
 
 module "eks_node_group" {
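Version 4 of the eks-cluster module grants cluster access through EKS access entries rather than the aws-auth ConfigMap, which is why the kube_data_auth_enabled / kube_exec_auth_enabled inputs disappear and authentication_mode is set to "API" (ConfigMap-based authentication off entirely). With bootstrap_cluster_creator_admin_permissions = false, the creator gets no implicit admin entry; access comes solely from access_entry_map. Conceptually, each access_entry_map entry expands to something like the plain AWS provider resources below (an orientation sketch, not part of this change; the resource labels are hypothetical):

resource "aws_eks_access_entry" "admin" {
  cluster_name  = module.eks_cluster.eks_cluster_id
  principal_arn = data.aws_iam_session_context.current.issuer_arn
}

resource "aws_eks_access_policy_association" "admin" {
  cluster_name  = module.eks_cluster.eks_cluster_id
  principal_arn = data.aws_iam_session_context.current.issuer_arn
  policy_arn    = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"

  access_scope {
    type = "cluster"
  }
}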