Skip to content

Commit d892682

Browse files
committed
Initial commit
1 parent 91eb1ea commit d892682

File tree

10 files changed

+736
-58
lines changed

10 files changed

+736
-58
lines changed

.github/settings.yml

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
# Repository settings consumed by the Probot "Settings" GitHub App.
# Upstream changes from _extends are only recognized when modifications are made to this file in the default branch.
_extends: .github
repository:
  name: aws-eks-external-dns
  # NOTE(review): the description was truncated mid-URL in the source diff;
  # the link target is assumed to be the upstream external-dns repository — verify.
  description: This component creates a Helm deployment for [external-dns](https://github.com/kubernetes-sigs/external-dns)
  homepage: https://cloudposse.com/accelerate
  topics: terraform, terraform-component

README.yaml

Lines changed: 175 additions & 48 deletions
Large diffs are not rendered by default.

src/main.tf

Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
locals {
  enabled = module.this.enabled

  # Chart `sources` to watch
  source_defaults = ["service", "ingress"]
  source_istio    = var.istio_enabled ? ["istio-gateway"] : []
  source_crd      = var.crd_enabled ? ["crd"] : []
  sources         = concat(local.source_defaults, local.source_istio, local.source_crd)

  # Owner ID recorded in DNS TXT registry records so multiple external-dns
  # deployments can share zones without clobbering each other's records.
  # The tenant segment is included only when the context defines a tenant
  # (indexed format verbs skip argument 3 in the tenant-less branch).
  txt_owner  = var.txt_prefix != "" ? format(module.this.tenant != null ? "%[1]s-%[2]s-%[3]s-%[4]s" : "%[1]s-%[2]s-%[4]s", var.txt_prefix, module.this.environment, module.this.tenant, module.this.stage) : ""
  txt_prefix = var.txt_prefix != "" ? format("%s-", local.txt_owner) : ""

  # All Route 53 hosted zone IDs external-dns is allowed to modify:
  # delegated zones, primary zones, and zones from any extra DNS components.
  zone_ids = compact(concat(
    values(module.dns_gbl_delegated.outputs.zones)[*].zone_id,
    values(module.dns_gbl_primary.outputs.zones)[*].zone_id,
    flatten([for k, v in module.additional_dns_components : [for i, j in v.outputs.zones : j.zone_id]])
  ))
}

# Current AWS partition (aws, aws-cn, aws-us-gov) for building Route 53 ARNs.
data "aws_partition" "current" {
  count = local.enabled ? 1 : 0
}

# Deploys the external-dns Helm chart with an IRSA role scoped to the hosted
# zones collected in local.zone_ids.
module "external_dns" {
  source  = "cloudposse/helm-release/aws"
  version = "0.10.0"

  name            = module.this.name
  chart           = var.chart
  repository      = var.chart_repository
  description     = var.chart_description
  chart_version   = var.chart_version
  wait            = var.wait
  atomic          = var.atomic
  cleanup_on_fail = var.cleanup_on_fail
  timeout         = var.timeout

  create_namespace_with_kubernetes = var.create_namespace
  kubernetes_namespace             = var.kubernetes_namespace
  kubernetes_namespace_labels      = merge(module.this.tags, { name = var.kubernetes_namespace })

  # IRSA: strip the scheme so the module can build the OIDC provider ARN.
  eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "")

  service_account_name      = module.this.name
  service_account_namespace = var.kubernetes_namespace

  iam_role_enabled = true
  iam_policy_statements = [
    {
      sid = "GrantChangeResourceRecordSets"

      actions = [
        "route53:ChangeResourceRecordSets"
      ]

      # Write access is limited to the zones this component manages.
      effect    = "Allow"
      resources = formatlist("arn:${join("", data.aws_partition.current.*.partition)}:route53:::hostedzone/%s", local.zone_ids)
    },
    {
      sid = "GrantListHostedZonesListResourceRecordSets"

      actions = [
        "route53:ListHostedZones",
        "route53:ListHostedZonesByName",
        "route53:ListResourceRecordSets"
      ]

      # List/read APIs do not support resource-level scoping in Route 53.
      effect    = "Allow"
      resources = ["*"]
    },
  ]

  values = compact([
    # standard k8s object settings
    yamlencode({
      # NOTE(review): uses var.name here while serviceAccount.name uses
      # module.this.name — possibly intentional, but verify the two should differ.
      fullnameOverride = var.name,
      serviceAccount = {
        name = module.this.name
      },
      resources = var.resources
      rbac = {
        create = var.rbac_enabled
      }
    }),
    # standard metrics settings
    var.metrics_enabled ? yamlencode({
      prometheus = {
        enabled = var.metrics_enabled
        servicemonitor = {
          enabled = var.metrics_enabled
        }
      }
    }) : "",
    # external-dns-specific values
    yamlencode({
      aws = {
        region = var.region
      }
      policy                  = var.policy
      publishInternalServices = var.publish_internal_services
      txtOwnerId              = local.txt_owner
      txtPrefix               = local.txt_prefix
      sources                 = local.sources
    }),
    # hardcoded values
    file("${path.module}/resources/values.yaml"),
    # additional values
    yamlencode(var.chart_values)
  ])

  context = module.this.context
}

src/outputs.tf

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
# Metadata of the deployed Helm release (name, namespace, revision, etc.),
# passed through from the helm-release module.
output "metadata" {
  value       = module.external_dns.metadata
  description = "Block status of the deployed release"
}

src/provider-helm.tf

Lines changed: 201 additions & 0 deletions
Original file line numberDiff line numberDiff line change
##################
#
# This file is a drop-in to provide a helm provider.
#
# It depends on 2 standard Cloud Posse data source modules to be already
# defined in the same component:
#
# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster
# 2. module.eks to provide the EKS cluster information
#
# All the following variables are just about configuring the Kubernetes provider
# to be able to modify EKS cluster. The reason there are so many options is
# because at various times, each one of them has had problems, so we give you a choice.
#
# The reason there are so many "enabled" inputs rather than automatically
# detecting whether or not they are enabled based on the value of the input
# is that any logic based on input values requires the values to be known during
# the "plan" phase of Terraform, and often they are not, which causes problems.
#
variable "kubeconfig_file_enabled" {
  type        = bool
  default     = false
  description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster"
  nullable    = false
}

variable "kubeconfig_file" {
  type        = string
  default     = ""
  description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`"
  nullable    = false
}

variable "kubeconfig_context" {
  type        = string
  default     = ""
  description = <<-EOT
    Context to choose from the Kubernetes config file.
    If supplied, `kubeconfig_context_format` will be ignored.
    EOT
  nullable    = false
}

variable "kubeconfig_context_format" {
  type        = string
  default     = ""
  description = <<-EOT
    A format string to use for creating the `kubectl` context name when
    `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
    Must include a single `%s` which will be replaced with the cluster name.
    EOT
  nullable    = false
}

variable "kube_data_auth_enabled" {
  type        = bool
  default     = false
  description = <<-EOT
    If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
    Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`.
    EOT
  nullable    = false
}

variable "kube_exec_auth_enabled" {
  type        = bool
  default     = true
  description = <<-EOT
    If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
    Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`.
    EOT
  nullable    = false
}

variable "kube_exec_auth_role_arn" {
  type        = string
  default     = ""
  description = "The role ARN for `aws eks get-token` to use"
  nullable    = false
}

variable "kube_exec_auth_role_arn_enabled" {
  type        = bool
  default     = true
  description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`"
  nullable    = false
}

variable "kube_exec_auth_aws_profile" {
  type        = string
  default     = ""
  description = "The AWS config profile for `aws eks get-token` to use"
  nullable    = false
}

variable "kube_exec_auth_aws_profile_enabled" {
  type        = bool
  default     = false
  description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`"
  nullable    = false
}

variable "kubeconfig_exec_auth_api_version" {
  type        = string
  default     = "client.authentication.k8s.io/v1beta1"
  description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin"
  nullable    = false
}

variable "helm_manifest_experiment_enabled" {
  type        = bool
  default     = false
  description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan"
  nullable    = false
}

locals {
  kubeconfig_file_enabled = var.kubeconfig_file_enabled
  kubeconfig_file         = local.kubeconfig_file_enabled ? var.kubeconfig_file : ""
  kubeconfig_context = !local.kubeconfig_file_enabled ? "" : (
    length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : (
      length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : ""
    )
  )

  # Auth precedence: kubeconfig file > exec auth > data-source auth.
  kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled
  kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled

  # Eventually we might try to get this from an environment variable
  kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version

  exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [
    "--profile", var.kube_exec_auth_aws_profile
  ] : []

  kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn)
  exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [
    "--role-arn", local.kube_exec_auth_role_arn
  ] : []

  # Provide dummy configuration for the case where the EKS cluster is not available.
  certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null)
  cluster_ca_certificate     = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null)
  # Use coalesce+try to handle both the case where the output is missing and the case where it is empty.
  eks_cluster_id       = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing")
  eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "")
}

data "aws_eks_cluster_auth" "eks" {
  count = local.kube_data_auth_enabled ? 1 : 0
  name  = local.eks_cluster_id
}

provider "helm" {
  kubernetes {
    host                   = local.eks_cluster_endpoint
    cluster_ca_certificate = local.cluster_ca_certificate
    token                  = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
    # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication
    # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH`
    # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file.
    config_path    = local.kubeconfig_file
    config_context = local.kubeconfig_context

    # Only configure exec auth when it is enabled and the cluster data is available.
    dynamic "exec" {
      for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : []
      content {
        api_version = local.kubeconfig_exec_auth_api_version
        command     = "aws"
        args = concat(local.exec_profile, [
          "eks", "get-token", "--cluster-name", local.eks_cluster_id
        ], local.exec_role)
      }
    }
  }
  experiments {
    manifest = var.helm_manifest_experiment_enabled && module.this.enabled
  }
}

# Mirrors the helm provider's embedded kubernetes configuration above.
provider "kubernetes" {
  host                   = local.eks_cluster_endpoint
  cluster_ca_certificate = local.cluster_ca_certificate
  token                  = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
  # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication
  # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH`
  # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file.
  config_path    = local.kubeconfig_file
  config_context = local.kubeconfig_context

  dynamic "exec" {
    for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : []
    content {
      api_version = local.kubeconfig_exec_auth_api_version
      command     = "aws"
      args = concat(local.exec_profile, [
        "eks", "get-token", "--cluster-name", local.eks_cluster_id
      ], local.exec_role)
    }
  }
}

src/providers.tf

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
provider "aws" {
  region = var.region

  # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null.
  profile = module.iam_roles.terraform_profile_name

  dynamic "assume_role" {
    # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role.
    for_each = compact([module.iam_roles.terraform_role_arn])
    content {
      role_arn = assume_role.value
    }
  }
}

# Resolves the AWS profile/role Terraform should use for this component's context.
module "iam_roles" {
  source  = "../../account-map/modules/iam-roles"
  context = module.this.context
}

0 commit comments

Comments (0)