Skip to content

Commit 872f003

Browse files
committed
first blood
1 parent e27b238 commit 872f003

File tree

3 files changed

+386
-0
lines changed

3 files changed

+386
-0
lines changed

README.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# terraform-kubernetes-kafka
2+
Kafka on Kubernetes
3+
4+
Tested on GKE but it should work for any kubernetes cluster given the right terraform-provider-kubernetes setup.
5+
6+
## Inputs
7+
8+
- **kafka_name** : name of the kafka deployment
9+
- **namespace** : kubernetes namespace to deploy into
10+
- **cluster_size** : kafka cluster size
11+
- **zookeeper_cluster_size** : zookeeper cluster size
12+
13+
## Dependencies
14+
15+
Terraform Kubernetes Provider
16+
17+
## Tested With
18+
19+
- terraform-providers/kubernetes : 1.9.0
20+
- confluentinc/cp-kafka:5.0.1 docker image
21+
- zookeeper:3.5.5 docker image
22+
- kubernetes 1.13.7-gke.8
23+
24+
## Credits
25+
26+
This module was initially generated from helm/incubator/kafka via [k2tf](https://github.com/sl1pm4t/k2tf) project.

main.tf

Lines changed: 356 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,356 @@
1+
# ConfigMap holding the shell scripts mounted into each ZooKeeper pod at
# /config-scripts:
#   ok / ready - probe scripts: send the "ruok" four-letter word to the local
#                client port (arg 1, default 2181) via nc
#   run        - container entrypoint: derives the server id from the pod
#                hostname ordinal, renders zoo.cfg and log4j.properties, then
#                execs the ZooKeeper JVM
# Note: "$${...}" is a Terraform escape so the shell (not Terraform) expands
# the ${...} parameter expressions.
resource "kubernetes_config_map" "zookeeper" {
  metadata {
    name      = "${var.kafka_name}-zookeeper"
    namespace = "${var.namespace}"
    labels    = { app = "${var.kafka_name}-zookeeper", component = "server" }
  }
  # Fix: the zookeeper.log.threshold line previously redirected to the
  # undefined variable $LOGGER_PROPERS_FILE, which makes bash abort with an
  # "ambiguous redirect" error before the server ever starts; it now appends
  # to $LOG4J_PROPERTIES like every other log4j line.
  data = { ok = "#!/bin/sh\necho ruok | nc 127.0.0.1 $${1:-2181}\n", ready = "#!/bin/sh\necho ruok | nc 127.0.0.1 $${1:-2181}\n", run = "#!/bin/bash\n\nset -a\nROOT=$(echo /apache-zookeeper-*)\n\nZK_USER=$${ZK_USER:-\"zookeeper\"}\nZK_LOG_LEVEL=$${ZK_LOG_LEVEL:-\"INFO\"}\nZK_DATA_DIR=$${ZK_DATA_DIR:-\"/data\"}\nZK_DATA_LOG_DIR=$${ZK_DATA_LOG_DIR:-\"/data/log\"}\nZK_CONF_DIR=$${ZK_CONF_DIR:-\"/conf\"}\nZK_CLIENT_PORT=$${ZK_CLIENT_PORT:-2181}\nZK_SERVER_PORT=$${ZK_SERVER_PORT:-2888}\nZK_ELECTION_PORT=$${ZK_ELECTION_PORT:-3888}\nZK_TICK_TIME=$${ZK_TICK_TIME:-2000}\nZK_INIT_LIMIT=$${ZK_INIT_LIMIT:-10}\nZK_SYNC_LIMIT=$${ZK_SYNC_LIMIT:-5}\nZK_HEAP_SIZE=$${ZK_HEAP_SIZE:-2G}\nZK_MAX_CLIENT_CNXNS=$${ZK_MAX_CLIENT_CNXNS:-60}\nZK_MIN_SESSION_TIMEOUT=$${ZK_MIN_SESSION_TIMEOUT:- $((ZK_TICK_TIME*2))}\nZK_MAX_SESSION_TIMEOUT=$${ZK_MAX_SESSION_TIMEOUT:- $((ZK_TICK_TIME*20))}\nZK_SNAP_RETAIN_COUNT=$${ZK_SNAP_RETAIN_COUNT:-3}\nZK_PURGE_INTERVAL=$${ZK_PURGE_INTERVAL:-0}\nID_FILE=\"$ZK_DATA_DIR/myid\"\nZK_CONFIG_FILE=\"$ZK_CONF_DIR/zoo.cfg\"\nLOG4J_PROPERTIES=\"$ZK_CONF_DIR/log4j.properties\"\nHOST=$(hostname)\nDOMAIN=`hostname -d`\nZOOCFG=zoo.cfg\nZOOCFGDIR=$ZK_CONF_DIR\nJVMFLAGS=\"-Xmx$ZK_HEAP_SIZE -Xms$ZK_HEAP_SIZE\"\n\nAPPJAR=$(echo $ROOT/*jar)\nCLASSPATH=\"$${ROOT}/lib/*:$${APPJAR}:$${ZK_CONF_DIR}:\"\n\nif [[ $HOST =~ (.*)-([0-9]+)$ ]]; then\n    NAME=$${BASH_REMATCH[1]}\n    ORD=$${BASH_REMATCH[2]}\n    MY_ID=$((ORD+1))\nelse\n    echo \"Failed to extract ordinal from hostname $HOST\"\n    exit 1\nfi\n\nmkdir -p $ZK_DATA_DIR\nmkdir -p $ZK_DATA_LOG_DIR\necho $MY_ID >> $ID_FILE\n\necho \"clientPort=$ZK_CLIENT_PORT\" >> $ZK_CONFIG_FILE\necho \"dataDir=$ZK_DATA_DIR\" >> $ZK_CONFIG_FILE\necho \"dataLogDir=$ZK_DATA_LOG_DIR\" >> $ZK_CONFIG_FILE\necho \"tickTime=$ZK_TICK_TIME\" >> $ZK_CONFIG_FILE\necho \"initLimit=$ZK_INIT_LIMIT\" >> $ZK_CONFIG_FILE\necho \"syncLimit=$ZK_SYNC_LIMIT\" >> $ZK_CONFIG_FILE\necho \"maxClientCnxns=$ZK_MAX_CLIENT_CNXNS\" >> $ZK_CONFIG_FILE\necho \"minSessionTimeout=$ZK_MIN_SESSION_TIMEOUT\" >> $ZK_CONFIG_FILE\necho \"maxSessionTimeout=$ZK_MAX_SESSION_TIMEOUT\" >> $ZK_CONFIG_FILE\necho \"autopurge.snapRetainCount=$ZK_SNAP_RETAIN_COUNT\" >> $ZK_CONFIG_FILE\necho \"autopurge.purgeInterval=$ZK_PURGE_INTERVAL\" >> $ZK_CONFIG_FILE\necho \"4lw.commands.whitelist=*\" >> $ZK_CONFIG_FILE\n\nfor (( i=1; i<=$ZK_REPLICAS; i++ ))\ndo\n    echo \"server.$i=$NAME-$((i-1)).$DOMAIN:$ZK_SERVER_PORT:$ZK_ELECTION_PORT\" >> $ZK_CONFIG_FILE\ndone\n\nrm -f $LOG4J_PROPERTIES\n\necho \"zookeeper.root.logger=$ZK_LOG_LEVEL, CONSOLE\" >> $LOG4J_PROPERTIES\necho \"zookeeper.console.threshold=$ZK_LOG_LEVEL\" >> $LOG4J_PROPERTIES\necho \"zookeeper.log.threshold=$ZK_LOG_LEVEL\" >> $LOG4J_PROPERTIES\necho \"zookeeper.log.dir=$ZK_DATA_LOG_DIR\" >> $LOG4J_PROPERTIES\necho \"zookeeper.log.file=zookeeper.log\" >> $LOG4J_PROPERTIES\necho \"zookeeper.log.maxfilesize=256MB\" >> $LOG4J_PROPERTIES\necho \"zookeeper.log.maxbackupindex=10\" >> $LOG4J_PROPERTIES\necho \"zookeeper.tracelog.dir=$ZK_DATA_LOG_DIR\" >> $LOG4J_PROPERTIES\necho \"zookeeper.tracelog.file=zookeeper_trace.log\" >> $LOG4J_PROPERTIES\necho \"log4j.rootLogger=\\$${zookeeper.root.logger}\" >> $LOG4J_PROPERTIES\necho \"log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\" >> $LOG4J_PROPERTIES\necho \"log4j.appender.CONSOLE.Threshold=\\$${zookeeper.console.threshold}\" >> $LOG4J_PROPERTIES\necho \"log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\" >> $LOG4J_PROPERTIES\necho \"log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n\" >> $LOG4J_PROPERTIES\n\nif [ -n \"$JMXDISABLE\" ]\nthen\n    MAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain\nelse\n    MAIN=\"-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain\"\nfi\n\nset -x\nexec java -cp \"$CLASSPATH\" $JVMFLAGS $MAIN $ZK_CONFIG_FILE\n" }
}
9+
10+
# Headless service (cluster_ip = "None") backing the ZooKeeper StatefulSet:
# gives each pod a stable per-pod DNS name, which the run script uses (via
# `hostname -d`) to build the server.N quorum entries.
resource "kubernetes_service" "zookeeper_headless" {
  metadata {
    name      = "${var.kafka_name}-zookeeper-headless"
    namespace = "${var.namespace}"
    labels    = { app = "${var.kafka_name}-zookeeper" }
  }
  spec {
    # 2181: client connections
    port {
      name        = "client"
      protocol    = "TCP"
      port        = 2181
      target_port = "client"
    }
    # 3888: leader-election traffic between ensemble members
    port {
      name        = "election"
      protocol    = "TCP"
      port        = 3888
      target_port = "election"
    }
    # 2888: follower-to-leader (peer) traffic
    port {
      name        = "server"
      protocol    = "TCP"
      port        = 2888
      target_port = "server"
    }
    selector   = { app = "${var.kafka_name}-zookeeper" }
    cluster_ip = "None"
  }
}
39+
40+
# Regular ClusterIP service for ZooKeeper clients; this is the endpoint the
# Kafka brokers use via KAFKA_ZOOKEEPER_CONNECT ("<kafka_name>-zookeeper:2181").
resource "kubernetes_service" "zookeeper" {
  metadata {
    name      = "${var.kafka_name}-zookeeper"
    namespace = "${var.namespace}"
    labels    = { app = "${var.kafka_name}-zookeeper" }
  }
  spec {
    # Only the client port is exposed here; quorum ports live on the
    # headless service.
    port {
      name        = "client"
      protocol    = "TCP"
      port        = 2181
      target_port = "client"
    }
    selector = { app = "${var.kafka_name}-zookeeper" }
    type     = "ClusterIP"
  }
}
57+
58+
# ClusterIP service for Kafka clients inside the cluster: load-balances broker
# port 9092 across all broker pods.
resource "kubernetes_service" "kafka" {
  metadata {
    name      = "${var.kafka_name}"
    namespace = "${var.namespace}"
    labels    = { app = "${var.kafka_name}" }
  }
  spec {
    port {
      name        = "broker"
      port        = 9092
      target_port = "kafka" # named container port on the broker pod
    }
    selector = { app = "${var.kafka_name}" }
  }
}
73+
74+
# Headless service for the Kafka StatefulSet, providing stable per-broker DNS.
# The annotation publishes endpoints for pods that are not yet Ready, so
# brokers can find each other during startup (legacy alpha annotation,
# predecessor of publish_not_ready_addresses).
resource "kubernetes_service" "kafka_headless" {
  metadata {
    name        = "${var.kafka_name}-headless"
    namespace   = "${var.namespace}"
    labels      = { app = "${var.kafka_name}" }
    annotations = { "service.alpha.kubernetes.io/tolerate-unready-endpoints" = "true" }
  }
  spec {
    port {
      name = "broker"
      port = 9092
    }
    selector   = { app = "${var.kafka_name}" }
    cluster_ip = "None"
  }
}
90+
91+
# ZooKeeper ensemble as a StatefulSet. Pods run the `run` script from the
# ConfigMap mounted at /config-scripts; probes call the ok/ready scripts.
resource "kubernetes_stateful_set" "zookeeper" {
  metadata {
    name      = "${var.kafka_name}-zookeeper"
    namespace = "${var.namespace}"
    labels    = { app = "${var.kafka_name}-zookeeper", component = "server" }
  }
  spec {
    replicas = "${var.zookeeper_cluster_size}"
    selector {
      match_labels = { app = "${var.kafka_name}-zookeeper", component = "server" }
    }
    template {
      metadata {
        labels = { app = "${var.kafka_name}-zookeeper", component = "server" }
      }
      spec {
        # Scripts ConfigMap; mode 0555 so the probe/run scripts are executable.
        volume {
          name = "config"
          config_map {
            name         = "${var.kafka_name}-zookeeper"
            default_mode = "0555"
          }
        }
        # NOTE(review): no volume source is given, so "data" is an emptyDir —
        # ZooKeeper state does not survive pod rescheduling. Confirm this is
        # intended (a volume_claim_template would make it durable).
        volume {
          name = "data"
        }
        container {
          name    = "zookeeper"
          image   = "zookeeper:3.5.5"
          command = ["/bin/bash", "-xec", "/config-scripts/run"]
          port {
            name           = "client"
            container_port = 2181
            protocol       = "TCP"
          }
          port {
            name           = "election"
            container_port = 3888
            protocol       = "TCP"
          }
          port {
            name           = "server"
            container_port = 2888
            protocol       = "TCP"
          }
          # Fix: ZK_REPLICAS was hard-coded to "3" while `replicas` above is
          # var.zookeeper_cluster_size. The run script builds the server.N
          # quorum list from ZK_REPLICAS, so any other cluster size produced a
          # broken ensemble. Keep the two in lock-step.
          env {
            name  = "ZK_REPLICAS"
            value = "${var.zookeeper_cluster_size}"
          }
          env {
            name  = "JMXAUTH"
            value = "false"
          }
          env {
            name  = "JMXDISABLE"
            value = "false"
          }
          env {
            name  = "JMXPORT"
            value = "1099"
          }
          env {
            name  = "JMXSSL"
            value = "false"
          }
          env {
            name  = "ZK_HEAP_SIZE"
            value = "1G"
          }
          env {
            name  = "ZK_SYNC_LIMIT"
            value = "10"
          }
          env {
            name  = "ZK_TICK_TIME"
            value = "2000"
          }
          # NOTE(review): the ZOO_* variables below are read by the stock
          # zookeeper image entrypoint, not by /config-scripts/run (which
          # only reads ZK_*) — presumably kept for compatibility; verify.
          env {
            name  = "ZOO_AUTOPURGE_PURGEINTERVAL"
            value = "0"
          }
          env {
            name  = "ZOO_AUTOPURGE_SNAPRETAINCOUNT"
            value = "3"
          }
          env {
            name  = "ZOO_INIT_LIMIT"
            value = "5"
          }
          env {
            name  = "ZOO_MAX_CLIENT_CNXNS"
            value = "60"
          }
          env {
            name  = "ZOO_PORT"
            value = "2181"
          }
          env {
            name  = "ZOO_STANDALONE_ENABLED"
            value = "false"
          }
          env {
            name  = "ZOO_TICK_TIME"
            value = "2000"
          }
          volume_mount {
            name       = "data"
            mount_path = "/data"
          }
          volume_mount {
            name       = "config"
            mount_path = "/config-scripts"
          }
          # Probes run the "ruok" check scripts from the ConfigMap.
          liveness_probe {
            exec {
              command = ["sh", "/config-scripts/ok"]
            }
            initial_delay_seconds = 20
            timeout_seconds       = 5
            period_seconds        = 30
            success_threshold     = 1
            failure_threshold     = 2
          }
          readiness_probe {
            exec {
              command = ["sh", "/config-scripts/ready"]
            }
            initial_delay_seconds = 20
            timeout_seconds       = 5
            period_seconds        = 30
            success_threshold     = 1
            failure_threshold     = 2
          }
          image_pull_policy = "IfNotPresent"
        }
        # Generous drain window so a member can hand off cleanly.
        termination_grace_period_seconds = 1800
        security_context {
          run_as_user = 1000
          fs_group    = 1000
        }
      }
    }
    # Must match the headless service above for stable per-pod DNS.
    service_name = "${var.kafka_name}-zookeeper-headless"
    update_strategy {
      type = "RollingUpdate"
    }
  }
}
239+
240+
# Kafka brokers as a StatefulSet. Each broker derives its broker id from the
# pod ordinal and advertises its pod IP on port 9092.
resource "kubernetes_stateful_set" "kafka" {
  metadata {
    name      = "${var.kafka_name}"
    namespace = "${var.namespace}"
    labels    = { app = "${var.kafka_name}" }
  }
  spec {
    replicas = "${var.cluster_size}"
    selector {
      match_labels = { app = "${var.kafka_name}" }
    }
    template {
      metadata {
        labels = { app = "${var.kafka_name}" }
      }
      spec {
        container {
          name  = "kafka-broker"
          image = "confluentinc/cp-kafka:5.0.1"
          # KAFKA_BROKER_ID = pod ordinal (suffix of POD_NAME);
          # advertised listener = this pod's IP. $$ keeps shell expansion
          # out of Terraform interpolation.
          command = ["sh", "-exc", "unset KAFKA_PORT && \\\nexport KAFKA_BROKER_ID=$${POD_NAME##*-} && \\\nexport KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://$${POD_IP}:9092 && \\\nexec /etc/confluent/docker/run\n"]
          port {
            name           = "kafka"
            container_port = 9092
          }
          env {
            name = "POD_IP"
            value_from {
              field_ref {
                field_path = "status.podIP"
              }
            }
          }
          env {
            name = "POD_NAME"
            value_from {
              field_ref {
                field_path = "metadata.name"
              }
            }
          }
          env {
            name = "POD_NAMESPACE"
            value_from {
              field_ref {
                field_path = "metadata.namespace"
              }
            }
          }
          env {
            name  = "KAFKA_HEAP_OPTS"
            value = "-Xmx1G -Xms1G"
          }
          # NOTE(review): hard-coded replication factor 3 — if
          # var.cluster_size < 3 the __consumer_offsets topic cannot be
          # created; confirm the intended minimum cluster size.
          env {
            name  = "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR"
            value = "3"
          }
          env {
            name  = "KAFKA_ZOOKEEPER_CONNECT"
            value = "${var.kafka_name}-zookeeper:2181"
          }
          env {
            name  = "KAFKA_LOG_DIRS"
            value = "/opt/kafka/data/logs"
          }
          env {
            name  = "KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE"
            value = "false"
          }
          env {
            name  = "KAFKA_JMX_PORT"
            value = "5555"
          }
          volume_mount {
            name       = "datadir"
            mount_path = "/opt/kafka/data"
          }
          liveness_probe {
            exec {
              command = ["sh", "-ec", "/usr/bin/jps | /bin/grep -q SupportedKafka"]
            }
            initial_delay_seconds = 30
            timeout_seconds       = 5
          }
          readiness_probe {
            tcp_socket {
              port = "kafka"
            }
            initial_delay_seconds = 30
            timeout_seconds       = 5
            period_seconds        = 10
            success_threshold     = 1
            failure_threshold     = 3
          }
          image_pull_policy = "IfNotPresent"
        }
        termination_grace_period_seconds = 60
      }
    }
    # Durable per-broker storage for the Kafka log dirs.
    volume_claim_template {
      metadata {
        name = "datadir"
      }
      spec {
        access_modes = ["ReadWriteOnce"]
        resources {
          requests = { storage = "1Gi" }
        }
      }
    }
    # Fix: was "${var.kafka_name}-kafka-headless", which does not match the
    # headless service actually created above ("${var.kafka_name}-headless");
    # the mismatch breaks the brokers' stable per-pod DNS records.
    service_name          = "${var.kafka_name}-headless"
    pod_management_policy = "OrderedReady"
    update_strategy {
      type = "OnDelete"
    }
  }
}
356+

variables.tf

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
# Module inputs. All are required (no defaults), matching the original
# interface; descriptions added for `terraform-docs` / plan readability.
variable "namespace" {
  description = "Kubernetes namespace to deploy all resources into"
}

variable "kafka_name" {
  description = "Base name for the Kafka deployment; also prefixes the ZooKeeper resources"
}

variable "cluster_size" {
  description = "Number of Kafka broker replicas"
}

variable "zookeeper_cluster_size" {
  description = "Number of ZooKeeper ensemble members (use an odd number for quorum)"
}

0 commit comments

Comments
 (0)