Skip to content

Commit 791c713

Browse files
committed
Started working on Control center
1 parent 6b047d7 commit 791c713

File tree

7 files changed

+134
-0
lines changed

7 files changed

+134
-0
lines changed

vagrant/roles/role_kafka_control_center/defaults/main.yml

Whitespace-only changes.

vagrant/roles/role_kafka_control_center/files/main.yml

Whitespace-only changes.
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+

vagrant/roles/role_kafka_control_center/meta/main.yml

Whitespace-only changes.
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
---
###
### Templating config files
###

### /etc/kafka

# Render the Connect distributed-worker config from its Jinja2 template.
- name: Copying connect-distributed.properties
  template:
    src: templates/kafka/connect-distributed.properties.j2
    dest: /etc/kafka/connect-distributed.properties
    # Quoted so YAML 1.1 does not reinterpret the octal literal as a
    # decimal int. 0644 instead of the previous 0755: a properties file
    # is data, not a script, and must not carry the execute bit.
    mode: "0644"
  tags:
    - kafka-configfiles
    - kafka-connect


#############################################################################
###
### Enabling and starting Kafka services at boot
###

# Informational only — runs on worker nodes so the play output shows
# which hosts are about to start Connect.
- name: Announce Kafka Connect Distributed service startup
  debug:
    msg: "Starting services for Kafka Connect Distributed..."
  when:
    - inventory_hostname in groups['kafka_worker_nodes']
  tags:
    - kafka-systemctl
    - kafka-connect

- name: Start and enable services at boot time
  systemd:
    name: "{{ item }}"
    state: started
    # Canonical lowercase boolean (yamllint `truthy`); was `True`.
    enabled: true
  with_items:
    - confluent-kafka-connect
  when:
    - inventory_hostname in groups['kafka_worker_nodes']
  tags:
    - kafka-systemctl
    - kafka-connect
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
##
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

# Kafka Connect distributed-worker configuration (Jinja2 template).
# Derived from the upstream example file; values such as
# `bootstrap.servers` and the replication factors are injected by Ansible
# and may differ from what a production deployment would use.


# Connection security and per-worker client identification (added 2018-09-03).
security.protocol={{ kafka_connect_security_protocol }}
client.id={{ kafka_service_ip_address }}


# Host/port pairs used for the initial connection to the Kafka cluster.
bootstrap.servers={{ kafka_rest_bootstrap_servers }}

# Connect cluster group name — must be unique and must not collide with
# any consumer group ID.
group.id={{ kafka_connect_group_id }}

# Converters control how record keys/values are (de)serialized between
# Kafka and Connect's internal data format.
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# Per-converter options are namespaced under the converter's own prefix.
key.converter.schemas.enable=true
value.converter.schemas.enable=true

# Internal topic for source-connector offsets: many partitions,
# replicated, compacted. Connect auto-creates it if absent; create it
# manually beforehand when a specific topic configuration is required.
# Upstream recommends replication factor >= 3; the templated value allows
# a factor of 1 for single-broker test clusters.
offset.storage.topic={{ kafka_connect_offset_storage_topic }}
offset.storage.replication.factor={{ kafka_connect_offset_storage_replication_factor }}
#offset.storage.partitions=25

# Internal topic for connector/task configurations: single partition,
# highly replicated, compacted. Same auto-create and replication-factor
# caveats as the offsets topic above.
config.storage.topic={{ kafka_connect_config_storage_topic }}
config.storage.replication.factor={{ kafka_connect_config_storage_replication_factor }}

# Internal topic for connector/task status: may have several partitions,
# replicated and compacted. Same auto-create and replication-factor
# caveats as above.
status.storage.topic={{ kafka_connect_status_storage_topic }}
status.storage.replication.factor={{ kafka_connect_status_storage_replication_factor }}
#status.storage.partitions=5

# Aggressive flush interval, useful for testing/debugging.
offset.flush.interval.ms=10000

# REST API bind address and port for this worker.
rest.host.name={{ kafka_service_ip_address }}
rest.port={{ port_kafka_connect }}

# Address/port advertised to other workers — must be routable from them.
rest.advertised.host.name={{ kafka_service_ip_address }}
rest.advertised.port={{ port_kafka_connect }}

# Comma-separated list of top-level directories scanned for plugins
# (connectors, converters, transformations) with classloading isolation.
# Each entry may contain: plugin jars with dependencies, plugin uber-jars,
# or unpacked plugin class directories.
# Example: plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
plugin.path=/usr/share/java

vagrant/roles/role_kafka_control_center/vars/main.yml

Whitespace-only changes.

0 commit comments

Comments
 (0)