diff --git a/.copyrightconfig b/.copyrightconfig
index 8677582..dd6cb61 100644
--- a/.copyrightconfig
+++ b/.copyrightconfig
@@ -11,4 +11,4 @@ startyear: 2023
 # - Dotfiles already skipped automatically
 # Enable by removing the leading '# ' from the next line and editing values.
 # filesexcluded: third_party/*, docs/generated/*.md, assets/*.png, scripts/temp_*.py, vendor/lib.js
-filesexcluded: .github/*, README.md, CONTRIBUTING.md, Jenkinsfile, gradle/*, docker-compose.yml, *.gradle, gradle.properties, gradlew, gradlew.bat, **/test/resources/**, docs/**
+filesexcluded: .github/*, README.md, CONTRIBUTING.md, Jenkinsfile, gradle/*, docker-compose.yml, *.gradle, gradle.properties, gradlew, gradlew.bat, **/test/resources/**, docs/**, test-app/docker-compose.yml
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index edc90ec..a547d8e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,8 @@
 This guide describes how to develop and contribute pull requests to this connector. The focus is currently on how to
-develop and test the connector, either via a Docker cluster install of Confluent Platform or of the regular Kafka
-distribution.
+develop and test the connector. There are two methods available - automated and manual. Both methods are performed via a
+Docker stack. The automated tests stack creates MarkLogic, Sonar, and Postgres instances for the automated tests. The
+manual tests use Confluent Platform in a different Docker stack to allow testing the connector via Confluent Control
+Center with a MarkLogic instance in the same stack.
 
 ### Requirements:
 * MarkLogic Server 11+
@@ -10,7 +12,7 @@ See [the Confluent compatibility matrix](https://docs.confluent.io/platform/curr
 for more information. After installing your desired version of Java, ensure that the `JAVA_HOME` environment variable
 points to your Java installation.
 
-# Configuring Local Automated and Manual Testing +# Configuring Local Automated Testing The test suite for the MarkLogic Kafka connector, found at `src/test`, requires that the test application first be deployed to a MarkLogic instance. The recommendation is for this application to be deployed via Docker and @@ -19,51 +21,17 @@ deployed to a MarkLogic instance. The recommendation is for this application to Note that you do not need to install [Gradle](https://gradle.org/) - the "gradlew" program used below will install the appropriate version of Gradle if you do not have it installed already. -## Virtual Server Preparation -The project includes a docker-compose file in the repository root that includes MarkLogic, SonarQube with a Postgres server, and Confluent -Platform servers. - -### Confluent Platform -[Confluent Platform](https://docs.confluent.io/platform/current/overview.html) provides an easy mechanism for running -Kafka locally via a single Docker cluster. A primary benefit of testing with Confluent Platform is to test configuring -the MarkLogic Kafka connector via the -[Confluent Control Center](https://docs.confluent.io/platform/current/control-center/index.html) web application. -The Confluent Platform servers in this docker-compose file are based on the Confluent files and instructions at -[Install a Confluent Platform cluster in Docker using a Confluent docker-compose file](https://docs.confluent.io/platform/current/platform-quickstart.html). - -## Docker Cluster Preparation -To setup the docker cluster, use the docker-compose file in the repository root to build the Docker cluster with -the command: +## Docker Cluster Preparation for Automated Testing +The automated tests require a MarkLogic server, SonarQube server, and Postgres server. The docker-compose file in the +repository root includes these services. 
To prepare for running the automated tests, perform the following steps: ``` docker-compose up -d --build ``` -When the setup is complete, you should be able to run -``` -docker-compose ps -``` -and see results similar to the following. -``` -NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS -broker confluentinc/cp-kafka:7.6.1 "/etc/confluent/dock…" broker 14 minutes ago Up 14 minutes 0.0.0.0:9092->9092/tcp, 0.0.0.0:9101->9101/tcp -connect cnfldemos/cp-server-connect-datagen:0.6.4-7.6.0 "/etc/confluent/dock…" connect 14 minutes ago Up 14 minutes 0.0.0.0:8083->8083/tcp, 9092/tcp -control-center confluentinc/cp-enterprise-control-center:7.6.1 "/etc/confluent/dock…" control-center 14 minutes ago Up 14 minutes 0.0.0.0:9021->9021/tcp -ksql-datagen confluentinc/ksqldb-examples:7.6.1 "bash -c 'echo Waiti…" ksql-datagen 14 minutes ago Up 14 minutes -ksqldb-cli confluentinc/cp-ksqldb-cli:7.6.1 "/bin/sh" ksqldb-cli 14 minutes ago Up 14 minutes -ksqldb-server confluentinc/cp-ksqldb-server:7.6.1 "/etc/confluent/dock…" ksqldb-server 14 minutes ago Up 14 minutes 0.0.0.0:8088->8088/tcp -marklogic marklogicdb/marklogic-db:11.2.0-centos-1.1.2 "/tini -- /usr/local…" marklogic 14 minutes ago Up 14 minutes 25/tcp, 7997-7999/tcp, 0.0.0.0:8000-8002->8000-8002/tcp, 0.0.0.0:8010-8013->8010-8013/tcp, 8003-8009/tcp, 0.0.0.0:8018-8019->8018-8019/tcp -marklogic-kafka-confluent-postgres-1 postgres:15-alpine "docker-entrypoint.s…" postgres 14 minutes ago Up 14 minutes 5432/tcp -marklogic-kafka-confluent-sonarqube-1 sonarqube:10.3.0-community "/opt/sonarqube/dock…" sonarqube 14 minutes ago Up 14 minutes 0.0.0.0:9000->9000/tcp -rest-proxy confluentinc/cp-kafka-rest:7.6.1 "/etc/confluent/dock…" rest-proxy 14 minutes ago Up 14 minutes 0.0.0.0:8082->8082/tcp -schema-registry confluentinc/cp-schema-registry:7.6.1 "/etc/confluent/dock…" schema-registry 14 minutes ago Up 14 minutes 0.0.0.0:8081->8081/tcp -``` -You can now visit several web applications: +You can now visit these web applications: * 
http://localhost:8000 to access the MarkLogic server. * http://localhost:9000 to use the SonarQube server as described in the "Running Sonar Code Analysis" section below. -* http://localhost:9021 to access - [Confluent's Control Center GUI](https://docs.confluent.io/platform/current/control-center/index.html) application. - Within Control Center, click on "controlcenter.cluster" to access the configuration for the Kafka cluster. ## MarkLogic Preparation To prepare the MarkLogic server for automated testing as well as testing with the Confluent Platform, the Data Hub based @@ -100,8 +68,8 @@ To configure the SonarQube service, perform the following steps: 8. On the "Analysis Method" page, click on "Locally". 9. In the "Provide a token" panel, click on "Generate". Copy the token. 10. Click the "Continue" button. -11. Update `systemProp.sonar.token=` in `gradle-local.properties` in the root of your - project. +11. Update `systemProp.sonar.token=` in `gradle-local.properties` in the root directory +of your project. To run the SonarQube analysis, run the following Gradle task in the root directory, which will run all the tests with code coverage and then generate a quality report with SonarQube: @@ -125,8 +93,51 @@ without having to re-run the tests. For more assistance with Sonar and Gradle, see the [Sonar Gradle plugin docs](https://docs.sonarqube.org/latest/analyzing-source-code/scanners/sonarscanner-for-gradle/). +# Configuring Local Manual Testing +This project includes a Docker Compose file that creates a Kafka cluster using Confluent Platform along with a +MarkLogic server. This allows you to test the MarkLogic Kafka connector via the Confluent Control Center web +application. The instructions below describe how to get started. + +## Docker Cluster Preparation for Manual Testing +The docker-compose file in the test-app directory includes these services along with a MarkLogic server. 
+``` +docker-compose --env-file ./.env -f test-app/docker-compose.yml up -d --build +``` + +When the setup is complete, you should be able to run +``` +docker-compose --env-file ./.env -f test-app/docker-compose.yml ps +``` +and see results similar to the following. +``` +NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS +broker confluentinc/cp-kafka:7.6.1 "/etc/confluent/dock…" broker 14 minutes ago Up 14 minutes 0.0.0.0:9092->9092/tcp, 0.0.0.0:9101->9101/tcp +connect cnfldemos/cp-server-connect-datagen:0.6.4-7.6.0 "/etc/confluent/dock…" connect 14 minutes ago Up 14 minutes 0.0.0.0:8083->8083/tcp, 9092/tcp +control-center confluentinc/cp-enterprise-control-center:7.6.1 "/etc/confluent/dock…" control-center 14 minutes ago Up 14 minutes 0.0.0.0:9021->9021/tcp +ksql-datagen confluentinc/ksqldb-examples:7.6.1 "bash -c 'echo Waiti…" ksql-datagen 14 minutes ago Up 14 minutes +ksqldb-cli confluentinc/cp-ksqldb-cli:7.6.1 "/bin/sh" ksqldb-cli 14 minutes ago Up 14 minutes +ksqldb-server confluentinc/cp-ksqldb-server:7.6.1 "/etc/confluent/dock…" ksqldb-server 14 minutes ago Up 14 minutes 0.0.0.0:8088->8088/tcp +marklogic marklogicdb/marklogic-db:11.2.0-centos-1.1.2 "/tini -- /usr/local…" marklogic 14 minutes ago Up 14 minutes 25/tcp, 7997-7999/tcp, 0.0.0.0:8000-8002->8000-8002/tcp, 0.0.0.0:8010-8013->8010-8013/tcp, 8003-8009/tcp, 0.0.0.0:8018-8019->8018-8019/tcp +marklogic-kafka-confluent-postgres-1 postgres:15-alpine "docker-entrypoint.s…" postgres 14 minutes ago Up 14 minutes 5432/tcp +marklogic-kafka-confluent-sonarqube-1 sonarqube:10.3.0-community "/opt/sonarqube/dock…" sonarqube 14 minutes ago Up 14 minutes 0.0.0.0:9000->9000/tcp +rest-proxy confluentinc/cp-kafka-rest:7.6.1 "/etc/confluent/dock…" rest-proxy 14 minutes ago Up 14 minutes 0.0.0.0:8082->8082/tcp +schema-registry confluentinc/cp-schema-registry:7.6.1 "/etc/confluent/dock…" schema-registry 14 minutes ago Up 14 minutes 0.0.0.0:8081->8081/tcp +``` + +You can now visit several web applications: +* 
http://localhost:8000 to access the MarkLogic server. +* http://localhost:9021 to access + [Confluent's Control Center GUI](https://docs.confluent.io/platform/current/control-center/index.html) application. + Within Control Center, click on "controlcenter.cluster" to access the configuration for the Kafka cluster. + +### Confluent Platform for Manual Testing +[Confluent Platform](https://docs.confluent.io/platform/current/overview.html) provides an easy mechanism for running +Kafka locally via a single Docker cluster. A primary benefit of testing with Confluent Platform is to test configuring +the MarkLogic Kafka connector via the +[Confluent Control Center](https://docs.confluent.io/platform/current/control-center/index.html) web application. +The Confluent Platform servers in this docker-compose file are based on the Confluent files and instructions at +[Install a Confluent Platform cluster in Docker using a Confluent docker-compose file](https://docs.confluent.io/platform/current/platform-quickstart.html). -## Confluent Platform for Manual Testing ### Building and Sharing the Connector with the Docker Container Using gradle in the root directory, build the connector archive and copy it to a directory shared with the Confluent @@ -134,7 +145,7 @@ Platform Docker cluster built in the that section, using this gradle command in ``` ./gradlew copyConnectorToDockerVolume ``` -**You MUST restart the "connect" server in the Docker "confluent-platform-example" cluster.** +**You MUST restart the "connect" server in the Docker "manual-tests-marklogic-kafka-confluent" cluster.** Now, verify the connector has loaded properly. 1. Click on "Connect" in the left sidebar. 
diff --git a/Jenkinsfile b/Jenkinsfile index fedbf83..b151e1f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -9,7 +9,7 @@ def runtests(String marklogicVersion) { sudo /usr/local/sbin/mladmin cleandata cd kafka-connector MARKLOGIC_LOGS_VOLUME=/tmp MARKLOGIC_IMAGE='''+marklogicVersion+''' docker-compose up -d --build - sleep 120s; + sleep 60s; ''' sh label:'deploy project', script: '''#!/bin/bash export JAVA_HOME=$JAVA17_HOME_DIR @@ -17,6 +17,7 @@ def runtests(String marklogicVersion) { export PATH=$GRADLE_USER_HOME:$JAVA_HOME/bin:$PATH cd kafka-connector ./gradlew hubInit + ./gradlew mlTestConnections ./gradlew -i mlDeploy ''' sh label:'test', script: '''#!/bin/bash diff --git a/docker-compose.yml b/docker-compose.yml index 1fb078a..67289fc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,197 +1,6 @@ --- -name: marklogic-kafka-confluent +name: docker-tests-marklogic-kafka-confluent services: - - # This compose file is based on: - # This guide - https://docs.confluent.io/platform/current/platform-quickstart.html#step-6-uninstall-and-clean-up - # This compose file - https://raw.githubusercontent.com/confluentinc/cp-all-in-one/7.6.1-post/cp-all-in-one-kraft/docker-compose.yml - # Extended to include a MarkLogic container - - broker: - image: confluentinc/cp-kafka:7.6.1 - hostname: broker - container_name: broker - ports: - - "9092:9092" - - "9101:9101" - - # The NET_RAW capability allows a process to create raw sockets. Polaris does not like that. - # This setting removes the NET_RAW capability from the container. 
- cap_drop: - - NET_RAW - environment: - KAFKA_NODE_ID: 1 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' - KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092' - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_JMX_PORT: 9101 - KAFKA_JMX_HOSTNAME: localhost - KAFKA_PROCESS_ROLES: 'broker,controller' - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:29093' - KAFKA_LISTENERS: 'PLAINTEXT://broker:29092,CONTROLLER://broker:29093,PLAINTEXT_HOST://0.0.0.0:9092' - KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' - KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' - KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' - # Replace CLUSTER_ID with a unique base64 UUID using "bin/kafka-storage.sh random-uuid" - # See https://docs.confluent.io/kafka/operations-tools/kafka-tools.html#kafka-storage-sh - CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' - - schema-registry: - image: confluentinc/cp-schema-registry:7.6.1 - hostname: schema-registry - container_name: schema-registry - depends_on: - - broker - ports: - - "8081:8081" - cap_drop: - - NET_RAW - environment: - SCHEMA_REGISTRY_HOST_NAME: schema-registry - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - - connect: - image: cnfldemos/cp-server-connect-datagen:0.6.4-7.6.0 - hostname: connect - container_name: connect - depends_on: - - broker - - schema-registry - ports: - - "8083:8083" - cap_drop: - - NET_RAW - environment: - CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' - CONNECT_REST_ADVERTISED_HOST_NAME: connect - CONNECT_GROUP_ID: compose-connect-group - CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs - CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 - CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 - CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets - 
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 - CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status - CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 - CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter - CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter - CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 - # CLASSPATH required due to CC-2422 - CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.6.1.jar - CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" - CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" - CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/share/confluent-marklogic-components" - CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR - volumes: - - ./docker/confluent-marklogic-components:/usr/share/confluent-marklogic-components - - control-center: - image: confluentinc/cp-enterprise-control-center:7.6.1 - hostname: control-center - container_name: control-center - depends_on: - - broker - - schema-registry - - connect - - ksqldb-server - ports: - - "9021:9021" - cap_drop: - - NET_RAW - environment: - CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092' - CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083' - CONTROL_CENTER_CONNECT_HEALTHCHECK_ENDPOINT: '/connectors' - CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088" - CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088" - CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" - CONTROL_CENTER_REPLICATION_FACTOR: 1 - CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 - CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 - CONFLUENT_METRICS_TOPIC_REPLICATION: 1 - PORT: 9021 - - ksqldb-server: - image: confluentinc/cp-ksqldb-server:7.6.1 - hostname: ksqldb-server - container_name: 
ksqldb-server - depends_on: - - broker - - connect - ports: - - "8088:8088" - cap_drop: - - NET_RAW - environment: - KSQL_CONFIG_DIR: "/etc/ksql" - KSQL_BOOTSTRAP_SERVERS: "broker:29092" - KSQL_HOST_NAME: ksqldb-server - KSQL_LISTENERS: "http://0.0.0.0:8088" - KSQL_CACHE_MAX_BYTES_BUFFERING: 0 - KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" - KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" - KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" - KSQL_KSQL_CONNECT_URL: "http://connect:8083" - KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1 - KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true' - KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' - - ksqldb-cli: - image: confluentinc/cp-ksqldb-cli:7.6.1 - container_name: ksqldb-cli - depends_on: - - broker - - connect - - ksqldb-server - entrypoint: /bin/sh - tty: true - cap_drop: - - NET_RAW - - ksql-datagen: - image: confluentinc/ksqldb-examples:7.6.1 - hostname: ksql-datagen - container_name: ksql-datagen - depends_on: - - ksqldb-server - - broker - - schema-registry - - connect - command: "bash -c 'echo Waiting for Kafka to be ready... && \ - cub kafka-ready -b broker:29092 1 40 && \ - echo Waiting for Confluent Schema Registry to be ready... && \ - cub sr-ready schema-registry 8081 40 && \ - echo Waiting a few seconds for topic creation to finish... 
&& \ - sleep 11 && \ - tail -f /dev/null'" - cap_drop: - - NET_RAW - environment: - KSQL_CONFIG_DIR: "/etc/ksql" - STREAMS_BOOTSTRAP_SERVERS: broker:29092 - STREAMS_SCHEMA_REGISTRY_HOST: schema-registry - STREAMS_SCHEMA_REGISTRY_PORT: 8081 - - rest-proxy: - image: confluentinc/cp-kafka-rest:7.6.1 - depends_on: - - broker - - schema-registry - ports: - - 8082:8082 - hostname: rest-proxy - container_name: rest-proxy - cap_drop: - - NET_RAW - environment: - KAFKA_REST_HOST_NAME: rest-proxy - KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' - KAFKA_REST_LISTENERS: "http://0.0.0.0:8082" - KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' - marklogic: image: "${MARKLOGIC_IMAGE}" platform: linux/amd64 diff --git a/test-app/docker-compose.yml b/test-app/docker-compose.yml new file mode 100644 index 0000000..bd1e1fa --- /dev/null +++ b/test-app/docker-compose.yml @@ -0,0 +1,210 @@ +--- +name: manual-tests-marklogic-kafka-confluent +services: + + # This compose file is based on: + # This guide - https://docs.confluent.io/platform/current/platform-quickstart.html#step-6-uninstall-and-clean-up + # This compose file - https://raw.githubusercontent.com/confluentinc/cp-all-in-one/7.6.1-post/cp-all-in-one-kraft/docker-compose.yml + # Extended to include a MarkLogic container + + broker: + image: confluentinc/cp-kafka:7.6.1 + hostname: broker + container_name: broker + ports: + - "9092:9092" + - "9101:9101" + + # The NET_RAW capability allows a process to create raw sockets. Polaris does not like that. + # This setting removes the NET_RAW capability from the container. 
+ cap_drop: + - NET_RAW + environment: + KAFKA_NODE_ID: 1 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092' + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_JMX_PORT: 9101 + KAFKA_JMX_HOSTNAME: localhost + KAFKA_PROCESS_ROLES: 'broker,controller' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:29093' + KAFKA_LISTENERS: 'PLAINTEXT://broker:29092,CONTROLLER://broker:29093,PLAINTEXT_HOST://0.0.0.0:9092' + KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' + KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' + KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' + # Replace CLUSTER_ID with a unique base64 UUID using "bin/kafka-storage.sh random-uuid" + # See https://docs.confluent.io/kafka/operations-tools/kafka-tools.html#kafka-storage-sh + CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' + + schema-registry: + image: confluentinc/cp-schema-registry:7.6.1 + hostname: schema-registry + container_name: schema-registry + depends_on: + - broker + ports: + - "8081:8081" + cap_drop: + - NET_RAW + environment: + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 + + connect: + image: cnfldemos/cp-server-connect-datagen:0.6.4-7.6.0 + hostname: connect + container_name: connect + depends_on: + - broker + - schema-registry + ports: + - "8083:8083" + cap_drop: + - NET_RAW + environment: + CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + 
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + # CLASSPATH required due to CC-2422 + CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.6.1.jar + CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/share/confluent-marklogic-components" + CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + volumes: + - ./docker/confluent-marklogic-components:/usr/share/confluent-marklogic-components + + control-center: + image: confluentinc/cp-enterprise-control-center:7.6.1 + hostname: control-center + container_name: control-center + depends_on: + - broker + - schema-registry + - connect + - ksqldb-server + ports: + - "9021:9021" + cap_drop: + - NET_RAW + environment: + CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092' + CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083' + CONTROL_CENTER_CONNECT_HEALTHCHECK_ENDPOINT: '/connectors' + CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088" + CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088" + CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + CONTROL_CENTER_REPLICATION_FACTOR: 1 + CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 + CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 + CONFLUENT_METRICS_TOPIC_REPLICATION: 1 + PORT: 9021 + + ksqldb-server: + image: confluentinc/cp-ksqldb-server:7.6.1 + hostname: ksqldb-server + container_name: 
ksqldb-server + depends_on: + - broker + - connect + ports: + - "8088:8088" + cap_drop: + - NET_RAW + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_BOOTSTRAP_SERVERS: "broker:29092" + KSQL_HOST_NAME: ksqldb-server + KSQL_LISTENERS: "http://0.0.0.0:8088" + KSQL_CACHE_MAX_BYTES_BUFFERING: 0 + KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + KSQL_KSQL_CONNECT_URL: "http://connect:8083" + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1 + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true' + KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' + + ksqldb-cli: + image: confluentinc/cp-ksqldb-cli:7.6.1 + container_name: ksqldb-cli + depends_on: + - broker + - connect + - ksqldb-server + entrypoint: /bin/sh + tty: true + cap_drop: + - NET_RAW + + ksql-datagen: + image: confluentinc/ksqldb-examples:7.6.1 + hostname: ksql-datagen + container_name: ksql-datagen + depends_on: + - ksqldb-server + - broker + - schema-registry + - connect + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b broker:29092 1 40 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 40 && \ + echo Waiting a few seconds for topic creation to finish... 
&& \ + sleep 11 && \ + tail -f /dev/null'" + cap_drop: + - NET_RAW + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + STREAMS_BOOTSTRAP_SERVERS: broker:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + + rest-proxy: + image: confluentinc/cp-kafka-rest:7.6.1 + depends_on: + - broker + - schema-registry + ports: + - 8082:8082 + hostname: rest-proxy + container_name: rest-proxy + cap_drop: + - NET_RAW + environment: + KAFKA_REST_HOST_NAME: rest-proxy + KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' + KAFKA_REST_LISTENERS: "http://0.0.0.0:8082" + KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' + + marklogic: + image: "${MARKLOGIC_IMAGE}" + platform: linux/amd64 + environment: + - INSTALL_CONVERTERS=true + - MARKLOGIC_INIT=true + - MARKLOGIC_ADMIN_USERNAME=admin + - MARKLOGIC_ADMIN_PASSWORD=admin + volumes: + - ${MARKLOGIC_LOGS_VOLUME}:/var/opt/MarkLogic/Logs + ports: + - "8000-8002:8000-8002" + - "8010-8013:8010-8013" + - "8018-8019:8018-8019" + cap_drop: + - NET_RAW