diff --git a/.copyrightconfig b/.copyrightconfig index dd6cb61..97d4035 100644 --- a/.copyrightconfig +++ b/.copyrightconfig @@ -11,4 +11,4 @@ startyear: 2023 # - Dotfiles already skipped automatically # Enable by removing the leading '# ' from the next line and editing values. # filesexcluded: third_party/*, docs/generated/*.md, assets/*.png, scripts/temp_*.py, vendor/lib.js -filesexcluded: .github/*, README.md, CONTRIBUTING.md, Jenkinsfile, gradle/*, docker-compose.yml, *.gradle, gradle.properties, gradlew, gradlew.bat, **/test/resources/**, docs/**, test-app/docker-compose.yml +filesexcluded: .github/*, README.md, CONTRIBUTING.md, Jenkinsfile, gradle/*, docker-compose.yml, *.gradle, gradle.properties, gradlew, gradlew.bat, **/test/resources/**, docs/**, test-app/docker-compose.yml, docker/prometheus/*.yml diff --git a/.gitignore b/.gitignore index 5a0f859..0b54d47 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,8 @@ build out gradle-local.properties -docker +docker/confluent-marklogic-components/marklogic-kafka-marklogic-connector* +docker/marklogic bin .vscode diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a547d8e..e7a8769 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ This guide describes how to develop and contribute pull requests to this connector. The focus is currently on how to develop and test the connector. There are two methods available - automated and manual. Both methods are performed via a -Docker stack. The automated tests stack creates MarkLogic, Sonar, and Postgres instance for the automated tests. The +Docker stack. The automated tests stack creates a MarkLogic instance for the automated tests. The manual tests use Confluent Platform in a different Docker stack to allow testing the connector via Confluent Control Center with a MarkLogic instance in the same stack. 
@@ -22,22 +22,19 @@ Note that you do not need to install [Gradle](https://gradle.org/) - the "gradle appropriate version of Gradle if you do not have it installed already. ## Docker Cluster Preparation for Automated Testing -The automated tests require a MarkLogic server, SonarQube server, and Postgres server. The docker-compose file in the -repository root includes these services. To prepare for running the automated tests, perform the following steps: +The automated tests require a MarkLogic server. The docker-compose file in the repository root includes this service. +To prepare for running the automated tests, perform the following steps: ``` docker-compose up -d --build ``` -You can now visit these web applications: +You can now visit this web application: * http://localhost:8000 to access the MarkLogic server. -* http://localhost:9000 to use the SonarQube server as described in the "Running Sonar Code Analysis" - section below. ## MarkLogic Preparation To prepare the MarkLogic server for automated testing as well as testing with the Confluent Platform, the Data Hub based application must be deployed. From the root directory, follow these steps: 1. Run `./gradlew hubInit` -2. Edit gradle-local.properties and set `mlUsername` and `mlPassword` 3. Run `./gradlew -i mlDeploy` Note: If you change the version of Data Hub Framework used by this project, you should also delete the following directories: @@ -54,44 +51,11 @@ directory. Note that you must be using Java 17 for this command due to the lates Alternatively, you can import this project into an IDE such as IntelliJ and run each of the tests found under `src/test/java`. 
-## Running Sonar Code Analysis +## Generating code quality reports with SonarQube -To configure the SonarQube service, perform the following steps: +Please see our [internal Wiki page](https://progresssoftware.atlassian.net/wiki/spaces/PM/pages/1763541097/Developer+Experience+SonarQube) +for information on setting up SonarQube if you have not yet already. -1. Go to http://localhost:9000 . -2. Login as admin/admin. SonarQube will ask you to change this password; you can choose whatever you want ("password" works). -3. Click on "Create a local project". -4. Enter "marklogic-kafka-connector" for the Project Display Name; use that as the Project Key as well. -5. Enter "master" as the main branch name. -6. Click on "Next". -7. Click on "Use the global setting" and then "Create project". -8. On the "Analysis Method" page, click on "Locally". -9. In the "Provide a token" panel, click on "Generate". Copy the token. -10. Click the "Continue" button. -11. Update `systemProp.sonar.token=` in `gradle-local.properties` in the root directory -of your project. - -To run the SonarQube analysis, run the following Gradle task in the root directory, which will run all the tests with -code coverage and then generate a quality report with SonarQube: - - ./gradlew test sonar - -If you do not update `systemProp.sonar.token` in your `gradle.properties` file, you can specify the token via the -following: - - ./gradlew test sonar -Dsonar.token=paste your token here - -When that completes, you can find the results at http://localhost:9000/dashboard?id=marklogic-kafka-connector - -Click on that link. If it's the first time you've run the report, you'll see all issues. If you've run the report -before, then SonarQube will show "New Code" by default. That's handy, as you can use that to quickly see any issues -you've introduced on the feature branch you're working on. You can then click on "Overall Code" to see all issues. 
- -Note that if you only need results on code smells and vulnerabilities, you can repeatedly run "./gradlew sonar" -without having to re-run the tests. - -For more assistance with Sonar and Gradle, see the -[Sonar Gradle plugin docs](https://docs.sonarqube.org/latest/analyzing-source-code/scanners/sonarscanner-for-gradle/). # Configuring Local Manual Testing This project includes a Docker Compose file that creates a Kafka cluster using Confluent Platform along with a @@ -101,27 +65,30 @@ application. The instructions below describe how to get started. ## Docker Cluster Preparation for Manual Testing The docker-compose file in the test-app directory includes these services along with a MarkLogic server. ``` -docker-compose --env-file ./.env -f test-app/docker-compose.yml up -d --build +docker-compose --env-file test-app/.env -f test-app/docker-compose.yml up -d --build ``` When the setup is complete, you should be able to run ``` -docker-compose --env-file ./.env -f test-app/docker-compose.yml ps +docker-compose --env-file test-app/.env -f test-app/docker-compose.yml ps ``` and see results similar to the following. 
``` -NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS -broker confluentinc/cp-kafka:7.6.1 "/etc/confluent/dock…" broker 14 minutes ago Up 14 minutes 0.0.0.0:9092->9092/tcp, 0.0.0.0:9101->9101/tcp -connect cnfldemos/cp-server-connect-datagen:0.6.4-7.6.0 "/etc/confluent/dock…" connect 14 minutes ago Up 14 minutes 0.0.0.0:8083->8083/tcp, 9092/tcp -control-center confluentinc/cp-enterprise-control-center:7.6.1 "/etc/confluent/dock…" control-center 14 minutes ago Up 14 minutes 0.0.0.0:9021->9021/tcp -ksql-datagen confluentinc/ksqldb-examples:7.6.1 "bash -c 'echo Waiti…" ksql-datagen 14 minutes ago Up 14 minutes -ksqldb-cli confluentinc/cp-ksqldb-cli:7.6.1 "/bin/sh" ksqldb-cli 14 minutes ago Up 14 minutes -ksqldb-server confluentinc/cp-ksqldb-server:7.6.1 "/etc/confluent/dock…" ksqldb-server 14 minutes ago Up 14 minutes 0.0.0.0:8088->8088/tcp -marklogic marklogicdb/marklogic-db:11.2.0-centos-1.1.2 "/tini -- /usr/local…" marklogic 14 minutes ago Up 14 minutes 25/tcp, 7997-7999/tcp, 0.0.0.0:8000-8002->8000-8002/tcp, 0.0.0.0:8010-8013->8010-8013/tcp, 8003-8009/tcp, 0.0.0.0:8018-8019->8018-8019/tcp -marklogic-kafka-confluent-postgres-1 postgres:15-alpine "docker-entrypoint.s…" postgres 14 minutes ago Up 14 minutes 5432/tcp -marklogic-kafka-confluent-sonarqube-1 sonarqube:10.3.0-community "/opt/sonarqube/dock…" sonarqube 14 minutes ago Up 14 minutes 0.0.0.0:9000->9000/tcp -rest-proxy confluentinc/cp-kafka-rest:7.6.1 "/etc/confluent/dock…" rest-proxy 14 minutes ago Up 14 minutes 0.0.0.0:8082->8082/tcp -schema-registry confluentinc/cp-schema-registry:7.6.1 "/etc/confluent/dock…" schema-registry 14 minutes ago Up 14 minutes 0.0.0.0:8081->8081/tcp +NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS +alertmanager confluentinc/cp-enterprise-alertmanager:2.2.0 "alertmanager-start" alertmanager 51 seconds ago Up 50 seconds 0.0.0.0:9093->9093/tcp, [::]:9093->9093/tcp +broker confluentinc/cp-server:8.0.0 "/etc/confluent/dock…" broker 51 seconds ago Up 50 seconds 0.0.0.0:9092->9092/tcp, 
[::]:9092->9092/tcp, 0.0.0.0:9101->9101/tcp, [::]:9101->9101/tcp +connect cnfldemos/cp-server-connect-datagen:0.6.7-8.0.0 "/etc/confluent/dock…" connect 51 seconds ago Up 49 seconds 0.0.0.0:8083->8083/tcp, [::]:8083->8083/tcp +control-center confluentinc/cp-enterprise-control-center-next-gen:2.2.0 "/etc/confluent/dock…" control-center 51 seconds ago Up 49 seconds 0.0.0.0:9021->9021/tcp, [::]:9021->9021/tcp +flink-jobmanager cnfldemos/flink-kafka:1.19.1-scala_2.12-java17 "/docker-entrypoint.…" flink-jobmanager 51 seconds ago Up 50 seconds 0.0.0.0:9081->9081/tcp, [::]:9081->9081/tcp +flink-sql-client cnfldemos/flink-sql-client-kafka:1.19.1-scala_2.12-java17 "/docker-entrypoint.…" flink-sql-client 51 seconds ago Up 50 seconds 6123/tcp, 8081/tcp +flink-taskmanager cnfldemos/flink-kafka:1.19.1-scala_2.12-java17 "/docker-entrypoint.…" flink-taskmanager 51 seconds ago Up 50 seconds 6123/tcp, 8081/tcp +ksql-datagen confluentinc/ksqldb-examples:8.0.0 "bash -c 'echo Waiti…" ksql-datagen 51 seconds ago Up 49 seconds +ksqldb-cli confluentinc/cp-ksqldb-cli:8.0.0 "/bin/sh" ksqldb-cli 51 seconds ago Up 49 seconds +ksqldb-server confluentinc/cp-ksqldb-server:8.0.0 "/etc/confluent/dock…" ksqldb-server 51 seconds ago Up 49 seconds 0.0.0.0:8088->8088/tcp, [::]:8088->8088/tcp +manual-tests-marklogic-kafka-confluent-marklogic-1 ml-docker-db-dev-tierpoint.bed-artifactory.bedford.progress.com/marklogic/marklogic-server-ubi:latest-12 "/tini -- /usr/local…" marklogic 51 seconds ago Up 50 seconds 0.0.0.0:8000-8002->8000-8002/tcp, [::]:8000-8002->8000-8002/tcp, 0.0.0.0:8010-8013->8010-8013/tcp, [::]:8010-8013->8010-8013/tcp, 0.0.0.0:8018-8019->8018-8019/tcp, [::]:8018-8019->8018-8019/tcp +prometheus confluentinc/cp-enterprise-prometheus:2.2.0 "prometheus-start" prometheus 51 seconds ago Up 50 seconds 0.0.0.0:9090->9090/tcp, [::]:9090->9090/tcp +rest-proxy confluentinc/cp-kafka-rest:8.0.0 "/etc/confluent/dock…" rest-proxy 51 seconds ago Up 49 seconds 0.0.0.0:8082->8082/tcp, 
[::]:8082->8082/tcp +schema-registry confluentinc/cp-schema-registry:8.0.0 "/etc/confluent/dock…" schema-registry 51 seconds ago Up 50 seconds 0.0.0.0:8081->8081/tcp, [::]:8081->8081/tcp ``` You can now visit several web applications: @@ -139,6 +106,15 @@ The Confluent Platform servers in this docker-compose file are based on the Conf [Install a Confluent Platform cluster in Docker using a Confluent docker-compose file](https://docs.confluent.io/platform/current/platform-quickstart.html). +### MarkLogic Preparation +Please ensure you've followed the instructions for "MarkLogic Preparation" in the "Configuring Local Automated Testing" +section above for deploying a Data Hub test application. + +Note: If you change the version of Data Hub Framework used by this project, you should also delete the following directories: +* 'test-app/src/main/entity-config' +* 'test-app/src/main/hub-internal-config' + + ### Building and Sharing the Connector with the Docker Container Using gradle in the root directory, build the connector archive and copy it to a directory shared with the Confluent Platform Docker cluster built in the that section, using this gradle command in the root directory: @@ -187,7 +163,7 @@ In the Control Center GUI, you can verify the MarkLogic Kafka connector instance 3. Click on the "marklogic-purchases-sink" connector You can then verify that data is being written to MarkLogic by using MarkLogic's qconsole application to inspect the -contents of the `data-hub-FINAL` database. +contents of the `data-hub-FINAL` database. There should be documents with URIs that start with `/purchase/*`. 
### Load a MarkLogic Kafka source connector instance You can also load an instance of the MarkLogic Kafka source connector that will read rows from the `demo/purchases` diff --git a/build.gradle b/build.gradle index be6575b..5484b0b 100644 --- a/build.gradle +++ b/build.gradle @@ -27,10 +27,15 @@ plugins { } java { - sourceCompatibility = 1.8 - targetCompatibility = 1.8 + toolchain { + languageVersion = JavaLanguageVersion.of(17) + } + sourceCompatibility = JavaVersion.VERSION_17 + targetCompatibility = JavaVersion.VERSION_17 } + + repositories { mavenCentral() } @@ -46,42 +51,43 @@ configurations { // CVE-2020-15250 (https://www.cve.org/CVERecord?id=CVE-2020-15250) force "org.apache.commons:commons-collections4:4.5.0" - // Force v3.18 of commons-lang3 to avoid CVE-2025-48924 - // (https://www.cve.org/CVERecord?id=CVE-2025-48924), without also - // upgrading ml-app-deployer to 6.0.0, which we are not ready to do yet. - force 'org.apache.commons:commons-lang3:3.18.0' + // Force v3.19 of commons-lang3 to avoid CVE-2025-48924 (https://www.cve.org/CVERecord?id=CVE-2025-48924), which + // is caused by the use of avro-compiler v1.12.0 with older dependencies including commons-lang3 v3.12.0. 
+ force 'org.apache.commons:commons-lang3:3.19.0' } } } ext { - kafkaVersion = "3.9.1" + kafkaVersion = "4.1.0" } dependencies { def kafkaConnectRuntime = "org.apache.kafka:connect-runtime:${kafkaVersion}" compileOnly kafkaConnectRuntime - compileOnly "org.slf4j:slf4j-api:1.7.36" - // Force DHF to use the latest version of ml-app-deployer, which minimizes security vulnerabilities - implementation "com.marklogic:ml-app-deployer:5.0.0" + // Force DHF to use the latest version of ml-gradle, which minimizes security vulnerabilities + implementation "com.marklogic:ml-gradle:6.0.1" - implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-csv:2.17.2" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-csv:2.19.0" // Note that in general, the version of the DHF jar must match that of the deployed DHF instance. Different versions // may work together, but that behavior is not guaranteed. implementation("com.marklogic:marklogic-data-hub:6.2.1") { + exclude module: "ml-gradle" + } + + testImplementation('com.marklogic:marklogic-junit5:1.5.0') { + // Use the Java Client declared above. exclude module: "marklogic-client-api" - exclude module: "ml-javaclient-util" - exclude module: "ml-app-deployer" - // No need for mlcp-util, it's only used in 'legacy' DHF 4 jobs - exclude module: "mlcp-util" - // Excluding because it causes Kafka Connect to complain mightily if included - exclude module: "logback-classic" + // Use the Spring dependencies from ml-app-deployer 6 to avoid vulnerabilities in Spring 5. 
+ exclude group: "org.springframework" } - testImplementation 'com.marklogic:marklogic-junit5:1.5.0' + // Add back all required Spring 6 modules for tests, since junit5 and test code need more than just spring-test + testImplementation "org.springframework:spring-test:6.2.11" + testImplementation "org.springframework:spring-context:6.2.11" testImplementation "org.apache.kafka:connect-json:${kafkaVersion}" testImplementation kafkaConnectRuntime @@ -223,5 +229,5 @@ task connectorArchive(type: Zip, dependsOn: connectorArchive_BuildDirectory, gro task copyConnectorToDockerVolume(type: Copy, dependsOn: connectorArchive, group: confluentTestingGroup) { description = "Copies the connector's archive directory to the Docker volume shared with the Connect server" from "build/connectorArchive" - into "test-app/docker/confluent-marklogic-components" + into "./docker/confluent-marklogic-components" } diff --git a/docker-compose.yml b/docker-compose.yml index 67289fc..2f769e2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,39 +17,3 @@ services: - "8018-8019:8018-8019" cap_drop: - NET_RAW - - # Copied from https://docs.sonarsource.com/sonarqube/latest/setup-and-upgrade/install-the-server/#example-docker-compose-configuration . 
- sonarqube: - image: sonarqube:10.3.0-community - depends_on: - - postgres - environment: - SONAR_JDBC_URL: jdbc:postgresql://postgres:5432/sonar - SONAR_JDBC_USERNAME: sonar - SONAR_JDBC_PASSWORD: sonar - volumes: - - sonarqube_data:/opt/sonarqube/data - - sonarqube_extensions:/opt/sonarqube/extensions - - sonarqube_logs:/opt/sonarqube/logs - ports: - - "9000:9000" - cap_drop: - - NET_RAW - - postgres: - image: postgres:15-alpine - environment: - POSTGRES_USER: sonar - POSTGRES_PASSWORD: sonar - volumes: - - postgresql:/var/lib/postgresql - - postgresql_data:/var/lib/postgresql/data - cap_drop: - - NET_RAW - -volumes: - sonarqube_data: - sonarqube_extensions: - sonarqube_logs: - postgresql: - postgresql_data: diff --git a/docker/prometheus/config/alertmanager-generated.yml b/docker/prometheus/config/alertmanager-generated.yml new file mode 100644 index 0000000..4cdaa00 --- /dev/null +++ b/docker/prometheus/config/alertmanager-generated.yml @@ -0,0 +1,8 @@ +global: + resolve_timeout: 1m + smtp_require_tls: false +receivers: +- name: default +route: + receiver: default + routes: [] diff --git a/docker/prometheus/config/prometheus-generated.yml b/docker/prometheus/config/prometheus-generated.yml new file mode 100644 index 0000000..e69de29 diff --git a/docker/prometheus/config/web-config-am.yml b/docker/prometheus/config/web-config-am.yml new file mode 100644 index 0000000..e69de29 diff --git a/docker/prometheus/config/web-config-prom.yml b/docker/prometheus/config/web-config-prom.yml new file mode 100644 index 0000000..e69de29 diff --git a/test-app/.env b/test-app/.env new file mode 100644 index 0000000..70d05d1 --- /dev/null +++ b/test-app/.env @@ -0,0 +1,2 @@ +MARKLOGIC_IMAGE=ml-docker-db-dev-tierpoint.bed-artifactory.bedford.progress.com/marklogic/marklogic-server-ubi:latest-12 +MARKLOGIC_LOGS_VOLUME=../docker/marklogic/logs diff --git a/test-app/docker-compose.yml b/test-app/docker-compose.yml index bd1e1fa..3d2a887 100644 --- a/test-app/docker-compose.yml +++ 
b/test-app/docker-compose.yml @@ -4,11 +4,11 @@ services: # This compose file is based on: # This guide - https://docs.confluent.io/platform/current/platform-quickstart.html#step-6-uninstall-and-clean-up - # This compose file - https://raw.githubusercontent.com/confluentinc/cp-all-in-one/7.6.1-post/cp-all-in-one-kraft/docker-compose.yml + # This compose file - https://github.com/confluentinc/cp-all-in-one/blob/8.0.0-post/cp-all-in-one/docker-compose.yml # Extended to include a MarkLogic container broker: - image: confluentinc/cp-kafka:7.6.1 + image: confluentinc/cp-server:8.0.0 hostname: broker container_name: broker ports: @@ -25,22 +25,43 @@ services: KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092' KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 KAFKA_JMX_PORT: 9101 KAFKA_JMX_HOSTNAME: localhost + KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + KAFKA_METRIC_REPORTERS: io.confluent.telemetry.reporter.TelemetryReporter + CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092 + CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:29093' KAFKA_LISTENERS: 'PLAINTEXT://broker:29092,CONTROLLER://broker:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' + CONFLUENT_METRICS_ENABLE: 'true' + CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous' # Replace CLUSTER_ID with a unique base64 UUID using "bin/kafka-storage.sh random-uuid" # See https://docs.confluent.io/kafka/operations-tools/kafka-tools.html#kafka-storage-sh CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' + 
KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_TYPE: "http" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_ENABLED: "true" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_METRICS_INCLUDE: "io.confluent.kafka.server.request.(?!.*delta).*|io.confluent.kafka.server.server.broker.state|io.confluent.kafka.server.replica.manager.leader.count|io.confluent.kafka.server.request.queue.size|io.confluent.kafka.server.broker.topic.failed.produce.requests.rate.1.min|io.confluent.kafka.server.tier.archiver.total.lag|io.confluent.kafka.server.request.total.time.ms.p99|io.confluent.kafka.server.broker.topic.failed.fetch.requests.rate.1.min|io.confluent.kafka.server.broker.topic.total.fetch.requests.rate.1.min|io.confluent.kafka.server.partition.caught.up.replicas.count|io.confluent.kafka.server.partition.observer.replicas.count|io.confluent.kafka.server.tier.tasks.num.partitions.in.error|io.confluent.kafka.server.broker.topic.bytes.out.rate.1.min|io.confluent.kafka.server.request.total.time.ms.p95|io.confluent.kafka.server.controller.active.controller.count|io.confluent.kafka.server.request.total.time.ms.p999|io.confluent.kafka.server.controller.active.broker.count|io.confluent.kafka.server.request.handler.pool.request.handler.avg.idle.percent.rate.1.min|io.confluent.kafka.server.controller.unclean.leader.elections.rate.1.min|io.confluent.kafka.server.replica.manager.partition.count|io.confluent.kafka.server.controller.unclean.leader.elections.total|io.confluent.kafka.server.partition.replicas.count|io.confluent.kafka.server.broker.topic.total.produce.requests.rate.1.min|io.confluent.kafka.server.controller.offline.partitions.count|io.confluent.kafka.server.socket.server.network.processor.avg.idle.percent|io.confluent.kafka.server.partition.under.replicated|io.confluent.kafka.server.log.log.start.offset|io.confluent.kafka.server.log.tier.size|io.confluent.kafka.server.log.size|io.confluent.kafka.server.tier.fetcher.bytes.fetched.total|io.confluent.kafka.server.request.total.time.ms.
p50|io.confluent.kafka.server.tenant.consumer.lag.offsets|io.confluent.kafka.server.log.log.end.offset|io.confluent.kafka.server.broker.topic.bytes.in.rate.1.min|io.confluent.kafka.server.partition.under.min.isr|io.confluent.kafka.server.partition.in.sync.replicas.count|io.confluent.telemetry.http.exporter.batches.dropped|io.confluent.telemetry.http.exporter.items.total|io.confluent.telemetry.http.exporter.items.succeeded|io.confluent.telemetry.http.exporter.send.time.total.millis|io.confluent.kafka.server.controller.leader.election.rate.(?!.*delta).*|io.confluent.telemetry.http.exporter.batches.failed" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_CLIENT_BASE_URL: "http://prometheus:9090/api/v1/otlp" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_CLIENT_COMPRESSION: "gzip" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_API_KEY: "dummy" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_API_SECRET: "dummy" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_BUFFER_PENDING_BATCHES_MAX: "80" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_BUFFER_BATCH_ITEMS_MAX: "4000" + KAFKA_CONFLUENT_TELEMETRY_EXPORTER_C3PLUSPLUS_BUFFER_INFLIGHT_SUBMISSIONS_MAX: "10" + KAFKA_CONFLUENT_TELEMETRY_METRICS_COLLECTOR_INTERVAL_MS: "60000" + KAFKA_CONFLUENT_TELEMETRY_REMOTECONFIG_CONFLUENT_ENABLED: "false" + KAFKA_CONFLUENT_CONSUMER_LAG_EMITTER_ENABLED: "true" schema-registry: - image: confluentinc/cp-schema-registry:7.6.1 + image: confluentinc/cp-schema-registry:8.0.0 hostname: schema-registry container_name: schema-registry depends_on: @@ -55,7 +76,8 @@ services: SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 connect: - image: cnfldemos/cp-server-connect-datagen:0.6.4-7.6.0 + image: cnfldemos/cp-server-connect-datagen:0.6.7-8.0.0 + platform: linux/amd64 hostname: connect container_name: connect depends_on: @@ -79,17 +101,43 @@ services: CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter 
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 - # CLASSPATH required due to CC-2422 - CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.6.1.jar - CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" - CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/share/confluent-marklogic-components" - CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + CONNECT_LOG4J_LOGGERS: log4j.rootLogger=DEBUG,org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + volumes: + - ../docker/confluent-marklogic-components:/usr/share/confluent-marklogic-components + + prometheus: + image: confluentinc/cp-enterprise-prometheus:2.2.0 + hostname: cp-enterprise-prometheus + container_name: prometheus volumes: - - ./docker/confluent-marklogic-components:/usr/share/confluent-marklogic-components + - ../docker/prometheus/config:/mnt/config + ports: + - "9090:9090" + cap_drop: + - NET_RAW + environment: + CONFIG_PATH: "/mnt/config" + SHOULD_LOG_TO_FILE: false + + alertmanager: + image: confluentinc/cp-enterprise-alertmanager:2.2.0 + hostname: cp-enterprise-alertmanager + container_name: alertmanager + depends_on: + - prometheus + volumes: + - ../docker/prometheus/config:/mnt/config + ports: + - "9093:9093" + cap_drop: + - NET_RAW + environment: + CONFIG_PATH: "/mnt/config" + SHOULD_LOG_TO_FILE: false control-center: - image: confluentinc/cp-enterprise-control-center:7.6.1 + image: confluentinc/cp-enterprise-control-center-next-gen:2.2.0 hostname: control-center container_name: control-center depends_on: @@ -97,10 +145,14 @@ services: - schema-registry - connect - ksqldb-server + - prometheus + - alertmanager ports: - "9021:9021" cap_drop: - NET_RAW + volumes: + - 
../docker/prometheus/config:/mnt/config environment: CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092' CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083' @@ -112,10 +164,15 @@ services: CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 CONFLUENT_METRICS_TOPIC_REPLICATION: 1 + CONTROL_CENTER_PROMETHEUS_ENABLE: true + CONTROL_CENTER_PROMETHEUS_URL: http://prometheus:9090 + CONTROL_CENTER_PROMETHEUS_RULES_FILE: /mnt/config/trigger_rules-generated.yml + CONTROL_CENTER_ALERTMANAGER_URL: http://alertmanager:9093 + CONTROL_CENTER_ALERTMANAGER_CONFIG_FILE: /mnt/config/alertmanager-generated.yml PORT: 9021 ksqldb-server: - image: confluentinc/cp-ksqldb-server:7.6.1 + image: confluentinc/cp-ksqldb-server:8.0.0 hostname: ksqldb-server container_name: ksqldb-server depends_on: @@ -140,7 +197,7 @@ services: KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' ksqldb-cli: - image: confluentinc/cp-ksqldb-cli:7.6.1 + image: confluentinc/cp-ksqldb-cli:8.0.0 container_name: ksqldb-cli depends_on: - broker @@ -152,7 +209,7 @@ services: - NET_RAW ksql-datagen: - image: confluentinc/ksqldb-examples:7.6.1 + image: confluentinc/ksqldb-examples:8.0.0 hostname: ksql-datagen container_name: ksql-datagen depends_on: @@ -176,7 +233,7 @@ services: STREAMS_SCHEMA_REGISTRY_PORT: 8081 rest-proxy: - image: confluentinc/cp-kafka-rest:7.6.1 + image: confluentinc/cp-kafka-rest:8.0.0 depends_on: - broker - schema-registry @@ -192,6 +249,48 @@ services: KAFKA_REST_LISTENERS: "http://0.0.0.0:8082" KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' + flink-sql-client: + image: cnfldemos/flink-sql-client-kafka:1.19.1-scala_2.12-java17 + depends_on: + - flink-jobmanager + hostname: flink-sql-client + container_name: flink-sql-client + cap_drop: + - NET_RAW + environment: + FLINK_JOBMANAGER_HOST: flink-jobmanager + + flink-jobmanager: + image: cnfldemos/flink-kafka:1.19.1-scala_2.12-java17 + hostname: flink-jobmanager + 
container_name: flink-jobmanager + ports: + - 9081:9081 + cap_drop: + - NET_RAW + command: jobmanager + environment: + - | + FLINK_PROPERTIES= + jobmanager.rpc.address: flink-jobmanager + rest.bind-port: 9081 + + flink-taskmanager: + image: cnfldemos/flink-kafka:1.19.1-scala_2.12-java17 + hostname: flink-taskmanager + container_name: flink-taskmanager + depends_on: + - flink-jobmanager + command: taskmanager + scale: 1 + environment: + - | + FLINK_PROPERTIES= + jobmanager.rpc.address: flink-jobmanager + taskmanager.numberOfTaskSlots: 10 + cap_drop: + - NET_RAW + marklogic: image: "${MARKLOGIC_IMAGE}" platform: linux/amd64