diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index fed1e5d8766..c31dd05e048 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -34,6 +34,7 @@ body:
- K3S
- K6
- Kafka
+ - LDAP
- LocalStack
- MariaDB
- Milvus
@@ -50,6 +51,7 @@ body:
- Oracle Free
- Oracle XE
- OrientDB
+ - Pinecone
- PostgreSQL
- Presto
- Pulsar
@@ -57,6 +59,7 @@ body:
- QuestDB
- RabbitMQ
- Redpanda
+ - ScyllaDB
- Selenium
- Solace
- Solr
diff --git a/.github/ISSUE_TEMPLATE/enhancement.yaml b/.github/ISSUE_TEMPLATE/enhancement.yaml
index c89fc29208c..9b9a06ecf6a 100644
--- a/.github/ISSUE_TEMPLATE/enhancement.yaml
+++ b/.github/ISSUE_TEMPLATE/enhancement.yaml
@@ -34,6 +34,7 @@ body:
- K3S
- K6
- Kafka
+ - LDAP
- LocalStack
- MariaDB
- Milvus
@@ -50,6 +51,7 @@ body:
- Oracle Free
- Oracle XE
- OrientDB
+ - Pinecone
- PostgreSQL
- Presto
- Pulsar
@@ -57,6 +59,7 @@ body:
- QuestDB
- RabbitMQ
- Redpanda
+ - ScyllaDB
- Selenium
- Solace
- Solr
diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml
index aa9bf4e7777..b655b4ac505 100644
--- a/.github/ISSUE_TEMPLATE/feature.yaml
+++ b/.github/ISSUE_TEMPLATE/feature.yaml
@@ -34,6 +34,7 @@ body:
- K3S
- K6
- Kafka
+ - LDAP
- LocalStack
- MariaDB
- Milvus
@@ -50,6 +51,7 @@ body:
- Oracle Free
- Oracle XE
- OrientDB
+ - Pinecone
- PostgreSQL
- Qdrant
- QuestDB
@@ -57,6 +59,7 @@ body:
- Pulsar
- RabbitMQ
- Redpanda
+ - ScyllaDB
- Selenium
- Solace
- Solr
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 17dd0e2aa05..fe7a57a8603 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -22,8 +22,6 @@ updates:
update-types: [ "version-update:semver-minor", "version-update:semver-patch" ]
- dependency-name: "org.apache.commons:commons-compress"
update-types: [ "version-update:semver-minor" ]
- - dependency-name: "org.awaitility:awaitility"
- update-types: [ "version-update:semver-patch" ]
- package-ecosystem: "gradle"
directory: "/"
allow:
@@ -43,12 +41,12 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/azure"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/cassandra"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "io.dropwizard.metrics:metrics-core"
@@ -56,79 +54,76 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/chromadb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/clickhouse"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/cockroachdb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/consul"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/couchbase"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- ignore:
- - dependency-name: "org.awaitility:awaitility"
- update-types: [ "version-update:semver-patch" ]
- package-ecosystem: "gradle"
directory: "/modules/cratedb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/database-commons"
schedule:
- interval: "weekly"
+ interval: "monthly"
- package-ecosystem: "gradle"
directory: "/modules/databend"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/db2"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/dynalite"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/elasticsearch"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/gcloud"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/grafana"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/hivemq"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/influxdb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "com.influxdb:influxdb-java-client"
@@ -136,7 +131,7 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/jdbc"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.mockito:mockito-core"
@@ -144,7 +139,7 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/jdbc-test"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.apache.tomcat:tomcat-jdbc"
@@ -152,7 +147,7 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/junit-jupiter"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.mockito:mockito-core"
@@ -160,7 +155,7 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/k3s"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml"
@@ -168,7 +163,7 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/k6"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml"
@@ -176,17 +171,22 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/kafka"
schedule:
- interval: "weekly"
+ interval: "monthly"
+ open-pull-requests-limit: 10
+ - package-ecosystem: "gradle"
+ directory: "/modules/ldap"
+ schedule:
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/localstack"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/mariadb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.mariadb:r2dbc-mariadb"
@@ -194,37 +194,37 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/milvus"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/minio"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/mockserver"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/mongodb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/mssqlserver"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/mysql"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/neo4j"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.neo4j.driver:neo4j-java-driver"
@@ -234,68 +234,70 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/nginx"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/oceanbase"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/ollama"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/openfga"
schedule:
- interval: "weekly"
+ interval: "monthly"
- package-ecosystem: "gradle"
directory: "/modules/oracle-free"
schedule:
- interval: "weekly"
+ interval: "monthly"
- package-ecosystem: "gradle"
directory: "/modules/oracle-xe"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/orientdb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/postgresql"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/presto"
schedule:
- interval: "weekly"
+ interval: "monthly"
+ open-pull-requests-limit: 10
+ - package-ecosystem: "gradle"
+ directory: "/modules/pinecone"
+ schedule:
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/pulsar"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/qdrant"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/questdb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- ignore:
- - dependency-name: "org.awaitility:awaitility"
- update-types: [ "version-update:semver-patch" ]
- package-ecosystem: "gradle"
directory: "/modules/r2dbc"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "io.r2dbc:r2dbc-spi"
@@ -303,17 +305,22 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/rabbitmq"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/redpanda"
schedule:
- interval: "weekly"
+ interval: "monthly"
+ open-pull-requests-limit: 10
+ - package-ecosystem: "gradle"
+ directory: "/modules/scylladb"
+ schedule:
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/selenium"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.seleniumhq.selenium:selenium-bom"
@@ -321,17 +328,15 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/solace"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.apache.qpid:qpid-jms-client"
update-types: [ "version-update:semver-major" ]
- - dependency-name: "org.awaitility:awaitility"
- update-types: [ "version-update:semver-patch" ]
- package-ecosystem: "gradle"
directory: "/modules/solr"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "org.apache.solr:solr-solrj"
@@ -339,54 +344,54 @@ updates:
- package-ecosystem: "gradle"
directory: "/modules/spock"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/tidb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/timeplus"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/toxiproxy"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/trino"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/typesense"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/vault"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/weaviate"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/yugabytedb"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
# Examples
- package-ecosystem: "gradle"
directory: "/examples"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "ch.qos.logback:logback-classic"
@@ -406,11 +411,11 @@ updates:
- dependency-name: "com.hazelcast:hazelcast"
update-types: [ "version-update:semver-minor" ]
- # Smoke test
+# Smoke test
- package-ecosystem: "gradle"
directory: "/smoke-test"
schedule:
- interval: "weekly"
+ interval: "monthly"
open-pull-requests-limit: 10
ignore:
- dependency-name: "ch.qos.logback:logback-classic"
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 537f40a944b..f4649bd7f99 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -107,6 +107,10 @@
- changed-files:
- any-glob-to-any-file:
- modules/kafka/**/*
+"modules/ldap":
+ - changed-files:
+ - any-glob-to-any-file:
+ - modules/ldap/**/*
"modules/localstack":
- changed-files:
- any-glob-to-any-file:
@@ -168,6 +172,10 @@
- changed-files:
- any-glob-to-any-file:
- modules/orientdb/**/*
+"modules/pinecone":
+ - changed-files:
+ - any-glob-to-any-file:
+ - modules/pinecone/**/*
"modules/postgres":
- changed-files:
- any-glob-to-any-file:
@@ -200,6 +208,10 @@
- changed-files:
- any-glob-to-any-file:
- modules/redpanda/**/*
+"modules/scylladb":
+ - changed-files:
+ - any-glob-to-any-file:
+ - modules/scylladb/**/*
"modules/selenium":
- changed-files:
- any-glob-to-any-file:
diff --git a/.github/settings.yml b/.github/settings.yml
index cc9f477b19d..63c8afacb90 100644
--- a/.github/settings.yml
+++ b/.github/settings.yml
@@ -169,6 +169,9 @@ labels:
- name: modules/kafka
color: '#006b75'
+ - name: modules/ldap
+ color: '#006b75'
+
- name: modules/localstack
color: '#006b75'
@@ -211,6 +214,9 @@ labels:
- name: modules/orientdb
color: '#006b75'
+ - name: modules/pinecone
+ color: '#006b75'
+
- name: modules/postgres
color: '#006b75'
diff --git a/.github/workflows/combine-prs.yml b/.github/workflows/combine-prs.yml
index e8f34878405..ed43d6ceed0 100644
--- a/.github/workflows/combine-prs.yml
+++ b/.github/workflows/combine-prs.yml
@@ -13,6 +13,6 @@ jobs:
steps:
- name: combine-prs
id: combine-prs
- uses: github/combine-prs@v5.1.0
+ uses: github/combine-prs@v5.2.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/moby-latest.yml b/.github/workflows/moby-latest.yml
index 28529f882c6..dbb717757b3 100644
--- a/.github/workflows/moby-latest.yml
+++ b/.github/workflows/moby-latest.yml
@@ -43,7 +43,7 @@ jobs:
- name: Notify to Slack on failures
if: failure()
id: slack
- uses: slackapi/slack-github-action@v1.27.0
+ uses: slackapi/slack-github-action@v2.1.1
with:
payload: |
{
diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml
index 43e9773f19e..d57159f44ca 100644
--- a/.github/workflows/release-drafter.yml
+++ b/.github/workflows/release-drafter.yml
@@ -18,6 +18,6 @@ jobs:
if: github.repository == 'testcontainers/testcontainers-java'
runs-on: ubuntu-latest
steps:
- - uses: release-drafter/release-drafter@3f0f87098bd6b5c5b9a36d49c41d998ea58f9348 # v5.19.0
+ - uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 # v5.19.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 4586838d0d9..77128657b0e 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -13,7 +13,7 @@ permissions:
jobs:
release:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup-java
@@ -30,6 +30,13 @@ jobs:
run: |
./gradlew publish \
-Pversion="${{github.event.release.tag_name}}" --scan --no-daemon -i
+
+ - name: Run Gradle Deploy
+ run: |
+ ./gradlew jreleaserDeploy -Pversion="${{github.event.release.tag_name}}" --scan --no-daemon -i
env:
- OSSRH_USERNAME: ${{secrets.OSSRH_USERNAME}}
- OSSRH_PASSWORD: ${{secrets.OSSRH_PASSWORD}}
+ JRELEASER_GPG_PUBLIC_KEY: ${{ vars.GPG_PUBLIC_KEY }}
+ JRELEASER_GPG_SECRET_KEY: ${{ secrets.GPG_SIGNING_KEY }}
+ JRELEASER_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
+ JRELEASER_MAVENCENTRAL_USERNAME: ${{ secrets.JRELEASER_MAVENCENTRAL_USERNAME }}
+ JRELEASER_MAVENCENTRAL_PASSWORD: ${{ secrets.JRELEASER_MAVENCENTRAL_PASSWORD }}
diff --git a/.github/workflows/update-docs-version.yml b/.github/workflows/update-docs-version.yml
index b6501902995..0248fa1d93a 100644
--- a/.github/workflows/update-docs-version.yml
+++ b/.github/workflows/update-docs-version.yml
@@ -13,7 +13,7 @@ jobs:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
if: github.repository == 'testcontainers/testcontainers-java'
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
with:
@@ -23,7 +23,7 @@ jobs:
sed -i "s/latest_version: .*/latest_version: ${GITHUB_REF##*/}/g" mkdocs.yml
git diff
- name: Create Pull Request
- uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v3.10.1
+ uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v3.10.1
with:
title: Update docs version to ${GITHUB_REF##*/}
body: |
diff --git a/.github/workflows/update-gradle-wrapper.yml b/.github/workflows/update-gradle-wrapper.yml
index b69353a6ab5..8740b7a3e4f 100644
--- a/.github/workflows/update-gradle-wrapper.yml
+++ b/.github/workflows/update-gradle-wrapper.yml
@@ -19,9 +19,9 @@ jobs:
- uses: actions/checkout@v4
- name: Update Gradle Wrapper
- uses: gradle-update/update-gradle-wrapper-action@9268373d69bd0974b6318eb3b512b8e025060bbe # v1.0.13
+ uses: gradle-update/update-gradle-wrapper-action@512b1875f3b6270828abfe77b247d5895a2da1e5 # v1.0.13
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
labels: dependencies
- - uses: gradle/actions/wrapper-validation@v3
+ - uses: gradle/actions/wrapper-validation@v4
diff --git a/.github/workflows/update-testcontainers-version.yml b/.github/workflows/update-testcontainers-version.yml
index 7623b3dff69..13837626c06 100644
--- a/.github/workflows/update-testcontainers-version.yml
+++ b/.github/workflows/update-testcontainers-version.yml
@@ -13,7 +13,7 @@ jobs:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
if: github.repository == 'testcontainers/testcontainers-java'
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
with:
@@ -23,7 +23,7 @@ jobs:
sed -i "s/^testcontainers\.version=.*/testcontainers\.version=${GITHUB_REF##*/}/g" gradle.properties
git diff
- name: Create Pull Request
- uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v3.10.1
+ uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v3.10.1
with:
title: Update testcontainers version to ${GITHUB_REF##*/}
body: |
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aac8c306777..cef3d65e346 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -281,7 +281,7 @@
## [1.1.8] - 2017-01-22
### Fixed
- Compatibility fixes for Docker for Mac v1.13.0 ([\#272](https://github.com/testcontainers/testcontainers-java/issues/272))
-- Relax docker environment disk space check to accomodate unusual empty `df` output observed on Docker for Mac with OverlayFS ([\#273](https://github.com/testcontainers/testcontainers-java/issues/273), [\#278](https://github.com/testcontainers/testcontainers-java/issues/278))
+- Relax docker environment disk space check to accommodate unusual empty `df` output observed on Docker for Mac with OverlayFS ([\#273](https://github.com/testcontainers/testcontainers-java/issues/273), [\#278](https://github.com/testcontainers/testcontainers-java/issues/278))
- Fix inadvertent private-scoping of startup checks' `StartupStatus`, which made implementation of custom startup checks impossible ([\#266](https://github.com/testcontainers/testcontainers-java/issues/266))
- Fix potential resource lead/deadlock when errors are encountered building images from a Dockerfile ([\#274](https://github.com/testcontainers/testcontainers-java/issues/274))
diff --git a/build.gradle b/build.gradle
index 7f4bad6e835..488f005147d 100644
--- a/build.gradle
+++ b/build.gradle
@@ -12,9 +12,10 @@ buildscript {
plugins {
id 'io.franzbecker.gradle-lombok' version '5.0.0'
- id 'com.gradleup.shadow' version '8.3.0'
+ id 'com.gradleup.shadow' version '8.3.8'
id 'me.champeau.gradle.japicmp' version '0.4.3' apply false
id 'com.diffplug.spotless' version '6.22.0' apply false
+ id 'org.jreleaser' version '1.18.0' apply false
}
apply from: "$rootDir/gradle/ci-support.gradle"
@@ -135,7 +136,7 @@ subprojects {
}
checkstyle {
- toolVersion = "10.12.4"
+ toolVersion = "10.23.0"
configFile = rootProject.file('config/checkstyle/checkstyle.xml')
}
}
diff --git a/core/build.gradle b/core/build.gradle
index a7b600f041f..138c0548f0f 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -62,23 +62,22 @@ tasks.japicmp {
configurations.all {
resolutionStrategy {
- // use lower Jackson version
- force 'com.fasterxml.jackson.core:jackson-databind:2.8.8'
- force 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.8.8'
+ force 'com.fasterxml.jackson.core:jackson-databind:2.18.4'
+ force 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.18.4'
}
}
dependencies {
api 'junit:junit:4.13.2'
api 'org.slf4j:slf4j-api:1.7.36'
- compileOnly 'org.jetbrains:annotations:24.1.0'
- testCompileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
+ testCompileOnly 'org.jetbrains:annotations:26.0.2'
api 'org.apache.commons:commons-compress:1.24.0'
api ('org.rnorth.duct-tape:duct-tape:1.0.8') {
exclude(group: 'org.jetbrains', module: 'annotations')
}
- provided('com.google.cloud.tools:jib-core:0.23.0') {
+ provided('com.google.cloud.tools:jib-core:0.27.3') {
exclude group: 'com.google.guava', module: 'guava'
exclude group: 'com.fasterxml.jackson.datatype', module: 'jackson-datatype-jsr310'
exclude group: 'com.fasterxml.jackson.core', module: 'jackson-core'
@@ -86,10 +85,10 @@ dependencies {
exclude group: 'org.apache.commons', module: 'commons-compress'
}
- shaded 'org.awaitility:awaitility:4.2.0'
+ shaded 'org.awaitility:awaitility:4.3.0'
- api platform('com.github.docker-java:docker-java-bom:3.4.1')
- shaded platform('com.github.docker-java:docker-java-bom:3.4.1')
+ api platform('com.github.docker-java:docker-java-bom:3.5.3')
+ shaded platform('com.github.docker-java:docker-java-bom:3.5.3')
api "com.github.docker-java:docker-java-api"
@@ -100,18 +99,18 @@ dependencies {
api 'com.github.docker-java:docker-java-transport-zerodep'
shaded 'com.google.guava:guava:33.3.1-jre'
- shaded "org.yaml:snakeyaml:1.33"
+ shaded "org.yaml:snakeyaml:2.4"
shaded 'org.glassfish.main.external:trilead-ssh2-repackaged:4.1.2'
shaded 'org.zeroturnaround:zt-exec:1.12'
- testImplementation('com.google.cloud.tools:jib-core:0.23.0') {
+ testImplementation('com.google.cloud.tools:jib-core:0.27.3') {
exclude group: 'com.google.guava', module: 'guava'
}
testImplementation 'org.apache.httpcomponents:httpclient:4.5.14'
- testImplementation 'redis.clients:jedis:5.1.5'
- testImplementation 'com.rabbitmq:amqp-client:5.22.0'
+ testImplementation 'redis.clients:jedis:6.0.0'
+ testImplementation 'com.rabbitmq:amqp-client:5.25.0'
testImplementation 'org.mongodb:mongo-java-driver:3.12.14'
testImplementation ('org.mockito:mockito-core:4.11.0') {
@@ -120,13 +119,13 @@ dependencies {
// Synthetic JAR used for MountableFileTest and DirectoryTarResourceTest
testImplementation files('testlib/repo/fakejar/fakejar/0/fakejar-0.jar')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.rest-assured:rest-assured:5.5.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.rest-assured:rest-assured:5.5.5'
jarFileTestCompileOnly "org.projectlombok:lombok:${lombok.version}"
jarFileTestAnnotationProcessor "org.projectlombok:lombok:${lombok.version}"
jarFileTestImplementation 'junit:junit:4.13.2'
- jarFileTestImplementation 'org.assertj:assertj-core:3.26.3'
+ jarFileTestImplementation 'org.assertj:assertj-core:3.27.3'
jarFileTestImplementation 'org.ow2.asm:asm-debug-all:5.2'
}
diff --git a/core/src/main/java/org/testcontainers/containers/DockerMcpGatewayContainer.java b/core/src/main/java/org/testcontainers/containers/DockerMcpGatewayContainer.java
new file mode 100644
index 00000000000..40e80ae2061
--- /dev/null
+++ b/core/src/main/java/org/testcontainers/containers/DockerMcpGatewayContainer.java
@@ -0,0 +1,105 @@
+package org.testcontainers.containers;
+
+import org.testcontainers.DockerClientFactory;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.images.builder.Transferable;
+import org.testcontainers.utility.DockerImageName;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Testcontainers implementation of the Docker MCP Gateway container.
+ *
+ * Supported images: {@code docker/mcp-gateway}
+ *
+ * Exposed ports: 8811
+ */
+public class DockerMcpGatewayContainer extends GenericContainer<DockerMcpGatewayContainer> {
+
+ private static final String DOCKER_MCP_GATEWAY_IMAGE = "docker/mcp-gateway";
+
+ private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse(DOCKER_MCP_GATEWAY_IMAGE);
+
+ private static final int DEFAULT_PORT = 8811;
+
+ private static final String SECRETS_PATH = "/testcontainers/app/secrets";
+
+    private final List<String> servers = new ArrayList<>();
+
+    private final List<String> tools = new ArrayList<>();
+
+    private final Map<String, String> secrets = new HashMap<>();
+
+ public DockerMcpGatewayContainer(String dockerImageName) {
+ this(DockerImageName.parse(dockerImageName));
+ }
+
+ public DockerMcpGatewayContainer(DockerImageName dockerImageName) {
+ super(dockerImageName);
+ dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
+ withExposedPorts(DEFAULT_PORT);
+ withFileSystemBind(DockerClientFactory.instance().getRemoteDockerUnixSocketPath(), "/var/run/docker.sock");
+ waitingFor(Wait.forLogMessage(".*Start sse server on port.*", 1));
+ }
+
+ @Override
+ protected void configure() {
+        List<String> command = new ArrayList<>();
+ command.add("--transport=sse");
+ for (String server : this.servers) {
+ if (!server.isEmpty()) {
+ command.add("--servers=" + server);
+ }
+ }
+ for (String tool : this.tools) {
+ if (!tool.isEmpty()) {
+ command.add("--tools=" + tool);
+ }
+ }
+ if (this.secrets != null && !this.secrets.isEmpty()) {
+ command.add("--secrets=" + SECRETS_PATH);
+ }
+ withCommand(String.join(" ", command));
+ }
+
+ @Override
+ protected void containerIsCreated(String containerId) {
+ if (this.secrets != null && !this.secrets.isEmpty()) {
+ StringBuilder secretsFile = new StringBuilder();
+            for (Map.Entry<String, String> entry : this.secrets.entrySet()) {
+ secretsFile.append(entry.getKey()).append("=").append(entry.getValue()).append("\n");
+ }
+ copyFileToContainer(Transferable.of(secretsFile.toString()), SECRETS_PATH);
+ }
+ }
+
+    public DockerMcpGatewayContainer withServer(String server, List<String> tools) {
+ this.servers.add(server);
+ this.tools.addAll(tools);
+ return this;
+ }
+
+ public DockerMcpGatewayContainer withServer(String server, String... tools) {
+ this.servers.add(server);
+ this.tools.addAll(Arrays.asList(tools));
+ return this;
+ }
+
+    public DockerMcpGatewayContainer withSecrets(Map<String, String> secrets) {
+ this.secrets.putAll(secrets);
+ return this;
+ }
+
+ public DockerMcpGatewayContainer withSecret(String secretKey, String secretValue) {
+ this.secrets.put(secretKey, secretValue);
+ return this;
+ }
+
+ public String getEndpoint() {
+ return "http://" + getHost() + ":" + getMappedPort(DEFAULT_PORT);
+ }
+}
diff --git a/core/src/main/java/org/testcontainers/containers/DockerModelRunnerContainer.java b/core/src/main/java/org/testcontainers/containers/DockerModelRunnerContainer.java
new file mode 100644
index 00000000000..145b4e609ad
--- /dev/null
+++ b/core/src/main/java/org/testcontainers/containers/DockerModelRunnerContainer.java
@@ -0,0 +1,89 @@
+package org.testcontainers.containers;
+
+import com.github.dockerjava.api.command.InspectContainerResponse;
+import lombok.extern.slf4j.Slf4j;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Testcontainers proxy container for the Docker Model Runner service
+ * provided by Docker Desktop.
+ *
+ * Supported images: {@code alpine/socat}
+ *
+ * Exposed ports: 80
+ */
+@Slf4j
+public class DockerModelRunnerContainer extends SocatContainer {
+
+ private static final String MODEL_RUNNER_ENDPOINT = "model-runner.docker.internal";
+
+ private static final int PORT = 80;
+
+ private String model;
+
+ public DockerModelRunnerContainer(String image) {
+ this(DockerImageName.parse(image));
+ }
+
+ public DockerModelRunnerContainer(DockerImageName image) {
+ super(image);
+ withTarget(PORT, MODEL_RUNNER_ENDPOINT);
+ waitingFor(Wait.forHttp("/").forResponsePredicate(res -> res.contains("The service is running")));
+ }
+
+ @Override
+ protected void containerIsStarted(InspectContainerResponse containerInfo) {
+ if (this.model != null) {
+ logger().info("Pulling model: {}. Please be patient.", this.model);
+
+ String url = getBaseEndpoint() + "/models/create";
+ String payload = String.format("{\"from\": \"%s\"}", this.model);
+
+ try {
+ HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
+ connection.setRequestMethod("POST");
+ connection.setRequestProperty("Content-Type", "application/json");
+ connection.setDoOutput(true);
+
+ try (OutputStream os = connection.getOutputStream()) {
+ os.write(payload.getBytes());
+ os.flush();
+ }
+
+ try (
+ BufferedReader br = new BufferedReader(
+ new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8)
+ )
+ ) {
+ while (br.readLine() != null) {}
+ }
+ connection.disconnect();
+ } catch (IOException e) {
+ logger().error("Failed to pull model {}: {}", this.model, e);
+ }
+ logger().info("Finished pulling model: {}", this.model);
+ }
+ }
+
+ public DockerModelRunnerContainer withModel(String model) {
+ this.model = model;
+ return this;
+ }
+
+ public String getBaseEndpoint() {
+ return "http://" + getHost() + ":" + getMappedPort(PORT);
+ }
+
+ public String getOpenAIEndpoint() {
+ return getBaseEndpoint() + "/engines";
+ }
+}
diff --git a/core/src/main/java/org/testcontainers/containers/PortForwardingContainer.java b/core/src/main/java/org/testcontainers/containers/PortForwardingContainer.java
index a06e554a14c..a5f7c29a8b4 100644
--- a/core/src/main/java/org/testcontainers/containers/PortForwardingContainer.java
+++ b/core/src/main/java/org/testcontainers/containers/PortForwardingContainer.java
@@ -24,7 +24,7 @@ public enum PortForwardingContainer {
private static ContainerDef DEFINITION = new ContainerDef() {
{
- setImage(DockerImageName.parse("testcontainers/sshd:1.2.0"));
+ setImage(DockerImageName.parse("testcontainers/sshd:1.3.0"));
addExposedTcpPort(22);
addEnvVar("PASSWORD", PASSWORD);
}
diff --git a/core/src/main/java/org/testcontainers/images/PullPolicy.java b/core/src/main/java/org/testcontainers/images/PullPolicy.java
index 12d05b6fe5a..8c3a067cc15 100644
--- a/core/src/main/java/org/testcontainers/images/PullPolicy.java
+++ b/core/src/main/java/org/testcontainers/images/PullPolicy.java
@@ -40,7 +40,7 @@ public static synchronized ImagePullPolicy defaultPolicy() {
.currentThread()
.getContextClassLoader()
.loadClass(imagePullPolicyClassName)
- .getConstructor()
+ .getDeclaredConstructor()
.newInstance();
} catch (Exception e) {
throw new IllegalArgumentException(
diff --git a/core/src/main/java/org/testcontainers/utility/ResourceReaper.java b/core/src/main/java/org/testcontainers/utility/ResourceReaper.java
index 1b99cd2abb4..ccea407ed3b 100644
--- a/core/src/main/java/org/testcontainers/utility/ResourceReaper.java
+++ b/core/src/main/java/org/testcontainers/utility/ResourceReaper.java
@@ -368,7 +368,7 @@ static class FilterRegistry {
* Registers the given filters with Ryuk
*
* @param filters the filter to register
- * @return true if the filters have been registered successfuly, false otherwise
+ * @return true if the filters have been registered successfully, false otherwise
* @throws IOException if communication with Ryuk fails
*/
protected boolean register(List> filters) throws IOException {
diff --git a/core/src/main/java/org/testcontainers/utility/RyukContainer.java b/core/src/main/java/org/testcontainers/utility/RyukContainer.java
index 7175790bb6c..7c75a12b109 100644
--- a/core/src/main/java/org/testcontainers/utility/RyukContainer.java
+++ b/core/src/main/java/org/testcontainers/utility/RyukContainer.java
@@ -9,7 +9,7 @@
class RyukContainer extends GenericContainer {
RyukContainer() {
- super("testcontainers/ryuk:0.11.0");
+ super("testcontainers/ryuk:0.12.0");
withExposedPorts(8080);
withCreateContainerCmdModifier(cmd -> {
cmd.withName("testcontainers-ryuk-" + DockerClientFactory.SESSION_ID);
diff --git a/core/src/test/java/org/testcontainers/containers/DockerMcpGatewayContainerTest.java b/core/src/test/java/org/testcontainers/containers/DockerMcpGatewayContainerTest.java
new file mode 100644
index 00000000000..28e4cf6fb6a
--- /dev/null
+++ b/core/src/test/java/org/testcontainers/containers/DockerMcpGatewayContainerTest.java
@@ -0,0 +1,37 @@
+package org.testcontainers.containers;
+
+import org.junit.Test;
+
+import java.util.Collections;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class DockerMcpGatewayContainerTest {
+
+ @Test
+ public void serviceSuccessfullyStarts() {
+ try (DockerMcpGatewayContainer gateway = new DockerMcpGatewayContainer("docker/mcp-gateway:latest")) {
+ gateway.start();
+
+ assertThat(gateway.isRunning()).isTrue();
+ }
+ }
+
+ @Test
+ public void gatewayStartsWithServers() {
+ try (
+ // container {
+ DockerMcpGatewayContainer gateway = new DockerMcpGatewayContainer("docker/mcp-gateway:latest")
+ .withServer("curl", "curl")
+ .withServer("brave", "brave_local_search", "brave_web_search")
+ .withServer("github-official", Collections.singletonList("add_issue_comment"))
+ .withSecret("brave.api_key", "test_key")
+ .withSecrets(Collections.singletonMap("github.personal_access_token", "test_token"))
+ // }
+ ) {
+ gateway.start();
+
+ assertThat(gateway.getLogs()).contains("4 tools listed");
+ }
+ }
+}
diff --git a/core/src/test/java/org/testcontainers/containers/DockerModelRunnerContainerTest.java b/core/src/test/java/org/testcontainers/containers/DockerModelRunnerContainerTest.java
new file mode 100644
index 00000000000..c04c0159e51
--- /dev/null
+++ b/core/src/test/java/org/testcontainers/containers/DockerModelRunnerContainerTest.java
@@ -0,0 +1,49 @@
+package org.testcontainers.containers;
+
+import io.restassured.RestAssured;
+import io.restassured.response.Response;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assumptions.assumeThat;
+
+public class DockerModelRunnerContainerTest {
+
+ @Test
+ public void checkStatus() {
+ assumeThat(System.getenv("CI")).isNull();
+
+ try (
+ // container {
+ DockerModelRunnerContainer dmr = new DockerModelRunnerContainer("alpine/socat:1.7.4.3-r0")
+ // }
+ ) {
+ dmr.start();
+
+ Response modelResponse = RestAssured.get(dmr.getBaseEndpoint() + "/status").thenReturn();
+ assertThat(modelResponse.body().asString()).contains("The service is running");
+ }
+ }
+
+ @Test
+ public void pullsModelAndExposesInference() {
+ assumeThat(System.getenv("CI")).isNull();
+
+ String modelName = "ai/smollm2:360M-Q4_K_M";
+
+ try (
+ // pullModel {
+ DockerModelRunnerContainer dmr = new DockerModelRunnerContainer("alpine/socat:1.7.4.3-r0")
+ .withModel(modelName)
+ // }
+ ) {
+ dmr.start();
+
+ Response modelResponse = RestAssured.get(dmr.getBaseEndpoint() + "/models").thenReturn();
+ assertThat(modelResponse.body().jsonPath().getList("tags.flatten()")).contains(modelName);
+
+ Response openAiResponse = RestAssured.get(dmr.getOpenAIEndpoint() + "/v1/models").prettyPeek().thenReturn();
+ assertThat(openAiResponse.body().jsonPath().getList("data.id")).contains(modelName);
+ }
+ }
+}
diff --git a/core/src/test/java/org/testcontainers/images/OverrideImagePullPolicyTest.java b/core/src/test/java/org/testcontainers/images/OverrideImagePullPolicyTest.java
index 3b410fd5ab9..43c026ea294 100644
--- a/core/src/test/java/org/testcontainers/images/OverrideImagePullPolicyTest.java
+++ b/core/src/test/java/org/testcontainers/images/OverrideImagePullPolicyTest.java
@@ -51,4 +51,20 @@ public void simpleConfigurationTest() {
container.stop();
}
}
+
+ @Test
+ public void alwaysPullConfigurationTest() {
+ Mockito
+ .doReturn(AlwaysPullPolicy.class.getCanonicalName())
+ .when(TestcontainersConfiguration.getInstance())
+ .getImagePullPolicy();
+
+ try (DockerRegistryContainer registry = new DockerRegistryContainer()) {
+ registry.start();
+ GenericContainer> container = new GenericContainer<>(registry.createImage()).withExposedPorts(8080);
+ container.start();
+ assertThat(container.getImage().imagePullPolicy).isInstanceOf(AlwaysPullPolicy.class);
+ container.stop();
+ }
+ }
}
diff --git a/core/src/test/java/org/testcontainers/junit/DependenciesTest.java b/core/src/test/java/org/testcontainers/junit/DependenciesTest.java
index fd58dc4335f..effa9b06447 100644
--- a/core/src/test/java/org/testcontainers/junit/DependenciesTest.java
+++ b/core/src/test/java/org/testcontainers/junit/DependenciesTest.java
@@ -37,7 +37,7 @@ public void shouldWorkWithSimpleDependency() {
}
@Test
- public void shouldWorkWithMutlipleDependencies() {
+ public void shouldWorkWithMultipleDependencies() {
InvocationCountingStartable startable1 = new InvocationCountingStartable();
InvocationCountingStartable startable2 = new InvocationCountingStartable();
diff --git a/core/src/test/java/org/testcontainers/utility/LazyFutureTest.java b/core/src/test/java/org/testcontainers/utility/LazyFutureTest.java
index dc1a1b803df..822109ff9cd 100644
--- a/core/src/test/java/org/testcontainers/utility/LazyFutureTest.java
+++ b/core/src/test/java/org/testcontainers/utility/LazyFutureTest.java
@@ -21,7 +21,7 @@
public class LazyFutureTest {
@Test
- public void testLazyness() throws Exception {
+ public void testLaziness() throws Exception {
AtomicInteger counter = new AtomicInteger();
Future lazyFuture = new LazyFuture() {
diff --git a/docs/contributing.md b/docs/contributing.md
index e771347bde9..1cad43877cf 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -1,6 +1,6 @@
# Contributing
-* Star the project on [Github](https://github.com/testcontainers/testcontainers-java) and help spread the word :)
+* Star the project on [GitHub](https://github.com/testcontainers/testcontainers-java) and help spread the word :)
* Join our [Slack workspace](http://slack.testcontainers.org)
* [Start a discussion](https://github.com/testcontainers/testcontainers-java/discussions) if you have an idea, find a possible bug or have a general question.
* Contribute improvements or fixes using a [Pull Request](https://github.com/testcontainers/testcontainers-java/pulls). If you're going to contribute, thank you! Please just be sure to:
@@ -97,7 +97,7 @@ We will evaluate incubating modules periodically, and remove the label when appr
Since we generally get a lot of Dependabot PRs, we regularly combine them into single commits.
For this, we are using the [gh-combine-prs](https://github.com/rnorth/gh-combine-prs) extension for [GitHub CLI](https://cli.github.com/).
-The whole process is as follow:
+The whole process is as follows:
1. Check that all open Dependabot PRs did succeed their build. If they did not succeed, trigger a rerun if the cause were external factors or else document the reason if obvious.
2. Run the extension from an up-to-date local `main` branch: `gh combine-prs --query "author:app/dependabot"`
diff --git a/docs/contributing_docs.md b/docs/contributing_docs.md
index aa195cf4546..51cb5aae780 100644
--- a/docs/contributing_docs.md
+++ b/docs/contributing_docs.md
@@ -84,7 +84,7 @@ foo.doSomething();
Note that:
-* Any code included will be have its indentation reduced
+* Any code included will have its indentation reduced
* Every line in the source file will be searched for an instance of the token (e.g. `doFoo`). If more than one line
includes that token, then potentially more than one block could be targeted for inclusion. It is advisable to use a
specific, unique token to avoid unexpected behaviour.
diff --git a/docs/examples.md b/docs/examples.md
index 05638d00d43..c63fa739817 100644
--- a/docs/examples.md
+++ b/docs/examples.md
@@ -4,7 +4,6 @@ Examples of different use cases provided by Testcontainers can be found below:
- [Hazelcast](https://github.com/testcontainers/testcontainers-java/tree/main/examples/hazelcast)
- [Kafka Cluster with multiple brokers](https://github.com/testcontainers/testcontainers-java/tree/main/examples/kafka-cluster)
-- [Linked containers](https://github.com/testcontainers/testcontainers-java/tree/main/examples/linked-container)
- [Neo4j](https://github.com/testcontainers/testcontainers-java/tree/main/examples/neo4j-container)
- [Redis](https://github.com/testcontainers/testcontainers-java/tree/main/examples/redis-backed-cache)
- [Selenium](https://github.com/testcontainers/testcontainers-java/tree/main/examples/selenium-container)
diff --git a/docs/examples/junit4/generic/build.gradle b/docs/examples/junit4/generic/build.gradle
index 5a276cf6a7f..50b7d9caf32 100644
--- a/docs/examples/junit4/generic/build.gradle
+++ b/docs/examples/junit4/generic/build.gradle
@@ -7,6 +7,6 @@ dependencies {
testImplementation project(":mysql")
testRuntimeOnly 'mysql:mysql-connector-java:8.0.33'
- testImplementation "org.seleniumhq.selenium:selenium-api:4.25.0"
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation "org.seleniumhq.selenium:selenium-api:4.34.0"
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/docs/examples/junit4/generic/src/test/java/generic/support/TestSpecificImageNameSubstitutor.java b/docs/examples/junit4/generic/src/test/java/generic/support/TestSpecificImageNameSubstitutor.java
index 6955d49454b..0850c7c7a50 100644
--- a/docs/examples/junit4/generic/src/test/java/generic/support/TestSpecificImageNameSubstitutor.java
+++ b/docs/examples/junit4/generic/src/test/java/generic/support/TestSpecificImageNameSubstitutor.java
@@ -13,7 +13,7 @@ public class TestSpecificImageNameSubstitutor extends ImageNameSubstitutor {
@Override
public DockerImageName apply(final DockerImageName original) {
if (original.equals(DockerImageName.parse("registry.mycompany.com/mirror/mysql:8.0.36"))) {
- return DockerImageName.parse("mysql");
+ return DockerImageName.parse("mysql:8.0.36");
} else {
return original;
}
diff --git a/docs/examples/junit4/redis/build.gradle b/docs/examples/junit4/redis/build.gradle
index 2a0bfb73e7c..f2e492fa7df 100644
--- a/docs/examples/junit4/redis/build.gradle
+++ b/docs/examples/junit4/redis/build.gradle
@@ -5,5 +5,5 @@ dependencies {
testImplementation "junit:junit:4.13.2"
testImplementation project(":testcontainers")
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/docs/examples/junit5/redis/build.gradle b/docs/examples/junit5/redis/build.gradle
index 2fc21e92237..4419bc3216b 100644
--- a/docs/examples/junit5/redis/build.gradle
+++ b/docs/examples/junit5/redis/build.gradle
@@ -3,12 +3,13 @@ description = "Examples for docs"
dependencies {
api "io.lettuce:lettuce-core:6.4.0.RELEASE"
- testImplementation "org.junit.jupiter:junit-jupiter-api:5.11.0"
- testImplementation "org.junit.jupiter:junit-jupiter-params:5.10.3"
- testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine:5.11.0"
+ testImplementation 'org.junit.jupiter:junit-jupiter-api:5.13.3'
+ testImplementation 'org.junit.jupiter:junit-jupiter-params:5.13.3'
+ testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.13.3'
testImplementation project(":testcontainers")
testImplementation project(":junit-jupiter")
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
test {
diff --git a/docs/features/advanced_options.md b/docs/features/advanced_options.md
index 9dd86edccd5..e979da9ecdf 100644
--- a/docs/features/advanced_options.md
+++ b/docs/features/advanced_options.md
@@ -38,6 +38,14 @@ You can also configure Testcontainers to use your custom implementation by using
pull.policy=com.mycompany.testcontainers.ExampleImagePullPolicy
```
+You can also use the provided implementation to always pull images
+
+=== "`src/test/resources/testcontainers.properties`"
+ ```text
+ pull.policy=org.testcontainers.images.AlwaysPullPolicy
+ ```
+
+
Please see [the documentation on configuration mechanisms](./configuration.md) for more information.
## Customizing the container
diff --git a/docs/modules/azure.md b/docs/modules/azure.md
index e09634b09c4..e0e9419bf70 100644
--- a/docs/modules/azure.md
+++ b/docs/modules/azure.md
@@ -5,12 +5,13 @@ This module is INCUBATING. While it is ready for use and operational in the curr
Testcontainers module for the Microsoft Azure's [SDK](https://github.com/Azure/azure-sdk-for-java).
-Currently, the module supports `Azurite`, `Azure Event Hubs` and `CosmosDB` emulators. In order to use them, you should use the following classes:
+Currently, the module supports `Azurite`, `Azure Event Hubs`, `Azure Service Bus` and `CosmosDB` emulators. In order to use them, you should use the following classes:
Class | Container Image
-|-
AzuriteContainer | [mcr.microsoft.com/azure-storage/azurite](https://github.com/microsoft/containerregistry)
-AzureEventHubsContainer | [mcr.microsoft.com/azure-messaging/eventhubs-emulator](https://github.com/microsoft/containerregistry)
+EventHubsEmulatorContainer | [mcr.microsoft.com/azure-messaging/eventhubs-emulator](https://github.com/microsoft/containerregistry)
+ServiceBusEmulatorContainer | [mcr.microsoft.com/azure-messaging/servicebus-emulator](https://github.com/microsoft/containerregistry)
CosmosDBEmulatorContainer | [mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emulator](https://github.com/microsoft/containerregistry)
## Usage example
@@ -20,7 +21,7 @@ CosmosDBEmulatorContainer | [mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emula
Start Azurite Emulator during a test:
-[Starting a Azurite container](../../modules/azure/src/test/java/org/testcontainers/azure/AzuriteContainerTest.java) inside_block:emulatorContainer
+[Starting an Azurite container](../../modules/azure/src/test/java/org/testcontainers/azure/AzuriteContainerTest.java) inside_block:emulatorContainer
!!! note
@@ -30,11 +31,11 @@ If the tested application needs to use more than one set of credentials, the con
Please see some examples below.
-[Starting a Azurite Blob container with one account and two keys](../../modules/azure/src/test/java/org/testcontainers/azure/AzuriteContainerTest.java) inside_block:withTwoAccountKeys
+[Starting an Azurite Blob container with one account and two keys](../../modules/azure/src/test/java/org/testcontainers/azure/AzuriteContainerTest.java) inside_block:withTwoAccountKeys
-[Starting a Azurite Blob container with more accounts and keys](../../modules/azure/src/test/java/org/testcontainers/azure/AzuriteContainerTest.java) inside_block:withMoreAccounts
+[Starting an Azurite Blob container with more accounts and keys](../../modules/azure/src/test/java/org/testcontainers/azure/AzuriteContainerTest.java) inside_block:withMoreAccounts
#### Using with Blob
@@ -82,15 +83,15 @@ Build Azure Table client:
Start Azure Event Hubs Emulator during a test:
-[Setting up a network](../../modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java) inside_block:network
+[Setting up a network](../../modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java) inside_block:network
-[Starting an Azurite container as dependency](../../modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java) inside_block:azuriteContainer
+[Starting an Azurite container as dependency](../../modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java) inside_block:azuriteContainer
-[Starting an Azure Event Hubs Emulator container](../../modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java) inside_block:emulatorContainer
+[Starting an Azure Event Hubs Emulator container](../../modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java) inside_block:emulatorContainer
#### Using Azure Event Hubs clients
@@ -98,7 +99,39 @@ Start Azure Event Hubs Emulator during a test:
Configure the consumer and the producer clients:
-[Configuring the clients](../../modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java) inside_block:createProducerAndConsumer
+[Configuring the clients](../../modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java) inside_block:createProducerAndConsumer
+
+
+### Azure Service Bus Emulator
+
+
+[Configuring the Azure Service Bus Emulator container](../../modules/azure/src/test/resources/service-bus-config.json)
+
+
+Start Azure Service Bus Emulator during a test:
+
+
+[Setting up a network](../../modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java) inside_block:network
+
+
+
+[Starting a SQL Server container as dependency](../../modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java) inside_block:sqlContainer
+
+
+
+[Starting a Service Bus Emulator container](../../modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java) inside_block:emulatorContainer
+
+
+#### Using Azure Service Bus clients
+
+Configure the sender and the processor clients:
+
+
+[Configuring the sender client](../../modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java) inside_block:senderClient
+
+
+
+[Configuring the processor client](../../modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java) inside_block:processorClient
### CosmosDB
@@ -106,7 +139,7 @@ Configure the consumer and the producer clients:
Start Azure CosmosDB Emulator during a test:
-[Starting a Azure CosmosDB Emulator container](../../modules/azure/src/test/java/org/testcontainers/containers/CosmosDBEmulatorContainerTest.java) inside_block:emulatorContainer
+[Starting an Azure CosmosDB Emulator container](../../modules/azure/src/test/java/org/testcontainers/containers/CosmosDBEmulatorContainerTest.java) inside_block:emulatorContainer
Prepare KeyStore to use for SSL.
diff --git a/docs/modules/databases/jdbc.md b/docs/modules/databases/jdbc.md
index 90183228bd5..05229db4ec0 100644
--- a/docs/modules/databases/jdbc.md
+++ b/docs/modules/databases/jdbc.md
@@ -135,7 +135,7 @@ By default database container is being stopped as soon as last connection is clo
`jdbc:tc:mysql:8.0.36:///databasename?TC_DAEMON=true`
-With this parameter database container will keep running even when there're no open connections.
+With this parameter, the database container will keep running even when there are no open connections.
### Running container with tmpfs options
diff --git a/docs/modules/databases/r2dbc.md b/docs/modules/databases/r2dbc.md
index 055d4ad7b0f..5aded5431ca 100644
--- a/docs/modules/databases/r2dbc.md
+++ b/docs/modules/databases/r2dbc.md
@@ -35,7 +35,7 @@ So that the URL becomes:
#### Using ClickHouse
-`r2dbc:tc:clickhouse:///databasename?TC_IMAGE_TAG=21.9.2-alpine`
+`r2dbc:tc:clickhouse:///databasename?TC_IMAGE_TAG=21.11.11-alpine`
#### Using MySQL
diff --git a/docs/modules/databases/scylladb.md b/docs/modules/databases/scylladb.md
new file mode 100644
index 00000000000..186f5097d39
--- /dev/null
+++ b/docs/modules/databases/scylladb.md
@@ -0,0 +1,58 @@
+# ScyllaDB
+
+Testcontainers module for [ScyllaDB](https://hub.docker.com/r/scylladb/scylla)
+
+## ScyllaDB's usage examples
+
+You can start a ScyllaDB container instance from any Java application by using:
+
+
+[Create container](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:container
+
+
+
+[Custom config file](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:customConfiguration
+
+
+### Building CqlSession
+
+
+[Using CQL port](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:session
+
+
+
+[Using SSL](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:sslContext
+
+
+
+[Using Shard Awareness port](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:shardAwarenessSession
+
+
+### Alternator
+
+
+[Enabling Alternator](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:alternator
+
+
+
+[DynamoDbClient with Alternator](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:dynamodDbClient
+
+
+## Adding this module to your project dependencies
+
+Add the following dependency to your `pom.xml`/`build.gradle` file:
+
+=== "Gradle"
+ ```groovy
+ testImplementation "org.testcontainers:scylladb:{{latest_version}}"
+ ```
+
+=== "Maven"
+ ```xml
+
+ org.testcontainers
+ scylladb
+ {{latest_version}}
+ test
+
+ ```
diff --git a/docs/modules/docker_mcp_gateway.md b/docs/modules/docker_mcp_gateway.md
new file mode 100644
index 00000000000..af526bab3fe
--- /dev/null
+++ b/docs/modules/docker_mcp_gateway.md
@@ -0,0 +1,32 @@
+# Docker MCP Gateway
+
+Testcontainers module for [Docker MCP Gateway](https://hub.docker.com/r/docker/mcp-gateway).
+
+## DockerMcpGatewayContainer's usage examples
+
+You can start a Docker MCP Gateway container instance from any Java application by using:
+
+
+[Create a DockerMcpGatewayContainer](../../core/src/test/java/org/testcontainers/containers/DockerMcpGatewayContainerTest.java) inside_block:container
+
+
+## Adding this module to your project dependencies
+
+*Docker MCP Gateway support is part of the core Testcontainers library.*
+
+Add the following dependency to your `pom.xml`/`build.gradle` file:
+
+=== "Gradle"
+ ```groovy
+ testImplementation "org.testcontainers:testcontainers:{{latest_version}}"
+ ```
+=== "Maven"
+ ```xml
+
+ org.testcontainers
+ testcontainers
+ {{latest_version}}
+ test
+
+ ```
+
diff --git a/docs/modules/docker_model_runner.md b/docs/modules/docker_model_runner.md
new file mode 100644
index 00000000000..b610279e93b
--- /dev/null
+++ b/docs/modules/docker_model_runner.md
@@ -0,0 +1,41 @@
+# Docker Model Runner
+
+This module helps connect to [Docker Model Runner](https://docs.docker.com/desktop/features/model-runner/)
+provided by Docker Desktop 4.40.0.
+
+## DockerModelRunner's usage examples
+
+You can start a Docker Model Runner proxy container instance from any Java application by using:
+
+
+[Create a DockerModelRunnerContainer](../../core/src/test/java/org/testcontainers/containers/DockerModelRunnerContainerTest.java) inside_block:container
+
+
+### Pulling the model
+
+Pulling the model is as simple as:
+
+
+[Pull model](../../core/src/test/java/org/testcontainers/containers/DockerModelRunnerContainerTest.java) inside_block:pullModel
+
+
+## Adding this module to your project dependencies
+
+*Docker Model Runner support is part of the core Testcontainers library.*
+
+Add the following dependency to your `pom.xml`/`build.gradle` file:
+
+=== "Gradle"
+ ```groovy
+ testImplementation "org.testcontainers:testcontainers:{{latest_version}}"
+ ```
+=== "Maven"
+ ```xml
+
+ org.testcontainers
+ testcontainers
+ {{latest_version}}
+ test
+
+ ```
+
diff --git a/docs/modules/kafka.md b/docs/modules/kafka.md
index a284163248a..cd2a660c8e0 100644
--- a/docs/modules/kafka.md
+++ b/docs/modules/kafka.md
@@ -43,7 +43,7 @@ Now your tests or any other process running on your machine can get access to ru
Create a `ConfluentKafkaContainer` to use it in your tests:
-[Creating a ConlfuentKafkaContainer](../../modules/kafka/src/test/java/org/testcontainers/kafka/ConfluentKafkaContainerTest.java) inside_block:constructorWithVersion
+[Creating a ConfluentKafkaContainer](../../modules/kafka/src/test/java/org/testcontainers/kafka/ConfluentKafkaContainerTest.java) inside_block:constructorWithVersion
### Using org.testcontainers.kafka.KafkaContainer
@@ -85,7 +85,7 @@ There are scenarios where additional listeners are needed because the consumer/p
container in the same network or a different process where the port to connect differs from the default exposed port. E.g [Toxiproxy](../../modules/toxiproxy/).
-[Register additional listener](../../modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java) inside_block:registerListener
+[Register additional listener](../../modules/kafka/src/test/java/org/testcontainers/kafka/KafkaContainerTest.java) inside_block:registerListener
Container defined in the same network:
diff --git a/docs/modules/ldap.md b/docs/modules/ldap.md
new file mode 100644
index 00000000000..771975d6346
--- /dev/null
+++ b/docs/modules/ldap.md
@@ -0,0 +1,30 @@
+# LDAP
+
+Testcontainers module for [LLDAP](https://hub.docker.com/r/lldap/lldap).
+
+## LLdapContainer's usage examples
+
+You can start an LLDAP container instance from any Java application by using:
+
+
+[LLDAP container](../../modules/ldap/src/test/java/org/testcontainers/ldap/LLdapContainerTest.java) inside_block:container
+
+
+## Adding this module to your project dependencies
+
+Add the following dependency to your `pom.xml`/`build.gradle` file:
+
+=== "Gradle"
+ ```groovy
+ testImplementation "org.testcontainers:ldap:{{latest_version}}"
+ ```
+
+=== "Maven"
+ ```xml
+
+ org.testcontainers
+ ldap
+ {{latest_version}}
+ test
+
+ ```
diff --git a/docs/modules/pinecone.md b/docs/modules/pinecone.md
new file mode 100644
index 00000000000..b4eac925c51
--- /dev/null
+++ b/docs/modules/pinecone.md
@@ -0,0 +1,30 @@
+# Pinecone
+
+Testcontainers module for [Pinecone Local](https://github.com/orgs/pinecone-io/packages/container/package/pinecone-local).
+
+## PineconeLocalContainer's usage examples
+
+You can start a Pinecone container instance from any Java application by using:
+
+
+[Pinecone container](../../modules/pinecone/src/test/java/org/testcontainers/pinecone/PineconeLocalContainerTest.java) inside_block:container
+
+
+## Adding this module to your project dependencies
+
+Add the following dependency to your `pom.xml`/`build.gradle` file:
+
+=== "Gradle"
+    ```groovy
+    testImplementation "org.testcontainers:pinecone:{{latest_version}}"
+    ```
+
+=== "Maven"
+    ```xml
+    
+     org.testcontainers
+     pinecone
+     {{latest_version}}
+     test
+    
+    ```
diff --git a/docs/modules/solr.md b/docs/modules/solr.md
index 44be46c1a1a..a332c7cbcbe 100644
--- a/docs/modules/solr.md
+++ b/docs/modules/solr.md
@@ -1,10 +1,6 @@
# Solr Container
-!!! note
- This module is INCUBATING. While it is ready for use and operational in the current version of Testcontainers, it is possible that it may receive breaking changes in the future. See [our contributing guidelines](/contributing/#incubating-modules) for more information on our incubating modules policy.
-
-
-This module helps running [solr](https://lucene.apache.org/solr/) using Testcontainers.
+This module helps running [solr](https://solr.apache.org/) using Testcontainers.
Note that it's based on the [official Docker image](https://hub.docker.com/_/solr/).
diff --git a/docs/modules/typesense.md b/docs/modules/typesense.md
index b9828e40f33..e7c332884c7 100644
--- a/docs/modules/typesense.md
+++ b/docs/modules/typesense.md
@@ -4,7 +4,7 @@ Testcontainers module for [Typesense](https://hub.docker.com/r/typesense/typesen
## TypesenseContainer's usage examples
-You can start an Typesense container instance from any Java application by using:
+You can start a Typesense container instance from any Java application by using:
[Typesense container](../../modules/typesense/src/test/java/org/testcontainers/typesense/TypesenseContainerTest.java) inside_block:container
diff --git a/docs/test_framework_integration/junit_5.md b/docs/test_framework_integration/junit_5.md
index 15597f90cdb..8f3c21372dd 100644
--- a/docs/test_framework_integration/junit_5.md
+++ b/docs/test_framework_integration/junit_5.md
@@ -62,7 +62,7 @@ Since this module has a dependency onto JUnit Jupiter and on Testcontainers core
has a dependency onto JUnit 4.x, projects using this module will end up with both, JUnit Jupiter
and JUnit 4.x in the test classpath.
-This extension has only be tested with sequential test execution. Using it with parallel test execution is unsupported and may have unintended side effects.
+This extension has only been tested with sequential test execution. Using it with parallel test execution is unsupported and may have unintended side effects.
## Adding Testcontainers JUnit 5 support to your project dependencies
diff --git a/examples/build.gradle b/examples/build.gradle
index 821f2b4a1fc..e80815cdae6 100644
--- a/examples/build.gradle
+++ b/examples/build.gradle
@@ -25,7 +25,7 @@ subprojects {
}
checkstyle {
- toolVersion = "10.12.4"
+ toolVersion = "10.23.0"
configFile = rootProject.file('../config/checkstyle/checkstyle.xml')
}
}
diff --git a/examples/cucumber/build.gradle b/examples/cucumber/build.gradle
index 2710e259e9b..530cf60616c 100644
--- a/examples/cucumber/build.gradle
+++ b/examples/cucumber/build.gradle
@@ -12,9 +12,16 @@ dependencies {
implementation 'org.seleniumhq.selenium:selenium-firefox-driver'
implementation 'org.seleniumhq.selenium:selenium-chrome-driver'
- testImplementation platform('io.cucumber:cucumber-bom:7.18.1')
+ testImplementation platform('org.junit:junit-bom:5.10.3')
+ testImplementation 'org.junit.platform:junit-platform-suite'
+ testImplementation platform('io.cucumber:cucumber-bom:7.23.0')
testImplementation 'io.cucumber:cucumber-java'
- testImplementation 'io.cucumber:cucumber-junit'
+ testImplementation 'io.cucumber:cucumber-junit-platform-engine'
testImplementation 'org.testcontainers:selenium'
testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.3'
+}
+
+test {
+ useJUnitPlatform()
}
diff --git a/examples/cucumber/src/test/java/org/testcontainers/examples/CucumberTest.java b/examples/cucumber/src/test/java/org/testcontainers/examples/CucumberTest.java
index 717c42351a8..5d6fe8c3f9c 100644
--- a/examples/cucumber/src/test/java/org/testcontainers/examples/CucumberTest.java
+++ b/examples/cucumber/src/test/java/org/testcontainers/examples/CucumberTest.java
@@ -1,9 +1,11 @@
package org.testcontainers.examples;
-import io.cucumber.junit.Cucumber;
-import io.cucumber.junit.CucumberOptions;
-import org.junit.runner.RunWith;
+import io.cucumber.junit.platform.engine.Constants;
+import org.junit.platform.suite.api.ConfigurationParameter;
+import org.junit.platform.suite.api.SelectPackages;
+import org.junit.platform.suite.api.Suite;
-@RunWith(Cucumber.class)
-@CucumberOptions(plugin = { "pretty" })
+@Suite
+@SelectPackages("org.testcontainers.examples")
+@ConfigurationParameter(key = Constants.PLUGIN_PROPERTY_NAME, value = "pretty")
public class CucumberTest {}
diff --git a/examples/gradle/wrapper/gradle-wrapper.jar b/examples/gradle/wrapper/gradle-wrapper.jar
index 2c3521197d7..1b33c55baab 100644
Binary files a/examples/gradle/wrapper/gradle-wrapper.jar and b/examples/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/examples/gradle/wrapper/gradle-wrapper.properties b/examples/gradle/wrapper/gradle-wrapper.properties
index 68e8816d71c..78cb6e16a49 100644
--- a/examples/gradle/wrapper/gradle-wrapper.properties
+++ b/examples/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionSha256Sum=d725d707bfabd4dfdc958c624003b3c80accc03f7037b5122c4b1d0ef15cecab
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-bin.zip
+distributionSha256Sum=bd71102213493060956ec229d946beee57158dbd89d0e62b91bca0fa2c5f3531
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.3-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/examples/gradlew b/examples/gradlew
index f5feea6d6b1..23d15a93670 100755
--- a/examples/gradlew
+++ b/examples/gradlew
@@ -86,8 +86,7 @@ done
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
-APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
-' "$PWD" ) || exit
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum
@@ -115,7 +114,7 @@ case "$( uname )" in #(
NONSTOP* ) nonstop=true ;;
esac
-CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+CLASSPATH="\\\"\\\""
# Determine the Java command to use to start the JVM.
@@ -206,7 +205,7 @@ fi
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
-# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
@@ -214,7 +213,7 @@ DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
- org.gradle.wrapper.GradleWrapperMain \
+ -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \
"$@"
# Stop when "xargs" is not available.
diff --git a/examples/gradlew.bat b/examples/gradlew.bat
index 9b42019c791..5eed7ee8452 100644
--- a/examples/gradlew.bat
+++ b/examples/gradlew.bat
@@ -70,11 +70,11 @@ goto fail
:execute
@rem Setup the command line
-set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+set CLASSPATH=
@rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %*
:end
@rem End local scope for the variables with windows NT shell
diff --git a/examples/hazelcast/build.gradle b/examples/hazelcast/build.gradle
index 5b3789480ad..0a33949201e 100644
--- a/examples/hazelcast/build.gradle
+++ b/examples/hazelcast/build.gradle
@@ -11,7 +11,8 @@ dependencies {
testImplementation 'com.hazelcast:hazelcast:5.3.8'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/immudb/build.gradle b/examples/immudb/build.gradle
index c99dceac836..ee39cf51865 100644
--- a/examples/immudb/build.gradle
+++ b/examples/immudb/build.gradle
@@ -13,7 +13,8 @@ dependencies {
testImplementation 'org.assertj:assertj-core:3.26.3'
testImplementation 'com.google.guava:guava:23.0'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/kafka-cluster/build.gradle b/examples/kafka-cluster/build.gradle
index 623fcd66175..2efabca33a3 100644
--- a/examples/kafka-cluster/build.gradle
+++ b/examples/kafka-cluster/build.gradle
@@ -10,12 +10,13 @@ dependencies {
testCompileOnly "org.projectlombok:lombok:1.18.34"
testAnnotationProcessor "org.projectlombok:lombok:1.18.34"
testImplementation 'org.testcontainers:kafka'
- testImplementation 'org.apache.kafka:kafka-clients:3.8.0'
+ testImplementation 'org.apache.kafka:kafka-clients:4.0.0'
testImplementation 'org.assertj:assertj-core:3.26.3'
testImplementation 'com.google.guava:guava:23.0'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
testImplementation 'org.awaitility:awaitility:4.2.2'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ApacheKafkaContainerCluster.java b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ApacheKafkaContainerCluster.java
new file mode 100644
index 00000000000..a8616d2d4bb
--- /dev/null
+++ b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ApacheKafkaContainerCluster.java
@@ -0,0 +1,104 @@
+package com.example.kafkacluster;
+
+import org.apache.kafka.common.Uuid;
+import org.awaitility.Awaitility;
+import org.testcontainers.containers.Container;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.Network;
+import org.testcontainers.kafka.KafkaContainer;
+import org.testcontainers.lifecycle.Startable;
+import org.testcontainers.utility.DockerImageName;
+
+import java.time.Duration;
+import java.util.Collection;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ApacheKafkaContainerCluster implements Startable {
+
+ private final int brokersNum;
+
+ private final Network network;
+
+ private final Collection<KafkaContainer> brokers;
+
+ public ApacheKafkaContainerCluster(String version, int brokersNum, int internalTopicsRf) {
+ if (brokersNum < 0) {
+ throw new IllegalArgumentException("brokersNum '" + brokersNum + "' must be greater than 0");
+ }
+ if (internalTopicsRf < 0 || internalTopicsRf > brokersNum) {
+ throw new IllegalArgumentException(
+ "internalTopicsRf '" + internalTopicsRf + "' must be less than brokersNum and greater than 0"
+ );
+ }
+
+ this.brokersNum = brokersNum;
+ this.network = Network.newNetwork();
+
+ String controllerQuorumVoters = IntStream
+ .range(0, brokersNum)
+ .mapToObj(brokerNum -> String.format("%d@broker-%d:9094", brokerNum, brokerNum))
+ .collect(Collectors.joining(","));
+
+ String clusterId = Uuid.randomUuid().toString();
+
+ this.brokers =
+ IntStream
+ .range(0, brokersNum)
+ .mapToObj(brokerNum -> {
+ return new KafkaContainer(DockerImageName.parse("apache/kafka").withTag(version))
+ .withNetwork(this.network)
+ .withNetworkAliases("broker-" + brokerNum)
+ .withEnv("CLUSTER_ID", clusterId)
+ .withEnv("KAFKA_BROKER_ID", brokerNum + "")
+ .withEnv("KAFKA_NODE_ID", brokerNum + "")
+ .withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters)
+ .withEnv("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", internalTopicsRf + "")
+ .withEnv("KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS", "0")
+ .withEnv("KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS", internalTopicsRf + "")
+ .withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", internalTopicsRf + "")
+ .withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", internalTopicsRf + "")
+ .withStartupTimeout(Duration.ofMinutes(1));
+ })
+ .collect(Collectors.toList());
+ }
+
+ public Collection<KafkaContainer> getBrokers() {
+ return this.brokers;
+ }
+
+ public String getBootstrapServers() {
+ return brokers.stream().map(KafkaContainer::getBootstrapServers).collect(Collectors.joining(","));
+ }
+
+ @Override
+ public void start() {
+ // Needs to start all the brokers at once
+ brokers.parallelStream().forEach(GenericContainer::start);
+
+ Awaitility
+ .await()
+ .atMost(Duration.ofSeconds(30))
+ .untilAsserted(() -> {
+ Container.ExecResult result =
+ this.brokers.stream()
+ .findFirst()
+ .get()
+ .execInContainer(
+ "sh",
+ "-c",
+ "/opt/kafka/bin/kafka-log-dirs.sh --bootstrap-server localhost:9093 --describe | grep -o '\"broker\"' | wc -l"
+ );
+ String brokers = result.getStdout().replace("\n", "");
+
+ assertThat(brokers).asInt().isEqualTo(this.brokersNum);
+ });
+ }
+
+ @Override
+ public void stop() {
+ this.brokers.stream().parallel().forEach(GenericContainer::stop);
+ }
+}
diff --git a/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ApacheKafkaContainerClusterTest.java b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ApacheKafkaContainerClusterTest.java
new file mode 100644
index 00000000000..38ac274706b
--- /dev/null
+++ b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ApacheKafkaContainerClusterTest.java
@@ -0,0 +1,94 @@
+package com.example.kafkacluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.awaitility.Awaitility;
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.tuple;
+
+class ApacheKafkaContainerClusterTest {
+
+ @Test
+ void testKafkaContainerCluster() throws Exception {
+ try (ApacheKafkaContainerCluster cluster = new ApacheKafkaContainerCluster("3.8.0", 3, 2)) {
+ cluster.start();
+ String bootstrapServers = cluster.getBootstrapServers();
+
+ assertThat(cluster.getBrokers()).hasSize(3);
+
+ testKafkaFunctionality(bootstrapServers, 3, 2);
+ }
+ }
+
+ protected void testKafkaFunctionality(String bootstrapServers, int partitions, int rf) throws Exception {
+ try (
+ AdminClient adminClient = AdminClient.create(
+ ImmutableMap.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
+ );
+ KafkaProducer<String, String> producer = new KafkaProducer<>(
+ ImmutableMap.of(
+ ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
+ bootstrapServers,
+ ProducerConfig.CLIENT_ID_CONFIG,
+ UUID.randomUUID().toString()
+ ),
+ new StringSerializer(),
+ new StringSerializer()
+ );
+ KafkaConsumer<String, String> consumer = new KafkaConsumer<>(
+ ImmutableMap.of(
+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
+ bootstrapServers,
+ ConsumerConfig.GROUP_ID_CONFIG,
+ "tc-" + UUID.randomUUID(),
+ ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
+ "earliest"
+ ),
+ new StringDeserializer(),
+ new StringDeserializer()
+ );
+ ) {
+ String topicName = "messages";
+
+ Collection<NewTopic> topics = Collections.singletonList(new NewTopic(topicName, partitions, (short) rf));
+ adminClient.createTopics(topics).all().get(30, TimeUnit.SECONDS);
+
+ consumer.subscribe(Collections.singletonList(topicName));
+
+ producer.send(new ProducerRecord<>(topicName, "testcontainers", "rulezzz")).get();
+
+ Awaitility
+ .await()
+ .atMost(Duration.ofSeconds(10))
+ .untilAsserted(() -> {
+ ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+
+ assertThat(records)
+ .hasSize(1)
+ .extracting(ConsumerRecord::topic, ConsumerRecord::key, ConsumerRecord::value)
+ .containsExactly(tuple(topicName, "testcontainers", "rulezzz"));
+ });
+
+ consumer.unsubscribe();
+ }
+ }
+}
diff --git a/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ConfluentKafkaContainerCluster.java b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ConfluentKafkaContainerCluster.java
new file mode 100644
index 00000000000..222050c76c0
--- /dev/null
+++ b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ConfluentKafkaContainerCluster.java
@@ -0,0 +1,105 @@
+package com.example.kafkacluster;
+
+import org.apache.kafka.common.Uuid;
+import org.awaitility.Awaitility;
+import org.testcontainers.containers.Container;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.Network;
+import org.testcontainers.kafka.ConfluentKafkaContainer;
+import org.testcontainers.lifecycle.Startable;
+import org.testcontainers.utility.DockerImageName;
+
+import java.time.Duration;
+import java.util.Collection;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ConfluentKafkaContainerCluster implements Startable {
+
+ private final int brokersNum;
+
+ private final Network network;
+
+ private final Collection<ConfluentKafkaContainer> brokers;
+
+ public ConfluentKafkaContainerCluster(String confluentPlatformVersion, int brokersNum, int internalTopicsRf) {
+ if (brokersNum < 0) {
+ throw new IllegalArgumentException("brokersNum '" + brokersNum + "' must be greater than 0");
+ }
+ if (internalTopicsRf < 0 || internalTopicsRf > brokersNum) {
+ throw new IllegalArgumentException(
+ "internalTopicsRf '" + internalTopicsRf + "' must be less than brokersNum and greater than 0"
+ );
+ }
+
+ this.brokersNum = brokersNum;
+ this.network = Network.newNetwork();
+
+ String controllerQuorumVoters = IntStream
+ .range(0, brokersNum)
+ .mapToObj(brokerNum -> String.format("%d@broker-%d:9094", brokerNum, brokerNum))
+ .collect(Collectors.joining(","));
+
+ String clusterId = Uuid.randomUuid().toString();
+
+ this.brokers =
+ IntStream
+ .range(0, brokersNum)
+ .mapToObj(brokerNum -> {
+ return new ConfluentKafkaContainer(
+ DockerImageName.parse("confluentinc/cp-kafka").withTag(confluentPlatformVersion)
+ )
+ .withNetwork(this.network)
+ .withNetworkAliases("broker-" + brokerNum)
+ .withEnv("CLUSTER_ID", clusterId)
+ .withEnv("KAFKA_BROKER_ID", brokerNum + "")
+ .withEnv("KAFKA_NODE_ID", brokerNum + "")
+ .withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters)
+ .withEnv("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", internalTopicsRf + "")
+ .withEnv("KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS", internalTopicsRf + "")
+ .withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", internalTopicsRf + "")
+ .withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", internalTopicsRf + "")
+ .withStartupTimeout(Duration.ofMinutes(1));
+ })
+ .collect(Collectors.toList());
+ }
+
+ public Collection<ConfluentKafkaContainer> getBrokers() {
+ return this.brokers;
+ }
+
+ public String getBootstrapServers() {
+ return brokers.stream().map(ConfluentKafkaContainer::getBootstrapServers).collect(Collectors.joining(","));
+ }
+
+ @Override
+ public void start() {
+ // Needs to start all the brokers at once
+ brokers.parallelStream().forEach(GenericContainer::start);
+
+ Awaitility
+ .await()
+ .atMost(Duration.ofSeconds(30))
+ .untilAsserted(() -> {
+ Container.ExecResult result =
+ this.brokers.stream()
+ .findFirst()
+ .get()
+ .execInContainer(
+ "sh",
+ "-c",
+ "kafka-metadata-shell --snapshot /var/lib/kafka/data/__cluster_metadata-0/00000000000000000000.log ls /brokers | wc -l"
+ );
+ String brokers = result.getStdout().replace("\n", "");
+
+ assertThat(brokers).asInt().isEqualTo(this.brokersNum);
+ });
+ }
+
+ @Override
+ public void stop() {
+ this.brokers.stream().parallel().forEach(GenericContainer::stop);
+ }
+}
diff --git a/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ConfluentKafkaContainerClusterTest.java b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ConfluentKafkaContainerClusterTest.java
new file mode 100644
index 00000000000..3bb38cb7152
--- /dev/null
+++ b/examples/kafka-cluster/src/test/java/com/example/kafkacluster/ConfluentKafkaContainerClusterTest.java
@@ -0,0 +1,94 @@
+package com.example.kafkacluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.awaitility.Awaitility;
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.tuple;
+
+class ConfluentKafkaContainerClusterTest {
+
+ @Test
+ void testKafkaContainerCluster() throws Exception {
+ try (ConfluentKafkaContainerCluster cluster = new ConfluentKafkaContainerCluster("7.4.0", 3, 2)) {
+ cluster.start();
+ String bootstrapServers = cluster.getBootstrapServers();
+
+ assertThat(cluster.getBrokers()).hasSize(3);
+
+ testKafkaFunctionality(bootstrapServers, 3, 2);
+ }
+ }
+
+ protected void testKafkaFunctionality(String bootstrapServers, int partitions, int rf) throws Exception {
+ try (
+ AdminClient adminClient = AdminClient.create(
+ ImmutableMap.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
+ );
+ KafkaProducer<String, String> producer = new KafkaProducer<>(
+ ImmutableMap.of(
+ ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
+ bootstrapServers,
+ ProducerConfig.CLIENT_ID_CONFIG,
+ UUID.randomUUID().toString()
+ ),
+ new StringSerializer(),
+ new StringSerializer()
+ );
+ KafkaConsumer<String, String> consumer = new KafkaConsumer<>(
+ ImmutableMap.of(
+ ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
+ bootstrapServers,
+ ConsumerConfig.GROUP_ID_CONFIG,
+ "tc-" + UUID.randomUUID(),
+ ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
+ "earliest"
+ ),
+ new StringDeserializer(),
+ new StringDeserializer()
+ );
+ ) {
+ String topicName = "messages";
+
+ Collection<NewTopic> topics = Collections.singletonList(new NewTopic(topicName, partitions, (short) rf));
+ adminClient.createTopics(topics).all().get(30, TimeUnit.SECONDS);
+
+ consumer.subscribe(Collections.singletonList(topicName));
+
+ producer.send(new ProducerRecord<>(topicName, "testcontainers", "rulezzz")).get();
+
+ Awaitility
+ .await()
+ .atMost(Duration.ofSeconds(10))
+ .untilAsserted(() -> {
+ ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
+
+ assertThat(records)
+ .hasSize(1)
+ .extracting(ConsumerRecord::topic, ConsumerRecord::key, ConsumerRecord::value)
+ .containsExactly(tuple(topicName, "testcontainers", "rulezzz"));
+ });
+
+ consumer.unsubscribe();
+ }
+ }
+}
diff --git a/examples/linked-container/build.gradle b/examples/linked-container/build.gradle
deleted file mode 100644
index 028e457fc5d..00000000000
--- a/examples/linked-container/build.gradle
+++ /dev/null
@@ -1,17 +0,0 @@
-plugins {
- id 'java'
-}
-
-repositories {
- mavenCentral()
-}
-dependencies {
- compileOnly 'org.slf4j:slf4j-api:1.7.36'
- implementation 'com.squareup.okhttp3:okhttp:4.12.0'
- implementation 'org.json:json:20240303'
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
- testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.testcontainers:postgresql'
- testImplementation 'org.assertj:assertj-core:3.26.3'
-}
-
diff --git a/examples/linked-container/src/main/java/com/example/linkedcontainer/RedmineClient.java b/examples/linked-container/src/main/java/com/example/linkedcontainer/RedmineClient.java
deleted file mode 100644
index c95ec133e2f..00000000000
--- a/examples/linked-container/src/main/java/com/example/linkedcontainer/RedmineClient.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package com.example.linkedcontainer;
-
-import okhttp3.OkHttpClient;
-import okhttp3.Request;
-import okhttp3.Response;
-import org.json.JSONObject;
-
-import java.io.IOException;
-
-/**
- * A crude, partially implemented Redmine client.
- */
-public class RedmineClient {
-
- private String url;
-
- private OkHttpClient client;
-
- public RedmineClient(String url) {
- this.url = url;
- client = new OkHttpClient();
- }
-
- public int getIssueCount() throws IOException {
- Request request = new Request.Builder().url(url + "/issues.json").build();
-
- Response response = client.newCall(request).execute();
- JSONObject jsonObject = new JSONObject(response.body().string());
- return jsonObject.getInt("total_count");
- }
-}
diff --git a/examples/linked-container/src/test/java/com/example/linkedcontainer/LinkedContainerTestImages.java b/examples/linked-container/src/test/java/com/example/linkedcontainer/LinkedContainerTestImages.java
deleted file mode 100644
index af8a823c1de..00000000000
--- a/examples/linked-container/src/test/java/com/example/linkedcontainer/LinkedContainerTestImages.java
+++ /dev/null
@@ -1,9 +0,0 @@
-package com.example.linkedcontainer;
-
-import org.testcontainers.utility.DockerImageName;
-
-public interface LinkedContainerTestImages {
- DockerImageName POSTGRES_TEST_IMAGE = DockerImageName.parse("postgres:9.6.12");
-
- DockerImageName REDMINE_TEST_IMAGE = DockerImageName.parse("redmine:3.3.2");
-}
diff --git a/examples/linked-container/src/test/java/com/example/linkedcontainer/RedmineClientTest.java b/examples/linked-container/src/test/java/com/example/linkedcontainer/RedmineClientTest.java
deleted file mode 100644
index 721c00835c9..00000000000
--- a/examples/linked-container/src/test/java/com/example/linkedcontainer/RedmineClientTest.java
+++ /dev/null
@@ -1,39 +0,0 @@
-package com.example.linkedcontainer;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.RuleChain;
-import org.testcontainers.containers.PostgreSQLContainer;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-/**
- * Tests for RedmineClient.
- */
-public class RedmineClientTest {
-
- private static final String POSTGRES_USERNAME = "redmine";
-
- private static final String POSTGRES_PASSWORD = "secret";
-
- private PostgreSQLContainer<?> postgreSQLContainer = new PostgreSQLContainer<>(
- LinkedContainerTestImages.POSTGRES_TEST_IMAGE
- )
- .withUsername(POSTGRES_USERNAME)
- .withPassword(POSTGRES_PASSWORD);
-
- private RedmineContainer redmineContainer = new RedmineContainer(LinkedContainerTestImages.REDMINE_TEST_IMAGE)
- .withLinkToContainer(postgreSQLContainer, "postgres")
- .withEnv("POSTGRES_ENV_POSTGRES_USER", POSTGRES_USERNAME)
- .withEnv("POSTGRES_ENV_POSTGRES_PASSWORD", POSTGRES_PASSWORD);
-
- @Rule
- public RuleChain chain = RuleChain.outerRule(postgreSQLContainer).around(redmineContainer);
-
- @Test
- public void canGetIssueCount() throws Exception {
- RedmineClient redmineClient = new RedmineClient(redmineContainer.getRedmineUrl());
-
- assertThat(redmineClient.getIssueCount()).as("The issue count can be retrieved.").isZero();
- }
-}
diff --git a/examples/linked-container/src/test/java/com/example/linkedcontainer/RedmineContainer.java b/examples/linked-container/src/test/java/com/example/linkedcontainer/RedmineContainer.java
deleted file mode 100644
index 0f0bea36c1a..00000000000
--- a/examples/linked-container/src/test/java/com/example/linkedcontainer/RedmineContainer.java
+++ /dev/null
@@ -1,33 +0,0 @@
-package com.example.linkedcontainer;
-
-import org.testcontainers.containers.GenericContainer;
-import org.testcontainers.containers.traits.LinkableContainer;
-import org.testcontainers.containers.wait.strategy.Wait;
-import org.testcontainers.utility.DockerImageName;
-
-/**
- * A Redmine container.
- */
- public class RedmineContainer extends GenericContainer<RedmineContainer> {
-
- private static final int REDMINE_PORT = 3000;
-
- public RedmineContainer(DockerImageName dockerImageName) {
- super(dockerImageName);
- }
-
- @Override
- protected void configure() {
- addExposedPort(REDMINE_PORT);
- waitingFor(Wait.forHttp("/"));
- }
-
- public RedmineContainer withLinkToContainer(LinkableContainer otherContainer, String alias) {
- addLink(otherContainer, alias);
- return this;
- }
-
- public String getRedmineUrl() {
- return String.format("http://%s:%d", this.getHost(), this.getMappedPort(REDMINE_PORT));
- }
-}
diff --git a/examples/nats/build.gradle b/examples/nats/build.gradle
index 5814d592c20..21b0baa3a3a 100644
--- a/examples/nats/build.gradle
+++ b/examples/nats/build.gradle
@@ -12,7 +12,8 @@ dependencies {
testImplementation 'io.nats:jnats:2.20.2'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
testImplementation 'org.apache.httpcomponents:httpclient:4.5.14'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/neo4j-container/build.gradle b/examples/neo4j-container/build.gradle
index 186a910be2f..7f85f5162a1 100644
--- a/examples/neo4j-container/build.gradle
+++ b/examples/neo4j-container/build.gradle
@@ -11,5 +11,6 @@ dependencies {
testImplementation 'org.neo4j.driver:neo4j-java-driver:4.4.18'
testImplementation 'org.testcontainers:neo4j'
testImplementation 'org.testcontainers:junit-jupiter'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
diff --git a/examples/neo4j-container/src/test/java/org/testcontainers/containers/Neo4jExampleTest.java b/examples/neo4j-container/src/test/java/org/testcontainers/containers/Neo4jExampleTest.java
index 38c22a0ff45..abf42a115de 100644
--- a/examples/neo4j-container/src/test/java/org/testcontainers/containers/Neo4jExampleTest.java
+++ b/examples/neo4j-container/src/test/java/org/testcontainers/containers/Neo4jExampleTest.java
@@ -27,7 +27,7 @@
class Neo4jExampleTest {
@Container
- private static Neo4jContainer<?> neo4jContainer = new Neo4jContainer<>(DockerImageName.parse("neo4j:4.4"))
+ private static Neo4jContainer neo4jContainer = new Neo4jContainer(DockerImageName.parse("neo4j:4.4"))
.withoutAuthentication(); // Disable password
@Test
diff --git a/examples/ollama-hugging-face/build.gradle b/examples/ollama-hugging-face/build.gradle
index 77fe23ea1c2..4bfe199e62b 100644
--- a/examples/ollama-hugging-face/build.gradle
+++ b/examples/ollama-hugging-face/build.gradle
@@ -10,8 +10,9 @@ dependencies {
testImplementation 'org.testcontainers:ollama'
testImplementation 'org.assertj:assertj-core:3.26.3'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
testImplementation 'io.rest-assured:rest-assured:5.5.0'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/redis-backed-cache-testng/build.gradle b/examples/redis-backed-cache-testng/build.gradle
index d28b2a0cc66..3fda7078fa4 100644
--- a/examples/redis-backed-cache-testng/build.gradle
+++ b/examples/redis-backed-cache-testng/build.gradle
@@ -8,7 +8,7 @@ repositories {
dependencies {
compileOnly 'org.slf4j:slf4j-api:1.7.36'
- implementation 'redis.clients:jedis:5.1.5'
+ implementation 'redis.clients:jedis:6.0.0'
implementation 'com.google.code.gson:gson:2.11.0'
implementation 'com.google.guava:guava:23.0'
testImplementation 'org.testcontainers:testcontainers'
diff --git a/examples/redis-backed-cache/build.gradle b/examples/redis-backed-cache/build.gradle
index 782416a239b..26dd05f170d 100644
--- a/examples/redis-backed-cache/build.gradle
+++ b/examples/redis-backed-cache/build.gradle
@@ -8,14 +8,15 @@ repositories {
dependencies {
compileOnly 'org.slf4j:slf4j-api:1.7.36'
- implementation 'redis.clients:jedis:5.1.5'
+ implementation 'redis.clients:jedis:6.0.0'
implementation 'com.google.code.gson:gson:2.11.0'
implementation 'com.google.guava:guava:23.0'
testImplementation 'org.testcontainers:testcontainers'
testImplementation 'org.testcontainers:junit-jupiter'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/selenium-container/build.gradle b/examples/selenium-container/build.gradle
index b7528880ca3..823a7ccfc41 100644
--- a/examples/selenium-container/build.gradle
+++ b/examples/selenium-container/build.gradle
@@ -17,7 +17,8 @@ dependencies {
testImplementation 'org.testcontainers:selenium'
testImplementation 'org.testcontainers:junit-jupiter'
testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/settings.gradle b/examples/settings.gradle
index 8aed2430fb5..8d144867bbd 100644
--- a/examples/settings.gradle
+++ b/examples/settings.gradle
@@ -20,7 +20,6 @@ includeBuild '..'
// explicit include to allow Dependabot to autodiscover subprojects
include 'kafka-cluster'
-include 'linked-container'
include 'neo4j-container'
include 'redis-backed-cache'
include 'redis-backed-cache-testng'
diff --git a/examples/sftp/build.gradle b/examples/sftp/build.gradle
index d8ed52d255f..fbb5f6cbe40 100644
--- a/examples/sftp/build.gradle
+++ b/examples/sftp/build.gradle
@@ -11,7 +11,8 @@ dependencies {
testImplementation 'org.testcontainers:testcontainers'
testImplementation 'org.assertj:assertj-core:3.26.3'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/sftp/src/test/java/org/example/SftpContainerTest.java b/examples/sftp/src/test/java/org/example/SftpContainerTest.java
index e54b5b72036..3a6593ea736 100644
--- a/examples/sftp/src/test/java/org/example/SftpContainerTest.java
+++ b/examples/sftp/src/test/java/org/example/SftpContainerTest.java
@@ -1,6 +1,7 @@
package org.example;
import com.jcraft.jsch.ChannelSftp;
+import com.jcraft.jsch.HostKey;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.Session;
import org.junit.jupiter.api.Test;
@@ -10,6 +11,7 @@
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
+import java.util.Base64;
import java.util.stream.Collectors;
import static org.assertj.core.api.Assertions.assertThat;
@@ -49,4 +51,55 @@ void test() throws Exception {
.noneMatch(item -> item.toString().contains("testcontainers/file.txt"));
}
}
+
+ @Test
+ void testHostKeyCheck() throws Exception {
+ try (
+ GenericContainer<?> sftp = new GenericContainer<>("atmoz/sftp:alpine-3.7")
+ .withCopyFileToContainer(
+ MountableFile.forClasspathResource("testcontainers/", 0777),
+ "/home/foo/upload/testcontainers"
+ )
+ .withCopyFileToContainer(
+ MountableFile.forClasspathResource("./ssh_host_rsa_key", 0400),
+ "/etc/ssh/ssh_host_rsa_key"
+ )
+ .withExposedPorts(22)
+ .withCommand("foo:pass:::upload")
+ ) {
+ sftp.start();
+ JSch jsch = new JSch();
+ Session jschSession = jsch.getSession("foo", sftp.getHost(), sftp.getMappedPort(22));
+ jschSession.setPassword("pass");
+ // hostKeyString is string starting with AAAA from file known_hosts or ssh_host_*_key.pub
+ // generate the files with:
+ // ssh-keygen -t rsa -b 3072 -f ssh_host_rsa_key < /dev/null
+ String hostKeyString =
+ "AAAAB3NzaC1yc2EAAAADAQABAAABgQCXMxVRzmFWxfrRB9XiZ/3HNM+xkYYE+IMGuOZD" +
+ "04M2ezU25XjT6cPajzpFmzTxR2qEpRCKHeVnSG5nT6UXQp7760brTN7m5sDasbMnHgYh" +
+ "fC/3of2k6qTR9X/JHRpgwzq5+6FtEe41w1H1dXoNIr4YTKnLijSp8MKqBtPPNUpzEVb9" +
+ "5YKZGdCDoCbbYOyS/Dc8azUDo0mqM542J3nA2Sq9HCP0BAv43hrTAtCZodkB5wo18exb" +
+ "fPKsjGtA3de2npybFoSRbavZmT8L/b2iHZX6FRaqLsbYGKtszCWu5OU7WBX5g5QVlLfO" +
+ "nGQ+LsF6d6pX5LlMwEU14uu4gNPvZFOaZXtHNHZqnBcjd/sMaw5N/atFsPgtQ0vYnrEA" +
+ "D6oDjj0uXMsnmgUWTZBi3q2GBWWPqhE+0ASb2xBQGa+tWWTVYbuuYlA7hUX0URK8FcLw" +
+ "4UOYJjscDjnjlvQkghd2esP5NxV1NXkG2XYNHnf1E/tH4+AHJzy+qOQom7ehda96FZ8=";
+ HostKey hostKey = new HostKey(sftp.getHost(), Base64.getDecoder().decode(hostKeyString));
+ jschSession.getHostKeyRepository().add(hostKey, null);
+ jschSession.connect();
+ ChannelSftp channel = (ChannelSftp) jschSession.openChannel("sftp");
+ channel.connect();
+ assertThat(channel.ls("/upload/testcontainers")).anyMatch(item -> item.toString().contains("file.txt"));
+ assertThat(
+ new BufferedReader(
+ new InputStreamReader(channel.get("/upload/testcontainers/file.txt"), StandardCharsets.UTF_8)
+ )
+ .lines()
+ .collect(Collectors.joining("\n"))
+ )
+ .contains("Testcontainers");
+ channel.rm("/upload/testcontainers/file.txt");
+ assertThat(channel.ls("/upload/testcontainers/"))
+ .noneMatch(item -> item.toString().contains("testcontainers/file.txt"));
+ }
+ }
}
diff --git a/examples/sftp/src/test/resources/ssh_host_rsa_key b/examples/sftp/src/test/resources/ssh_host_rsa_key
new file mode 100644
index 00000000000..9987990b63d
--- /dev/null
+++ b/examples/sftp/src/test/resources/ssh_host_rsa_key
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAlzMVUc5hVsX60QfV4mf9xzTPsZGGBPiDBrjmQ9ODNns1NuV40+nD
+2o86RZs08UdqhKUQih3lZ0huZ0+lF0Ke++tG60ze5ubA2rGzJx4GIXwv96H9pOqk0fV/yR
+0aYMM6ufuhbRHuNcNR9XV6DSK+GEypy4o0qfDCqgbTzzVKcxFW/eWCmRnQg6Am22Dskvw3
+PGs1A6NJqjOeNid5wNkqvRwj9AQL+N4a0wLQmaHZAecKNfHsW3zyrIxrQN3Xtp6cmxaEkW
+2r2Zk/C/29oh2V+hUWqi7G2BirbMwlruTlO1gV+YOUFZS3zpxkPi7BeneqV+S5TMBFNeLr
+uIDT72RTmmV7RzR2apwXI3f7DGsOTf2rRbD4LUNL2J6xAA+qA449LlzLJ5oFFk2QYt6thg
+Vlj6oRPtAEm9sQUBmvrVlk1WG7rmJQO4VF9FESvBXC8OFDmCY7HA4545b0JIIXdnrD+TcV
+dTV5Btl2DR539RP7R+PgByc8vqjkKJu3oXWvehWfAAAFiPUCzjT1As40AAAAB3NzaC1yc2
+EAAAGBAJczFVHOYVbF+tEH1eJn/cc0z7GRhgT4gwa45kPTgzZ7NTbleNPpw9qPOkWbNPFH
+aoSlEIod5WdIbmdPpRdCnvvrRutM3ubmwNqxsyceBiF8L/eh/aTqpNH1f8kdGmDDOrn7oW
+0R7jXDUfV1eg0ivhhMqcuKNKnwwqoG0881SnMRVv3lgpkZ0IOgJttg7JL8NzxrNQOjSaoz
+njYnecDZKr0cI/QEC/jeGtMC0Jmh2QHnCjXx7Ft88qyMa0Dd17aenJsWhJFtq9mZPwv9va
+IdlfoVFqouxtgYq2zMJa7k5TtYFfmDlBWUt86cZD4uwXp3qlfkuUzARTXi67iA0+9kU5pl
+e0c0dmqcFyN3+wxrDk39q0Ww+C1DS9iesQAPqgOOPS5cyyeaBRZNkGLerYYFZY+qET7QBJ
+vbEFAZr61ZZNVhu65iUDuFRfRRErwVwvDhQ5gmOxwOOeOW9CSCF3Z6w/k3FXU1eQbZdg0e
+d/UT+0fj4AcnPL6o5Cibt6F1r3oVnwAAAAMBAAEAAAGALcv8wKcUx6423tqTN70M2qpN4H
+h2Egpd0YruwAuQWk+uWh7eXr2XI5uvaEbvHcfmZSAEJvmQMxz2x9cRZ763nhFxDTNe7qxl
+LLiXTZlj/P97HfQUej/SRYApQPbONxHbN1sW1Y0RTHqJWCJJojHsRzrtUSfe9Lxmkg54WH
+JJRxow8b1zNcFibYP0UQ2GCq1XY7cLOztZxDJXUQra74U300jzQOV65NoNYO2g1m/15YQg
+DR/mWf26GXZ8xAyN2pQm3wiI86kY1UP+2kVr38tGcJ+Xrm08Pav06IiEUdFAdDRLL0AWXY
+ZG25BBJn2VaPZoE5+MH7xRQ2BrqNUZ6ec8jTPZXWN6VyZCmn06KRblIRnv/NcMV5GH/lE9
+JbP/MnQQzsQAO0REfhcrdb66I6l0jMTwQcvSJyPXLVl1UvobzcF+CpcExsoaQj5U9cwhkG
+XRLqPhI76+L0L2kNefQ4yN5MhxWiajKUOknRITkvmNR+jJYsUN/ziODRevbakBzyqtAAAA
+wCpC6P+iJg19HdhNf6I2IUQErPoltUhA5bsUGmuseCn19Y3V5RmNa8+HHfbnMkUSoFzTvS
+j0l7rkxl0vvPmz0zr/2ehWiMbReFRy3hGl55AGPLE7pjIy08JIUcQm2jH8C3oeSKNwCrYV
++HWsOsQu4+/uOTgp6I46+iSLLG+xjH+5zLtvxa6+o+zLjAOSW4aweAw1WAXy8J4ylAv2nA
+n3g3Rfa7C0qZG1bZ63phcgv2BNzN+QgmORoh5v5ICvT+qJ5wAAAMEAwvdI3XsLV0uzNkAq
+C9aWyK4cAdphvCb8n0oz5Vrm6j/qFRXzcDZLtkMboCRE2qVqNLQjMiTJo/QjX9jxe7LD6c
+Vxtlcl2Ts8qrixFhKXJNwC/lq/TTe2dpMSYm61OINK3TiofZi6eff/ubcpq7zr3iVyWk5b
+wAVSun8q+Su7ziYYb+MuBQsKn5VWyoYK+E/LFItY26ulOxbrntB805JsXpjbYrL0KoXJCx
+6ZWdBVsvbD733WipNbPQZ+4JYDbun7AAAAwQDGiFOALlS5nidWFqMeMm/dGsHpwri0b10Z
+Bf/DPPxK6EuFKLUppt6KMl2zJjwVa2NqSTppz7TpUP6jC5pSglxtcvatEIRVF8KBxuIJ/G
+8Wav3Xuxu9nrRyKAzXjrjU+4TjAH1jBfTj3/tDdRagxt7JESirE+sYW5nie9XpzW4ehsf6
+fJacmwoiGdSCc4dldD8ZkEXcmCChFTH+PY3uYtiJr+znzbUZ1RLL3Uk2xHWOWSHz/1tUBy
+BFP58e3rYvNa0AAAAPYWFAMjMtMDcxNTMtMDA5AQIDBA==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/examples/sftp/src/test/resources/ssh_host_rsa_key.pub b/examples/sftp/src/test/resources/ssh_host_rsa_key.pub
new file mode 100644
index 00000000000..57b3aebb050
--- /dev/null
+++ b/examples/sftp/src/test/resources/ssh_host_rsa_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCXMxVRzmFWxfrRB9XiZ/3HNM+xkYYE+IMGuOZD04M2ezU25XjT6cPajzpFmzTxR2qEpRCKHeVnSG5nT6UXQp7760brTN7m5sDasbMnHgYhfC/3of2k6qTR9X/JHRpgwzq5+6FtEe41w1H1dXoNIr4YTKnLijSp8MKqBtPPNUpzEVb95YKZGdCDoCbbYOyS/Dc8azUDo0mqM542J3nA2Sq9HCP0BAv43hrTAtCZodkB5wo18exbfPKsjGtA3de2npybFoSRbavZmT8L/b2iHZX6FRaqLsbYGKtszCWu5OU7WBX5g5QVlLfOnGQ+LsF6d6pX5LlMwEU14uu4gNPvZFOaZXtHNHZqnBcjd/sMaw5N/atFsPgtQ0vYnrEAD6oDjj0uXMsnmgUWTZBi3q2GBWWPqhE+0ASb2xBQGa+tWWTVYbuuYlA7hUX0URK8FcLw4UOYJjscDjnjlvQkghd2esP5NxV1NXkG2XYNHnf1E/tH4+AHJzy+qOQom7ehda96FZ8= someone@localhost
diff --git a/examples/singleton-container/build.gradle b/examples/singleton-container/build.gradle
index 4b73e1125c6..fecaa3d0499 100644
--- a/examples/singleton-container/build.gradle
+++ b/examples/singleton-container/build.gradle
@@ -8,7 +8,7 @@ repositories {
dependencies {
- implementation 'redis.clients:jedis:5.1.5'
+ implementation 'redis.clients:jedis:6.0.0'
implementation 'com.google.code.gson:gson:2.11.0'
implementation 'com.google.guava:guava:23.0'
compileOnly 'org.slf4j:slf4j-api:1.7.36'
@@ -16,7 +16,8 @@ dependencies {
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
testImplementation 'org.testcontainers:testcontainers'
testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/solr-container/build.gradle b/examples/solr-container/build.gradle
index 3b61202818e..58c380676d2 100644
--- a/examples/solr-container/build.gradle
+++ b/examples/solr-container/build.gradle
@@ -10,12 +10,13 @@ dependencies {
compileOnly "org.projectlombok:lombok:1.18.34"
annotationProcessor "org.projectlombok:lombok:1.18.34"
- implementation 'org.apache.solr:solr-solrj:8.11.3'
+ implementation 'org.apache.solr:solr-solrj:8.11.4'
testImplementation 'org.testcontainers:testcontainers'
testImplementation 'org.testcontainers:solr'
testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/examples/spring-boot-kotlin-redis/build.gradle.kts b/examples/spring-boot-kotlin-redis/build.gradle.kts
index 7a4aa917f35..2d5d6bf9640 100644
--- a/examples/spring-boot-kotlin-redis/build.gradle.kts
+++ b/examples/spring-boot-kotlin-redis/build.gradle.kts
@@ -1,7 +1,7 @@
import org.jetbrains.kotlin.gradle.tasks.KotlinCompile
plugins {
- id("org.springframework.boot") version "2.7.10"
+ id("org.springframework.boot") version "2.7.18"
kotlin("jvm") version "1.8.22"
kotlin("plugin.spring") version "1.8.22"
}
@@ -21,9 +21,7 @@ dependencies {
testImplementation("org.springframework.boot:spring-boot-starter-test")
testImplementation("org.testcontainers:testcontainers")
- testImplementation("org.junit.jupiter:junit-jupiter:5.10.0")
-
-
+ testRuntimeOnly("org.junit.platform:junit-platform-launcher:1.8.2")
}
tasks.withType<KotlinCompile> {
diff --git a/examples/spring-boot/build.gradle b/examples/spring-boot/build.gradle
index 657b544e119..2001c393afa 100644
--- a/examples/spring-boot/build.gradle
+++ b/examples/spring-boot/build.gradle
@@ -17,7 +17,7 @@ dependencies {
runtimeOnly 'org.postgresql:postgresql'
testImplementation 'org.springframework.boot:spring-boot-starter-test'
testImplementation 'org.testcontainers:postgresql'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testRuntimeOnly "org.junit.platform:junit-platform-launcher:1.8.2"
}
test {
diff --git a/examples/zookeeper/build.gradle b/examples/zookeeper/build.gradle
index 50555fe8f85..835280cdfc0 100644
--- a/examples/zookeeper/build.gradle
+++ b/examples/zookeeper/build.gradle
@@ -7,11 +7,12 @@ repositories {
}
dependencies {
- testImplementation 'org.apache.curator:curator-framework:5.7.0'
+ testImplementation 'org.apache.curator:curator-framework:5.8.0'
testImplementation 'org.testcontainers:testcontainers'
testImplementation 'org.assertj:assertj-core:3.26.3'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.11.0'
}
test {
diff --git a/gradle.properties b/gradle.properties
index be57d9abd98..d83ed4b9998 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -3,4 +3,4 @@ org.gradle.caching=true
org.gradle.configureondemand=true
org.gradle.jvmargs=-Xmx2g
-testcontainers.version=1.20.4
+testcontainers.version=1.21.3
diff --git a/gradle/publishing.gradle b/gradle/publishing.gradle
index 618ada345cc..9ecd231d3e1 100644
--- a/gradle/publishing.gradle
+++ b/gradle/publishing.gradle
@@ -1,4 +1,5 @@
apply plugin: 'maven-publish'
+apply plugin: 'org.jreleaser'
task sourceJar(type: Jar) {
archiveClassifier.set( 'sources')
@@ -95,10 +96,27 @@ publishing {
}
repositories {
maven {
- url("https://oss.sonatype.org/service/local/staging/deploy/maven2")
- credentials {
- username = System.getenv("OSSRH_USERNAME")
- password = System.getenv("OSSRH_PASSWORD")
+ url = layout.buildDirectory.dir('staging-deploy')
+ }
+ }
+}
+
+jreleaser {
+ signing {
+ active = 'ALWAYS'
+ armored = true
+ }
+ deploy {
+ maven {
+ mavenCentral {
+ central {
+ active = 'ALWAYS'
+ url = 'https://central.sonatype.com/api/v1/publisher'
+ stagingRepository(layout.buildDirectory.dir("staging-deploy").get().toString())
+ stage = 'UPLOAD'
+ applyMavenCentralRules = true
+ namespace = 'org.testcontainers'
+ }
}
}
}
diff --git a/gradle/shading.gradle b/gradle/shading.gradle
index 10ceb5086a4..88f25cc3f22 100644
--- a/gradle/shading.gradle
+++ b/gradle/shading.gradle
@@ -4,7 +4,7 @@ apply plugin: 'com.gradleup.shadow'
configurations {
shaded
- [apiElements, implementation, compileOnly, testCompile]*.extendsFrom shaded
+ [apiElements, implementation]*.extendsFrom shaded
}
configurations.api.canBeResolved = true
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 2c3521197d7..1b33c55baab 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index efe2ff34492..dbc089ed3d7 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=ed1a8d686605fd7c23bdf62c7fc7add1c5b23b2bbc3721e661934ef4a4911d7c
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.3-all.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/gradlew b/gradlew
index f5feea6d6b1..23d15a93670 100755
--- a/gradlew
+++ b/gradlew
@@ -86,8 +86,7 @@ done
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
-APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
-' "$PWD" ) || exit
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum
@@ -115,7 +114,7 @@ case "$( uname )" in #(
NONSTOP* ) nonstop=true ;;
esac
-CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+CLASSPATH="\\\"\\\""
# Determine the Java command to use to start the JVM.
@@ -206,7 +205,7 @@ fi
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
-# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
@@ -214,7 +213,7 @@ DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
- org.gradle.wrapper.GradleWrapperMain \
+ -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \
"$@"
# Stop when "xargs" is not available.
diff --git a/gradlew.bat b/gradlew.bat
index 9b42019c791..5eed7ee8452 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -70,11 +70,11 @@ goto fail
:execute
@rem Setup the command line
-set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+set CLASSPATH=
@rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %*
:end
@rem End local scope for the variables with windows NT shell
diff --git a/mkdocs.yml b/mkdocs.yml
index c308128969b..3e39a67f959 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -72,6 +72,7 @@ nav:
- modules/databases/postgres.md
- modules/databases/presto.md
- modules/databases/questdb.md
+ - modules/databases/scylladb.md
- modules/databases/tidb.md
- modules/databases/timeplus.md
- modules/databases/trino.md
@@ -81,6 +82,8 @@ nav:
- modules/chromadb.md
- modules/consul.md
- modules/docker_compose.md
+ - modules/docker_mcp_gateway.md
+ - modules/docker_model_runner.md
- modules/elasticsearch.md
- modules/gcloud.md
- modules/grafana.md
@@ -88,6 +91,7 @@ nav:
- modules/k3s.md
- modules/k6.md
- modules/kafka.md
+ - modules/ldap.md
- modules/localstack.md
- modules/milvus.md
- modules/minio.md
@@ -95,6 +99,7 @@ nav:
- modules/nginx.md
- modules/ollama.md
- modules/openfga.md
+ - modules/pinecone.md
- modules/pulsar.md
- modules/qdrant.md
- modules/rabbitmq.md
@@ -136,4 +141,4 @@ nav:
- bounty.md
edit_uri: edit/main/docs/
extra:
- latest_version: 1.20.4
+ latest_version: 1.21.3
diff --git a/modules/activemq/build.gradle b/modules/activemq/build.gradle
index dc20113a84b..da683e0ad13 100644
--- a/modules/activemq/build.gradle
+++ b/modules/activemq/build.gradle
@@ -3,7 +3,7 @@ description = "Testcontainers :: ActiveMQ"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation "org.apache.activemq:activemq-client:6.1.2"
- testImplementation "org.apache.activemq:artemis-jakarta-client:2.37.0"
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation "org.apache.activemq:activemq-client:6.1.7"
+ testImplementation "org.apache.activemq:artemis-jakarta-client:2.41.0"
}
diff --git a/modules/azure/build.gradle b/modules/azure/build.gradle
index 3dc97d03fce..d3baf572321 100644
--- a/modules/azure/build.gradle
+++ b/modules/azure/build.gradle
@@ -2,13 +2,17 @@ description = "Testcontainers :: Azure"
dependencies {
api project(':testcontainers')
+ api project(':mssqlserver')
// TODO use JDK's HTTP client and/or Apache HttpClient5
shaded 'com.squareup.okhttp3:okhttp:4.12.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'com.azure:azure-cosmos:4.63.3'
- testImplementation 'com.azure:azure-storage-blob:12.29.0'
- testImplementation 'com.azure:azure-storage-queue:12.24.0'
- testImplementation 'com.azure:azure-data-tables:12.5.0'
- testImplementation 'com.azure:azure-messaging-eventhubs:5.19.2'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation platform("com.azure:azure-sdk-bom:1.2.32")
+ testImplementation 'com.azure:azure-cosmos'
+ testImplementation 'com.azure:azure-storage-blob'
+ testImplementation 'com.azure:azure-storage-queue'
+ testImplementation 'com.azure:azure-data-tables'
+ testImplementation 'com.azure:azure-messaging-eventhubs'
+ testImplementation 'com.azure:azure-messaging-servicebus'
+ testImplementation 'com.microsoft.sqlserver:mssql-jdbc:13.1.0.jre8-preview'
}
diff --git a/modules/azure/src/main/java/org/testcontainers/azure/AzureEventHubsContainer.java b/modules/azure/src/main/java/org/testcontainers/azure/EventHubsEmulatorContainer.java
similarity index 87%
rename from modules/azure/src/main/java/org/testcontainers/azure/AzureEventHubsContainer.java
rename to modules/azure/src/main/java/org/testcontainers/azure/EventHubsEmulatorContainer.java
index d611a2442cc..257f71a1424 100644
--- a/modules/azure/src/main/java/org/testcontainers/azure/AzureEventHubsContainer.java
+++ b/modules/azure/src/main/java/org/testcontainers/azure/EventHubsEmulatorContainer.java
@@ -16,7 +16,7 @@
* AMQP: 5672
*
*/
-public class AzureEventHubsContainer extends GenericContainer<AzureEventHubsContainer> {
+public class EventHubsEmulatorContainer extends GenericContainer<EventHubsEmulatorContainer> {
private static final int DEFAULT_AMQP_PORT = 5672;
@@ -32,14 +32,14 @@ public class AzureEventHubsContainer extends GenericContainer
+ * Supported image: {@code mcr.microsoft.com/azure-messaging/servicebus-emulator}
+ *
+ * Exposed port: 5672
+ */
+public class ServiceBusEmulatorContainer extends GenericContainer<ServiceBusEmulatorContainer> {
+
+ private static final String CONNECTION_STRING_FORMAT =
+ "Endpoint=sb://%s:%d;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=SAS_KEY_VALUE;UseDevelopmentEmulator=true;";
+
+ private static final int DEFAULT_PORT = 5672;
+
+ private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse(
+ "mcr.microsoft.com/azure-messaging/servicebus-emulator"
+ );
+
+ private MSSQLServerContainer<?> msSqlServerContainer;
+
+ /**
+ * @param dockerImageName The specified docker image name to run
+ */
+ public ServiceBusEmulatorContainer(final String dockerImageName) {
+ this(DockerImageName.parse(dockerImageName));
+ }
+
+ /**
+ * @param dockerImageName The specified docker image name to run
+ */
+ public ServiceBusEmulatorContainer(final DockerImageName dockerImageName) {
+ super(dockerImageName);
+ dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
+ withExposedPorts(DEFAULT_PORT);
+ withEnv("SQL_WAIT_INTERVAL", "0");
+ waitingFor(Wait.forLogMessage(".*Emulator Service is Successfully Up!.*", 1));
+ }
+
+ /**
+ * Sets the MS SQL Server dependency needed by the Service Bus Container,
+ *
+ * @param msSqlServerContainer The MS SQL Server container used by Service Bus as a dependency
+ * @return this
+ */
+ public ServiceBusEmulatorContainer withMsSqlServerContainer(final MSSQLServerContainer<?> msSqlServerContainer) {
+ dependsOn(msSqlServerContainer);
+ this.msSqlServerContainer = msSqlServerContainer;
+ return this;
+ }
+
+ /**
+ * Provide the Service Bus configuration JSON.
+ *
+ * @param config The configuration
+ * @return this
+ */
+ public ServiceBusEmulatorContainer withConfig(final Transferable config) {
+ withCopyToContainer(config, "/ServiceBus_Emulator/ConfigFiles/Config.json");
+ return this;
+ }
+
+ /**
+ * Accepts the EULA of the container.
+ *
+ * @return this
+ */
+ public ServiceBusEmulatorContainer acceptLicense() {
+ withEnv("ACCEPT_EULA", "Y");
+ return this;
+ }
+
+ @Override
+ protected void configure() {
+ if (msSqlServerContainer == null) {
+ throw new IllegalStateException(
+ "The image " +
+ getDockerImageName() +
+ " requires a Microsoft SQL Server container. Please provide one with the withMsSqlServerContainer method!"
+ );
+ }
+ withEnv("SQL_SERVER", msSqlServerContainer.getNetworkAliases().get(0));
+ withEnv("MSSQL_SA_PASSWORD", msSqlServerContainer.getPassword());
+ // If license was not accepted programmatically, check if it was accepted via resource file
+ if (!getEnvMap().containsKey("ACCEPT_EULA")) {
+ LicenseAcceptance.assertLicenseAccepted(this.getDockerImageName());
+ acceptLicense();
+ }
+ }
+
+ /**
+ * Returns the connection string.
+ *
+ * @return connection string
+ */
+ public String getConnectionString() {
+ return String.format(CONNECTION_STRING_FORMAT, getHost(), getMappedPort(DEFAULT_PORT));
+ }
+}
diff --git a/modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java b/modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java
similarity index 95%
rename from modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java
rename to modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java
index c0febc2ed86..58d1d7ad59c 100644
--- a/modules/azure/src/test/java/org/testcontainers/azure/AzureEventHubsContainerTest.java
+++ b/modules/azure/src/test/java/org/testcontainers/azure/EventHubsEmulatorContainerTest.java
@@ -19,7 +19,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.waitAtMost;
-public class AzureEventHubsContainerTest {
+public class EventHubsEmulatorContainerTest {
@Rule
// network {
@@ -36,7 +36,7 @@ public class AzureEventHubsContainerTest {
@Rule
// emulatorContainer {
- public AzureEventHubsContainer emulator = new AzureEventHubsContainer(
+ public EventHubsEmulatorContainer emulator = new EventHubsEmulatorContainer(
"mcr.microsoft.com/azure-messaging/eventhubs-emulator:2.0.1"
)
.acceptLicense()
diff --git a/modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java b/modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java
new file mode 100644
index 00000000000..4676e41784a
--- /dev/null
+++ b/modules/azure/src/test/java/org/testcontainers/azure/ServiceBusEmulatorContainerTest.java
@@ -0,0 +1,105 @@
+package org.testcontainers.azure;
+
+import com.azure.messaging.servicebus.ServiceBusClientBuilder;
+import com.azure.messaging.servicebus.ServiceBusErrorContext;
+import com.azure.messaging.servicebus.ServiceBusException;
+import com.azure.messaging.servicebus.ServiceBusMessage;
+import com.azure.messaging.servicebus.ServiceBusProcessorClient;
+import com.azure.messaging.servicebus.ServiceBusReceivedMessageContext;
+import com.azure.messaging.servicebus.ServiceBusSenderClient;
+import com.github.dockerjava.api.model.Capability;
+import org.assertj.core.api.Assertions;
+import org.junit.Rule;
+import org.junit.Test;
+import org.testcontainers.containers.MSSQLServerContainer;
+import org.testcontainers.containers.Network;
+import org.testcontainers.utility.MountableFile;
+
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.awaitility.Awaitility.await;
+
+public class ServiceBusEmulatorContainerTest {
+
+ @Rule
+ // network {
+ public Network network = Network.newNetwork();
+
+ // }
+
+ @Rule
+ // sqlContainer {
+ public MSSQLServerContainer<?> mssqlServerContainer = new MSSQLServerContainer<>(
+ "mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04"
+ )
+ .acceptLicense()
+ .withPassword("yourStrong(!)Password")
+ .withCreateContainerCmdModifier(cmd -> {
+ cmd.getHostConfig().withCapAdd(Capability.SYS_PTRACE);
+ })
+ .withNetwork(network);
+
+ // }
+
+ @Rule
+ // emulatorContainer {
+ public ServiceBusEmulatorContainer emulator = new ServiceBusEmulatorContainer(
+ "mcr.microsoft.com/azure-messaging/servicebus-emulator:1.1.2"
+ )
+ .acceptLicense()
+ .withConfig(MountableFile.forClasspathResource("/service-bus-config.json"))
+ .withNetwork(network)
+ .withMsSqlServerContainer(mssqlServerContainer);
+
+ // }
+
+ @Test
+ public void testWithClient() {
+ assertThat(emulator.getConnectionString()).startsWith("Endpoint=sb://");
+
+ // senderClient {
+ ServiceBusSenderClient senderClient = new ServiceBusClientBuilder()
+ .connectionString(emulator.getConnectionString())
+ .sender()
+ .queueName("queue.1")
+ .buildClient();
+ // }
+
+ await()
+ .atMost(20, TimeUnit.SECONDS)
+ .ignoreException(ServiceBusException.class)
+ .until(() -> {
+ senderClient.sendMessage(new ServiceBusMessage("Hello, Testcontainers!"));
+ return true;
+ });
+ senderClient.close();
+
+ final List<String> received = new CopyOnWriteArrayList<>();
+ Consumer<ServiceBusReceivedMessageContext> messageConsumer = m -> {
+ received.add(m.getMessage().getBody().toString());
+ m.complete();
+ };
+ Consumer<ServiceBusErrorContext> errorConsumer = e -> Assertions.fail("Unexpected error: " + e);
+ // processorClient {
+ ServiceBusProcessorClient processorClient = new ServiceBusClientBuilder()
+ .connectionString(emulator.getConnectionString())
+ .processor()
+ .queueName("queue.1")
+ .processMessage(messageConsumer)
+ .processError(errorConsumer)
+ .buildProcessorClient();
+ // }
+ processorClient.start();
+
+ await()
+ .atMost(20, TimeUnit.SECONDS)
+ .untilAsserted(() -> {
+ assertThat(received).hasSize(1).containsExactlyInAnyOrder("Hello, Testcontainers!");
+ });
+ processorClient.close();
+ }
+}
diff --git a/modules/azure/src/test/resources/service-bus-config.json b/modules/azure/src/test/resources/service-bus-config.json
new file mode 100644
index 00000000000..18ac2e69c7b
--- /dev/null
+++ b/modules/azure/src/test/resources/service-bus-config.json
@@ -0,0 +1,29 @@
+{
+ "UserConfig": {
+ "Namespaces": [
+ {
+ "Name": "sbemulatorns",
+ "Queues": [
+ {
+ "Name": "queue.1",
+ "Properties": {
+ "DeadLetteringOnMessageExpiration": false,
+ "DefaultMessageTimeToLive": "PT1H",
+ "DuplicateDetectionHistoryTimeWindow": "PT20S",
+ "ForwardDeadLetteredMessagesTo": "",
+ "ForwardTo": "",
+ "LockDuration": "PT1M",
+ "MaxDeliveryCount": 3,
+ "RequiresDuplicateDetection": false,
+ "RequiresSession": false
+ }
+ }
+ ],
+ "Topics": []
+ }
+ ],
+ "Logging": {
+ "Type": "File"
+ }
+ }
+}
diff --git a/modules/cassandra/build.gradle b/modules/cassandra/build.gradle
index 3d27fe65cac..e146c26e69e 100644
--- a/modules/cassandra/build.gradle
+++ b/modules/cassandra/build.gradle
@@ -11,5 +11,5 @@ dependencies {
api "com.datastax.cassandra:cassandra-driver-core:3.10.0"
testImplementation 'com.datastax.oss:java-driver-core:4.17.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/cassandra/src/main/java/org/testcontainers/cassandra/CassandraContainer.java b/modules/cassandra/src/main/java/org/testcontainers/cassandra/CassandraContainer.java
index c687c63eaa3..73a0c83443e 100644
--- a/modules/cassandra/src/main/java/org/testcontainers/cassandra/CassandraContainer.java
+++ b/modules/cassandra/src/main/java/org/testcontainers/cassandra/CassandraContainer.java
@@ -9,10 +9,7 @@
import org.testcontainers.utility.DockerImageName;
import org.testcontainers.utility.MountableFile;
-import java.io.File;
import java.net.InetSocketAddress;
-import java.net.URISyntaxException;
-import java.net.URL;
import java.util.Optional;
/**
@@ -30,6 +27,8 @@ public class CassandraContainer extends GenericContainer {
private static final String DEFAULT_LOCAL_DATACENTER = "datacenter1";
+ private static final String DEFAULT_INIT_SCRIPT_FILENAME = "init.cql";
+
private static final String CONTAINER_CONFIG_LOCATION = "/etc/cassandra";
private static final String USERNAME = "cassandra";
@@ -80,26 +79,25 @@ protected void containerIsStarted(InspectContainerResponse containerInfo) {
* Load init script content and apply it to the database if initScriptPath is set
*/
private void runInitScriptIfRequired() {
- if (initScriptPath != null) {
+ if (this.initScriptPath != null) {
try {
- URL resource = Thread.currentThread().getContextClassLoader().getResource(initScriptPath);
- if (resource == null) {
- logger().warn("Could not load classpath init script: {}", initScriptPath);
- throw new ScriptLoadException(
- "Could not load classpath init script: " + initScriptPath + ". Resource not found."
- );
- }
- // The init script is executed as is by the cqlsh command, so copy it into the container.
- String targetInitScriptName = new File(resource.toURI()).getName();
- copyFileToContainer(MountableFile.forClasspathResource(initScriptPath), targetInitScriptName);
- new CassandraDatabaseDelegate(this).execute(null, targetInitScriptName, -1, false, false);
- } catch (URISyntaxException e) {
- logger().warn("Could not copy init script into container: {}", initScriptPath);
- throw new ScriptLoadException("Could not copy init script into container: " + initScriptPath, e);
+ final MountableFile originalInitScript = MountableFile.forClasspathResource(this.initScriptPath);
+ // The init script is executed as is by the cqlsh command, so copy it into the container. The name
+ // of the script is generic since it's not important to keep the original name.
+ copyFileToContainer(originalInitScript, DEFAULT_INIT_SCRIPT_FILENAME);
+ new CassandraDatabaseDelegate(this).execute(null, DEFAULT_INIT_SCRIPT_FILENAME, -1, false, false);
+ } catch (IllegalArgumentException e) {
+ // MountableFile.forClasspathResource will throw an IllegalArgumentException if the resource cannot
+ // be found.
+ logger().warn("Could not load classpath init script: {}", this.initScriptPath);
+ throw new ScriptLoadException(
+ "Could not load classpath init script: " + this.initScriptPath + ". Resource not found.",
+ e
+ );
} catch (ScriptUtils.ScriptStatementFailedException e) {
- logger().error("Error while executing init script: {}", initScriptPath, e);
+ logger().error("Error while executing init script: {}", this.initScriptPath, e);
throw new ScriptUtils.UncategorizedScriptException(
- "Error while executing init script: " + initScriptPath,
+ "Error while executing init script: " + this.initScriptPath,
e
);
}
diff --git a/modules/cassandra/src/test/java/org/testcontainers/cassandra/CassandraContainerTest.java b/modules/cassandra/src/test/java/org/testcontainers/cassandra/CassandraContainerTest.java
index 632966d1bb3..d156a28ef0c 100644
--- a/modules/cassandra/src/test/java/org/testcontainers/cassandra/CassandraContainerTest.java
+++ b/modules/cassandra/src/test/java/org/testcontainers/cassandra/CassandraContainerTest.java
@@ -8,6 +8,7 @@
import org.testcontainers.utility.DockerImageName;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.catchThrowable;
public class CassandraContainerTest {
@@ -81,6 +82,16 @@ public void testInitScript() {
}
}
+ @Test
+ public void testNonexistentInitScript() {
+ try (
+ CassandraContainer cassandraContainer = new CassandraContainer(CASSANDRA_IMAGE)
+ .withInitScript("unknown_script.cql")
+ ) {
+ assertThat(catchThrowable(cassandraContainer::start)).isInstanceOf(ContainerLaunchException.class);
+ }
+ }
+
@Test
public void testInitScriptWithRequiredAuthentication() {
try (
diff --git a/modules/cassandra/src/test/resources/cassandra-auth-required-configuration/cassandra.yaml b/modules/cassandra/src/test/resources/cassandra-auth-required-configuration/cassandra.yaml
index 14095d4c627..7425881b8fe 100644
--- a/modules/cassandra/src/test/resources/cassandra-auth-required-configuration/cassandra.yaml
+++ b/modules/cassandra/src/test/resources/cassandra-auth-required-configuration/cassandra.yaml
@@ -250,7 +250,7 @@ commit_failure_policy: stop
#
# Valid values are either "auto" (omitting the value) or a value greater 0.
#
-# Note that specifying a too large value will result in long running GCs and possbily
+# Note that specifying a too large value will result in long running GCs and possibly
# out-of-memory errors. Keep the value at a small fraction of the heap.
#
# If you constantly see "prepared statements discarded in the last minute because
@@ -259,7 +259,7 @@ commit_failure_policy: stop
# i.e. use bind markers for variable parts.
#
# Do only change the default value, if you really have more prepared statements than
-# fit in the cache. In most cases it is not neccessary to change this value.
+# fit in the cache. In most cases it is not necessary to change this value.
# Constantly re-preparing statements is a performance penalty.
#
# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
@@ -309,7 +309,7 @@ key_cache_save_period: 14400
# Fully off-heap row cache implementation (default).
#
# org.apache.cassandra.cache.SerializingCacheProvider
-# This is the row cache implementation availabile
+# This is the row cache implementation available
# in previous releases of Cassandra.
# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
@@ -444,7 +444,7 @@ concurrent_counter_writes: 32
concurrent_materialized_view_writes: 32
# Maximum memory to use for sstable chunk cache and buffer pooling.
-# 32MB of this are reserved for pooling buffers, the rest is used as an
+# 32MB of this are reserved for pooling buffers, the rest is used as a
# cache that holds uncompressed sstable chunks.
# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
# so is in addition to the memory allocated for heap. The cache also has on-heap
@@ -553,7 +553,7 @@ memtable_allocation_type: heap_buffers
# new space for cdc-tracked tables has been made available. Default to 250ms
# cdc_free_space_check_interval_ms: 250
-# A fixed memory pool size in MB for for SSTable index summaries. If left
+# A fixed memory pool size in MB for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
# all index summaries exceeds this limit, SSTables with low read rates will
# shrink their index summaries in order to meet this limit. However, this
@@ -778,7 +778,7 @@ auto_snapshot: true
# number of rows per partition. The competing goals are these:
#
# - a smaller granularity means more index entries are generated
-# and looking up rows withing the partition by collation column
+# and looking up rows within the partition by collation column
# is faster
# - but, Cassandra will keep the collation index in memory for hot
# rows (as part of the key cache), so a larger granularity means
@@ -1109,7 +1109,7 @@ windows_timer_interval: 1
# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
+# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
# can still (and should!) be in the keystore and will be used on decrypt operations
# (to handle the case of key rotation).
#
@@ -1143,7 +1143,7 @@ transparent_data_encryption_options:
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exaust the server heap.
+# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
diff --git a/modules/cassandra/src/test/resources/cassandra-test-configuration-example/cassandra.yaml b/modules/cassandra/src/test/resources/cassandra-test-configuration-example/cassandra.yaml
index 5b57b2a8e58..426dea64771 100644
--- a/modules/cassandra/src/test/resources/cassandra-test-configuration-example/cassandra.yaml
+++ b/modules/cassandra/src/test/resources/cassandra-test-configuration-example/cassandra.yaml
@@ -250,7 +250,7 @@ commit_failure_policy: stop
#
# Valid values are either "auto" (omitting the value) or a value greater 0.
#
-# Note that specifying a too large value will result in long running GCs and possbily
+# Note that specifying a too large value will result in long running GCs and possibly
# out-of-memory errors. Keep the value at a small fraction of the heap.
#
# If you constantly see "prepared statements discarded in the last minute because
@@ -259,7 +259,7 @@ commit_failure_policy: stop
# i.e. use bind markers for variable parts.
#
# Do only change the default value, if you really have more prepared statements than
-# fit in the cache. In most cases it is not neccessary to change this value.
+# fit in the cache. In most cases it is not necessary to change this value.
# Constantly re-preparing statements is a performance penalty.
#
# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
@@ -309,7 +309,7 @@ key_cache_save_period: 14400
# Fully off-heap row cache implementation (default).
#
# org.apache.cassandra.cache.SerializingCacheProvider
-# This is the row cache implementation availabile
+# This is the row cache implementation available
# in previous releases of Cassandra.
# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
@@ -444,7 +444,7 @@ concurrent_counter_writes: 32
concurrent_materialized_view_writes: 32
# Maximum memory to use for sstable chunk cache and buffer pooling.
-# 32MB of this are reserved for pooling buffers, the rest is used as an
+# 32MB of this are reserved for pooling buffers, the rest is used as a
# cache that holds uncompressed sstable chunks.
# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
# so is in addition to the memory allocated for heap. The cache also has on-heap
@@ -553,7 +553,7 @@ memtable_allocation_type: heap_buffers
# new space for cdc-tracked tables has been made available. Default to 250ms
# cdc_free_space_check_interval_ms: 250
-# A fixed memory pool size in MB for for SSTable index summaries. If left
+# A fixed memory pool size in MB for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
# all index summaries exceeds this limit, SSTables with low read rates will
# shrink their index summaries in order to meet this limit. However, this
@@ -778,7 +778,7 @@ auto_snapshot: true
# number of rows per partition. The competing goals are these:
#
# - a smaller granularity means more index entries are generated
-# and looking up rows withing the partition by collation column
+# and looking up rows within the partition by collation column
# is faster
# - but, Cassandra will keep the collation index in memory for hot
# rows (as part of the key cache), so a larger granularity means
@@ -1109,7 +1109,7 @@ windows_timer_interval: 1
# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
+# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
# can still (and should!) be in the keystore and will be used on decrypt operations
# (to handle the case of key rotation).
#
@@ -1143,7 +1143,7 @@ transparent_data_encryption_options:
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exaust the server heap.
+# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
diff --git a/modules/chromadb/build.gradle b/modules/chromadb/build.gradle
index bd6ae52d90f..5017c481ee2 100644
--- a/modules/chromadb/build.gradle
+++ b/modules/chromadb/build.gradle
@@ -3,6 +3,6 @@ description = "Testcontainers :: ChromaDB"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.rest-assured:rest-assured:5.5.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.rest-assured:rest-assured:5.5.5'
}
diff --git a/modules/chromadb/src/main/java/org/testcontainers/chromadb/ChromaDBContainer.java b/modules/chromadb/src/main/java/org/testcontainers/chromadb/ChromaDBContainer.java
index a1bccf3904f..af6c3df33fc 100644
--- a/modules/chromadb/src/main/java/org/testcontainers/chromadb/ChromaDBContainer.java
+++ b/modules/chromadb/src/main/java/org/testcontainers/chromadb/ChromaDBContainer.java
@@ -1,7 +1,9 @@
package org.testcontainers.chromadb;
+import lombok.extern.slf4j.Slf4j;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.ComparableVersion;
import org.testcontainers.utility.DockerImageName;
/**
@@ -11,6 +13,7 @@
*
* Exposed ports: 8000
*/
+@Slf4j
public class ChromaDBContainer extends GenericContainer<ChromaDBContainer> {
private static final DockerImageName DEFAULT_DOCKER_IMAGE = DockerImageName.parse("chromadb/chroma");
@@ -22,13 +25,32 @@ public ChromaDBContainer(String dockerImageName) {
}
public ChromaDBContainer(DockerImageName dockerImageName) {
+ this(dockerImageName, isVersion2(dockerImageName.getVersionPart()));
+ }
+
+ public ChromaDBContainer(DockerImageName dockerImageName, boolean isVersion2) {
super(dockerImageName);
+ String apiPath = isVersion2 ? "/api/v2/heartbeat" : "/api/v1/heartbeat";
dockerImageName.assertCompatibleWith(DEFAULT_DOCKER_IMAGE, GHCR_DOCKER_IMAGE);
withExposedPorts(8000);
- waitingFor(Wait.forHttp("/api/v1/heartbeat"));
+ waitingFor(Wait.forHttp(apiPath));
}
public String getEndpoint() {
return "http://" + getHost() + ":" + getFirstMappedPort();
}
+
+ private static boolean isVersion2(String version) {
+ if (version.equals("latest")) {
+ return true;
+ }
+
+ ComparableVersion comparableVersion = new ComparableVersion(version);
+ if (comparableVersion.isGreaterThanOrEqualTo("1.0.0")) {
+ return true;
+ }
+
+ log.warn("Version {} is less than 1.0.0 or not a semantic version.", version);
+ return false;
+ }
}
diff --git a/modules/chromadb/src/test/java/org/testcontainers/chromadb/ChromaDBContainerTest.java b/modules/chromadb/src/test/java/org/testcontainers/chromadb/ChromaDBContainerTest.java
index 6cc01ac4d59..0ec6b00601c 100644
--- a/modules/chromadb/src/test/java/org/testcontainers/chromadb/ChromaDBContainerTest.java
+++ b/modules/chromadb/src/test/java/org/testcontainers/chromadb/ChromaDBContainerTest.java
@@ -27,4 +27,22 @@ public void test() {
given().baseUri(chroma.getEndpoint()).when().get("/api/v1/databases/test").then().statusCode(200);
}
}
+
+ @Test
+ public void testVersion2() {
+ try (ChromaDBContainer chroma = new ChromaDBContainer("chromadb/chroma:1.0.0")) {
+ chroma.start();
+
+ given()
+ .baseUri(chroma.getEndpoint())
+ .when()
+ .body("{\"name\": \"test\"}")
+ .contentType(ContentType.JSON)
+ .post("/api/v2/tenants")
+ .then()
+ .statusCode(200);
+
+ given().baseUri(chroma.getEndpoint()).when().get("/api/v2/tenants/test").then().statusCode(200);
+ }
+ }
}
diff --git a/modules/clickhouse/build.gradle b/modules/clickhouse/build.gradle
index 6dec2131bde..d366825e261 100644
--- a/modules/clickhouse/build.gradle
+++ b/modules/clickhouse/build.gradle
@@ -5,11 +5,14 @@ dependencies {
api project(':jdbc')
compileOnly project(':r2dbc')
- compileOnly(group: 'com.clickhouse', name: 'clickhouse-r2dbc', version: '0.7.0', classifier: 'http')
+ compileOnly(group: 'com.clickhouse', name: 'clickhouse-r2dbc', version: '0.7.2', classifier: 'http')
testImplementation project(':jdbc-test')
- testRuntimeOnly(group: 'com.clickhouse', name: 'clickhouse-jdbc', version: '0.7.0', classifier: 'http')
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly(group: 'com.clickhouse', name: 'clickhouse-jdbc', version: '0.7.2', classifier: 'http')
+ testRuntimeOnly(group: 'com.clickhouse', name: 'jdbc-v2', version: '0.7.2', classifier: 'http')
+
+ testImplementation 'org.apache.httpcomponents.client5:httpclient5:5.4.2'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
testImplementation testFixtures(project(':r2dbc'))
- testRuntimeOnly(group: 'com.clickhouse', name: 'clickhouse-r2dbc', version: '0.7.0', classifier: 'http')
+ testRuntimeOnly(group: 'com.clickhouse', name: 'clickhouse-r2dbc', version: '0.7.2', classifier: 'http')
}
diff --git a/modules/clickhouse/src/main/java/org/testcontainers/clickhouse/ClickHouseContainer.java b/modules/clickhouse/src/main/java/org/testcontainers/clickhouse/ClickHouseContainer.java
index 65cb2db5738..d3256124de1 100644
--- a/modules/clickhouse/src/main/java/org/testcontainers/clickhouse/ClickHouseContainer.java
+++ b/modules/clickhouse/src/main/java/org/testcontainers/clickhouse/ClickHouseContainer.java
@@ -21,8 +21,6 @@
*/
public class ClickHouseContainer extends JdbcDatabaseContainer<ClickHouseContainer> {
- private static final String NAME = "clickhouse";
-
static final String CLICKHOUSE_CLICKHOUSE_SERVER = "clickhouse/clickhouse-server";
private static final DockerImageName CLICKHOUSE_IMAGE_NAME = DockerImageName.parse(CLICKHOUSE_CLICKHOUSE_SERVER);
@@ -31,15 +29,17 @@ public class ClickHouseContainer extends JdbcDatabaseContainer getLivenessCheckPortNumbers() {
@Override
public String getDriverClassName() {
- return DRIVER_CLASS_NAME;
+ try {
+ Class.forName(DRIVER_CLASS_NAME);
+ return DRIVER_CLASS_NAME;
+ } catch (ClassNotFoundException e) {
+ return LEGACY_V1_DRIVER_CLASS_NAME;
+ }
}
@Override
diff --git a/modules/clickhouse/src/main/java/org/testcontainers/containers/ClickHouseProvider.java b/modules/clickhouse/src/main/java/org/testcontainers/containers/ClickHouseProvider.java
index 80fb71bd5da..250631c1500 100644
--- a/modules/clickhouse/src/main/java/org/testcontainers/containers/ClickHouseProvider.java
+++ b/modules/clickhouse/src/main/java/org/testcontainers/containers/ClickHouseProvider.java
@@ -1,16 +1,24 @@
package org.testcontainers.containers;
+import org.testcontainers.clickhouse.ClickHouseContainer;
import org.testcontainers.utility.DockerImageName;
public class ClickHouseProvider extends JdbcDatabaseContainerProvider {
+ private static final String DEFAULT_TAG = "24.12-alpine";
+
@Override
public boolean supports(String databaseType) {
- return databaseType.equals(ClickHouseContainer.NAME);
+ return databaseType.equals("clickhouse");
+ }
+
+ @Override
+ public JdbcDatabaseContainer<?> newInstance() {
+ return newInstance(DEFAULT_TAG);
}
@Override
- public JdbcDatabaseContainer newInstance(String tag) {
- return new ClickHouseContainer(DockerImageName.parse(ClickHouseContainer.IMAGE).withTag(tag));
+ public JdbcDatabaseContainer<?> newInstance(String tag) {
+ return new ClickHouseContainer(DockerImageName.parse("clickhouse/clickhouse-server").withTag(tag));
}
}
diff --git a/modules/clickhouse/src/test/java/org/testcontainers/ClickhouseTestImages.java b/modules/clickhouse/src/test/java/org/testcontainers/ClickhouseTestImages.java
index f79fb24c7b4..eff4e19f70b 100644
--- a/modules/clickhouse/src/test/java/org/testcontainers/ClickhouseTestImages.java
+++ b/modules/clickhouse/src/test/java/org/testcontainers/ClickhouseTestImages.java
@@ -3,5 +3,7 @@
import org.testcontainers.utility.DockerImageName;
public interface ClickhouseTestImages {
- DockerImageName CLICKHOUSE_IMAGE = DockerImageName.parse("clickhouse/clickhouse-server:21.9.2-alpine");
+ DockerImageName CLICKHOUSE_IMAGE = DockerImageName.parse("clickhouse/clickhouse-server:21.11.11-alpine");
+
+ DockerImageName CLICKHOUSE_24_12_IMAGE = DockerImageName.parse("clickhouse/clickhouse-server:24.12-alpine");
}
diff --git a/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseContainerTest.java b/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseContainerTest.java
index e440af648af..d5389b610c4 100644
--- a/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseContainerTest.java
+++ b/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseContainerTest.java
@@ -1,6 +1,7 @@
package org.testcontainers.clickhouse;
import org.junit.Test;
+import org.testcontainers.ClickhouseTestImages;
import org.testcontainers.db.AbstractContainerDatabaseTest;
import java.sql.ResultSet;
@@ -12,7 +13,7 @@ public class ClickHouseContainerTest extends AbstractContainerDatabaseTest {
@Test
public void testSimple() throws SQLException {
- try (ClickHouseContainer clickhouse = new ClickHouseContainer("clickhouse/clickhouse-server:21.9.2-alpine")) {
+ try (ClickHouseContainer clickhouse = new ClickHouseContainer("clickhouse/clickhouse-server:21.11-alpine")) {
clickhouse.start();
ResultSet resultSet = performQuery(clickhouse, "SELECT 1");
@@ -25,11 +26,12 @@ public void testSimple() throws SQLException {
@Test
public void customCredentialsWithUrlParams() throws SQLException {
try (
- ClickHouseContainer clickhouse = new ClickHouseContainer("clickhouse/clickhouse-server:21.9.2-alpine")
- .withUsername("test")
- .withPassword("test")
+ ClickHouseContainer clickhouse = new ClickHouseContainer("clickhouse/clickhouse-server:21.11.2-alpine")
+ .withUsername("default")
+ .withPassword("")
.withDatabaseName("test")
- .withUrlParam("max_result_rows", "5")
+ // The new driver uses the prefix `clickhouse_setting_` for session settings
+ .withUrlParam("clickhouse_setting_max_result_rows", "5")
) {
clickhouse.start();
@@ -42,4 +44,16 @@ public void customCredentialsWithUrlParams() throws SQLException {
assertThat(resultSetInt).isEqualTo(5);
}
}
+
+ @Test
+ public void testNewAuth() throws SQLException {
+ try (ClickHouseContainer clickhouse = new ClickHouseContainer(ClickhouseTestImages.CLICKHOUSE_24_12_IMAGE)) {
+ clickhouse.start();
+
+ ResultSet resultSet = performQuery(clickhouse, "SELECT 1");
+
+ int resultSetInt = resultSet.getInt(1);
+ assertThat(resultSetInt).isEqualTo(1);
+ }
+ }
}
diff --git a/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseR2DBCDatabaseContainerTest.java b/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseR2DBCDatabaseContainerTest.java
index a0333e44f5c..000fff8983a 100644
--- a/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseR2DBCDatabaseContainerTest.java
+++ b/modules/clickhouse/src/test/java/org/testcontainers/clickhouse/ClickHouseR2DBCDatabaseContainerTest.java
@@ -12,11 +12,11 @@ protected ConnectionFactoryOptions getOptions(ClickHouseContainer container) {
@Override
protected String createR2DBCUrl() {
- return "r2dbc:tc:clickhouse:///db?TC_IMAGE_TAG=21.9.2-alpine";
+ return "r2dbc:tc:clickhouse:///db?TC_IMAGE_TAG=21.11.11-alpine";
}
@Override
protected ClickHouseContainer createContainer() {
- return new ClickHouseContainer("clickhouse/clickhouse-server:21.9.2-alpine");
+ return new ClickHouseContainer("clickhouse/clickhouse-server:21.11.11-alpine");
}
}
diff --git a/modules/cockroachdb/build.gradle b/modules/cockroachdb/build.gradle
index fa1e7f06839..6340d918e7e 100644
--- a/modules/cockroachdb/build.gradle
+++ b/modules/cockroachdb/build.gradle
@@ -4,6 +4,12 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly 'org.postgresql:postgresql:42.7.7'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.CockroachContainer"
+ ]
}
diff --git a/modules/cockroachdb/src/main/java/org/testcontainers/containers/CockroachContainer.java b/modules/cockroachdb/src/main/java/org/testcontainers/containers/CockroachContainer.java
index a7bb1fef7ed..f0c0cf123fb 100644
--- a/modules/cockroachdb/src/main/java/org/testcontainers/containers/CockroachContainer.java
+++ b/modules/cockroachdb/src/main/java/org/testcontainers/containers/CockroachContainer.java
@@ -52,14 +52,6 @@ public class CockroachContainer extends JdbcDatabaseContainer
* .withConsulCommand("secrets enable pki")
* .withConsulCommand("secrets enable transit")
diff --git a/modules/couchbase/build.gradle b/modules/couchbase/build.gradle
index 973af4b2ffb..47b8a28b8fa 100644
--- a/modules/couchbase/build.gradle
+++ b/modules/couchbase/build.gradle
@@ -5,7 +5,13 @@ dependencies {
// TODO use JDK's HTTP client and/or Apache HttpClient5
shaded 'com.squareup.okhttp3:okhttp:4.12.0'
- testImplementation 'com.couchbase.client:java-client:3.7.3'
- testImplementation 'org.awaitility:awaitility:4.2.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'com.couchbase.client:java-client:3.8.3'
+ testImplementation 'org.awaitility:awaitility:4.3.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.couchbase.CouchbaseContainer"
+ ]
}
diff --git a/modules/couchbase/src/main/java/org/testcontainers/couchbase/CouchbaseContainer.java b/modules/couchbase/src/main/java/org/testcontainers/couchbase/CouchbaseContainer.java
index 25e7637bbaf..14997d253af 100644
--- a/modules/couchbase/src/main/java/org/testcontainers/couchbase/CouchbaseContainer.java
+++ b/modules/couchbase/src/main/java/org/testcontainers/couchbase/CouchbaseContainer.java
@@ -92,8 +92,6 @@ public class CouchbaseContainer extends GenericContainer<CouchbaseContainer> {
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("couchbase/server");
- private static final String DEFAULT_TAG = "6.5.1";
-
private static final ObjectMapper MAPPER = new ObjectMapper();
private static final OkHttpClient HTTP_CLIENT = new OkHttpClient();
@@ -125,15 +123,6 @@ public class CouchbaseContainer extends GenericContainer {
private boolean hasTlsPorts = false;
- /**
- * Creates a new couchbase container with the default image and version.
- * @deprecated use {@link #CouchbaseContainer(DockerImageName)} instead
- */
- @Deprecated
- public CouchbaseContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* Creates a new couchbase container with the specified image name.
*
@@ -341,6 +330,13 @@ private void exposePorts() {
}
}
+ @Override
+ protected void containerIsStarting(InspectContainerResponse containerInfo, boolean reused) {
+ if (!reused) {
+ containerIsStarting(containerInfo);
+ }
+ }
+
@Override
protected void containerIsStarting(final InspectContainerResponse containerInfo) {
logger().debug("Couchbase container is starting, performing configuration.");
@@ -359,6 +355,13 @@ protected void containerIsStarting(final InspectContainerResponse containerInfo)
}
}
+ @Override
+ protected void containerIsStarted(InspectContainerResponse containerInfo, boolean reused) {
+ if (!reused) {
+ this.containerIsStarted(containerInfo);
+ }
+ }
+
@Override
protected void containerIsStarted(InspectContainerResponse containerInfo) {
timePhase("createBuckets", this::createBuckets);
diff --git a/modules/cratedb/build.gradle b/modules/cratedb/build.gradle
index 9e269731b4b..79ca4864f46 100644
--- a/modules/cratedb/build.gradle
+++ b/modules/cratedb/build.gradle
@@ -4,7 +4,7 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
+ testRuntimeOnly 'org.postgresql:postgresql:42.7.7'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
}
diff --git a/modules/cratedb/src/test/java/org/testcontainers/junit/cratedb/SimpleCrateDBTest.java b/modules/cratedb/src/test/java/org/testcontainers/junit/cratedb/SimpleCrateDBTest.java
index 19ce83cd8ae..c36b1432100 100644
--- a/modules/cratedb/src/test/java/org/testcontainers/junit/cratedb/SimpleCrateDBTest.java
+++ b/modules/cratedb/src/test/java/org/testcontainers/junit/cratedb/SimpleCrateDBTest.java
@@ -40,7 +40,7 @@ public void testCommandOverride() throws SQLException {
ResultSet resultSet = performQuery(cratedb, "select name from sys.cluster");
String result = resultSet.getString(1);
- assertThat(result).as("cluster name should be overriden").isEqualTo("testcontainers");
+ assertThat(result).as("cluster name should be overridden").isEqualTo("testcontainers");
}
}
diff --git a/modules/database-commons/build.gradle b/modules/database-commons/build.gradle
index 2a1ead4c215..e6bb0697252 100644
--- a/modules/database-commons/build.gradle
+++ b/modules/database-commons/build.gradle
@@ -3,5 +3,5 @@ description = "Testcontainers :: Database-Commons"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/databend/build.gradle b/modules/databend/build.gradle
index d3daa5575a2..8eac99519c4 100644
--- a/modules/databend/build.gradle
+++ b/modules/databend/build.gradle
@@ -4,6 +4,6 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
- testRuntimeOnly 'com.databend:databend-jdbc:0.3.2'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly 'com.databend:databend-jdbc:0.3.9'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/db2/build.gradle b/modules/db2/build.gradle
index 3a773f21534..0163eb17bd6 100644
--- a/modules/db2/build.gradle
+++ b/modules/db2/build.gradle
@@ -4,6 +4,12 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
- testRuntimeOnly 'com.ibm.db2:jcc:11.5.9.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly 'com.ibm.db2:jcc:12.1.2.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.Db2Container"
+ ]
}
diff --git a/modules/db2/src/main/java/org/testcontainers/containers/Db2Container.java b/modules/db2/src/main/java/org/testcontainers/containers/Db2Container.java
index d04835cbc02..18fb4aac0e6 100644
--- a/modules/db2/src/main/java/org/testcontainers/containers/Db2Container.java
+++ b/modules/db2/src/main/java/org/testcontainers/containers/Db2Container.java
@@ -42,14 +42,6 @@ public class Db2Container extends JdbcDatabaseContainer<Db2Container> {
private String password = "foobar1234";
- /**
- * @deprecated use {@link #Db2Container(DockerImageName)} instead
- */
- @Deprecated
- public Db2Container() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public Db2Container(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
@@ -79,7 +71,7 @@ protected Set<Integer> getLivenessCheckPorts() {
@Override
protected void configure() {
- // If license was not accepted programatically, check if it was accepted via resource file
+ // If license was not accepted programmatically, check if it was accepted via resource file
if (!getEnvMap().containsKey("LICENSE")) {
LicenseAcceptance.assertLicenseAccepted(this.getDockerImageName());
acceptLicense();
diff --git a/modules/dynalite/build.gradle b/modules/dynalite/build.gradle
deleted file mode 100644
index d8fe2377b76..00000000000
--- a/modules/dynalite/build.gradle
+++ /dev/null
@@ -1,9 +0,0 @@
-description = "Testcontainers :: Dynalite (deprecated)"
-
-dependencies {
- api project(':testcontainers')
-
- compileOnly 'com.amazonaws:aws-java-sdk-dynamodb:1.12.772'
- testImplementation 'com.amazonaws:aws-java-sdk-dynamodb:1.12.772'
- testImplementation 'org.assertj:assertj-core:3.26.3'
-}
diff --git a/modules/dynalite/src/main/java/org/testcontainers/dynamodb/DynaliteContainer.java b/modules/dynalite/src/main/java/org/testcontainers/dynamodb/DynaliteContainer.java
deleted file mode 100644
index 2a73ce6f022..00000000000
--- a/modules/dynalite/src/main/java/org/testcontainers/dynamodb/DynaliteContainer.java
+++ /dev/null
@@ -1,79 +0,0 @@
-package org.testcontainers.dynamodb;
-
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
-import org.testcontainers.containers.GenericContainer;
-import org.testcontainers.utility.DockerImageName;
-
-/**
- * Container for Dynalite, a DynamoDB clone.
- *
- * @deprecated use localstack module instead
- */
-public class DynaliteContainer extends GenericContainer {
-
- private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("quay.io/testcontainers/dynalite");
-
- private static final String DEFAULT_TAG = "v1.2.1-1";
-
- private static final int MAPPED_PORT = 4567;
-
- /**
- * @deprecated use {@link #DynaliteContainer(DockerImageName)} instead
- */
- @Deprecated
- public DynaliteContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
- public DynaliteContainer(String dockerImageName) {
- this(DockerImageName.parse(dockerImageName));
- }
-
- public DynaliteContainer(final DockerImageName dockerImageName) {
- super(dockerImageName);
- dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
-
- withExposedPorts(MAPPED_PORT);
- }
-
- /**
- * Gets a preconfigured {@link AmazonDynamoDB} client object for connecting to this
- * container.
- *
- * @return preconfigured client
- */
- public AmazonDynamoDB getClient() {
- return AmazonDynamoDBClientBuilder
- .standard()
- .withEndpointConfiguration(getEndpointConfiguration())
- .withCredentials(getCredentials())
- .build();
- }
-
- /**
- * Gets {@link AwsClientBuilder.EndpointConfiguration}
- * that may be used to connect to this container.
- *
- * @return endpoint configuration
- */
- public AwsClientBuilder.EndpointConfiguration getEndpointConfiguration() {
- return new AwsClientBuilder.EndpointConfiguration(
- "http://" + this.getHost() + ":" + this.getMappedPort(MAPPED_PORT),
- null
- );
- }
-
- /**
- * Gets an {@link AWSCredentialsProvider} that may be used to connect to this container.
- *
- * @return dummy AWS credentials
- */
- public AWSCredentialsProvider getCredentials() {
- return new AWSStaticCredentialsProvider(new BasicAWSCredentials("dummy", "dummy"));
- }
-}
diff --git a/modules/dynalite/src/test/java/org/testcontainers/dynamodb/DynaliteContainerTest.java b/modules/dynalite/src/test/java/org/testcontainers/dynamodb/DynaliteContainerTest.java
deleted file mode 100644
index 2a4e94e9516..00000000000
--- a/modules/dynalite/src/test/java/org/testcontainers/dynamodb/DynaliteContainerTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package org.testcontainers.dynamodb;
-
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
-import com.amazonaws.services.dynamodbv2.model.AttributeDefinition;
-import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
-import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;
-import com.amazonaws.services.dynamodbv2.model.KeyType;
-import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
-import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType;
-import com.amazonaws.services.dynamodbv2.model.TableDescription;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.testcontainers.utility.DockerImageName;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-@Ignore("Image is not compatible with the latest Docker version provided by GH Actions")
-public class DynaliteContainerTest {
-
- private static final DockerImageName DYNALITE_IMAGE = DockerImageName.parse(
- "quay.io/testcontainers/dynalite:v1.2.1-1"
- );
-
- @Rule
- public DynaliteContainer dynamoDB = new DynaliteContainer(DYNALITE_IMAGE);
-
- @Test
- public void simpleTestWithManualClientCreation() {
- final AmazonDynamoDB client = AmazonDynamoDBClientBuilder
- .standard()
- .withEndpointConfiguration(dynamoDB.getEndpointConfiguration())
- .withCredentials(dynamoDB.getCredentials())
- .build();
-
- runTest(client);
- }
-
- @Test
- public void simpleTestWithProvidedClient() {
- final AmazonDynamoDB client = dynamoDB.getClient();
-
- runTest(client);
- }
-
- private void runTest(AmazonDynamoDB client) {
- CreateTableRequest request = new CreateTableRequest()
- .withAttributeDefinitions(new AttributeDefinition("Name", ScalarAttributeType.S))
- .withKeySchema(new KeySchemaElement("Name", KeyType.HASH))
- .withProvisionedThroughput(new ProvisionedThroughput(10L, 10L))
- .withTableName("foo");
-
- client.createTable(request);
-
- final TableDescription tableDescription = client.describeTable("foo").getTable();
-
- assertThat(tableDescription).as("the description is not null").isNotNull();
- assertThat(tableDescription.getTableName()).as("the table has the right name").isEqualTo("foo");
- assertThat(tableDescription.getKeySchema().get(0).getAttributeName())
- .as("the name has the right primary key")
- .isEqualTo("Name");
- }
-}
diff --git a/modules/elasticsearch/build.gradle b/modules/elasticsearch/build.gradle
index 79faefdde24..0370bab602f 100644
--- a/modules/elasticsearch/build.gradle
+++ b/modules/elasticsearch/build.gradle
@@ -2,7 +2,13 @@ description = "Testcontainers :: elasticsearch"
dependencies {
api project(':testcontainers')
- testImplementation "org.elasticsearch.client:elasticsearch-rest-client:8.15.1"
- testImplementation "org.elasticsearch.client:transport:7.17.24"
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation "org.elasticsearch.client:elasticsearch-rest-client:9.0.3"
+ testImplementation "org.elasticsearch.client:transport:7.17.29"
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.elasticsearch.ElasticsearchContainer"
+ ]
}
diff --git a/modules/elasticsearch/src/main/java/org/testcontainers/elasticsearch/ElasticsearchContainer.java b/modules/elasticsearch/src/main/java/org/testcontainers/elasticsearch/ElasticsearchContainer.java
index 99f8cd75b4f..54f1a4c17f8 100644
--- a/modules/elasticsearch/src/main/java/org/testcontainers/elasticsearch/ElasticsearchContainer.java
+++ b/modules/elasticsearch/src/main/java/org/testcontainers/elasticsearch/ElasticsearchContainer.java
@@ -68,12 +68,6 @@ public class ElasticsearchContainer extends GenericContainer= 8
private static final String DEFAULT_CERT_PATH = "/usr/share/elasticsearch/config/certs/http_ca.crt";
- /**
- * Elasticsearch Default version
- */
- @Deprecated
- protected static final String DEFAULT_TAG = "7.9.2";
-
@Deprecated
private boolean isOss = false;
@@ -81,14 +75,6 @@ public class ElasticsearchContainer extends GenericContainer e.contains("--database-mode datastore-mode"));
+ }
+ }
}
diff --git a/modules/grafana/build.gradle b/modules/grafana/build.gradle
index 72082e4cfec..d4dbe68355d 100644
--- a/modules/grafana/build.gradle
+++ b/modules/grafana/build.gradle
@@ -3,8 +3,13 @@ description = "Testcontainers :: Grafana"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.rest-assured:rest-assured:5.5.0'
- testImplementation 'io.micrometer:micrometer-registry-otlp:1.13.4'
- testImplementation 'uk.org.webcompere:system-stubs-junit4:2.1.6'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.rest-assured:rest-assured:5.5.5'
+ testImplementation 'io.micrometer:micrometer-registry-otlp:1.15.1'
+ testImplementation 'uk.org.webcompere:system-stubs-junit4:2.1.8'
+
+ testImplementation platform('io.opentelemetry:opentelemetry-bom:1.51.0')
+ testImplementation 'io.opentelemetry:opentelemetry-api'
+ testImplementation 'io.opentelemetry:opentelemetry-sdk'
+ testImplementation 'io.opentelemetry:opentelemetry-exporter-otlp'
}
diff --git a/modules/grafana/src/main/java/org/testcontainers/grafana/LgtmStackContainer.java b/modules/grafana/src/main/java/org/testcontainers/grafana/LgtmStackContainer.java
index 080be6d6eaf..52443d7ba8a 100644
--- a/modules/grafana/src/main/java/org/testcontainers/grafana/LgtmStackContainer.java
+++ b/modules/grafana/src/main/java/org/testcontainers/grafana/LgtmStackContainer.java
@@ -14,6 +14,7 @@
* Exposed ports:
*
* - Grafana: 3000
+ * - Tempo: 3200
* - OTel Http: 4317
* - OTel Grpc: 4318
* - Prometheus: 9090
@@ -30,6 +31,10 @@ public class LgtmStackContainer extends GenericContainer {
private static final int OTLP_HTTP_PORT = 4318;
+ private static final int LOKI_PORT = 3100;
+
+ private static final int TEMPO_PORT = 3200;
+
private static final int PROMETHEUS_PORT = 9090;
public LgtmStackContainer(String image) {
@@ -39,7 +44,7 @@ public LgtmStackContainer(String image) {
public LgtmStackContainer(DockerImageName image) {
super(image);
image.assertCompatibleWith(DEFAULT_IMAGE_NAME);
- withExposedPorts(GRAFANA_PORT, OTLP_GRPC_PORT, OTLP_HTTP_PORT, PROMETHEUS_PORT);
+ withExposedPorts(GRAFANA_PORT, TEMPO_PORT, LOKI_PORT, OTLP_GRPC_PORT, OTLP_HTTP_PORT, PROMETHEUS_PORT);
waitingFor(
Wait.forLogMessage(".*The OpenTelemetry collector and the Grafana LGTM stack are up and running.*\\s", 1)
);
@@ -54,11 +59,19 @@ public String getOtlpGrpcUrl() {
return "http://" + getHost() + ":" + getMappedPort(OTLP_GRPC_PORT);
}
+ public String getTempoUrl() {
+ return "http://" + getHost() + ":" + getMappedPort(TEMPO_PORT);
+ }
+
+ public String getLokiUrl() {
+ return "http://" + getHost() + ":" + getMappedPort(LOKI_PORT);
+ }
+
public String getOtlpHttpUrl() {
return "http://" + getHost() + ":" + getMappedPort(OTLP_HTTP_PORT);
}
- public String getPromehteusHttpUrl() {
+ public String getPrometheusHttpUrl() {
return "http://" + getHost() + ":" + getMappedPort(PROMETHEUS_PORT);
}
diff --git a/modules/grafana/src/test/java/org/testcontainers/grafana/LgtmStackContainerTest.java b/modules/grafana/src/test/java/org/testcontainers/grafana/LgtmStackContainerTest.java
index cb5dbaa630b..ba4fb94856c 100644
--- a/modules/grafana/src/test/java/org/testcontainers/grafana/LgtmStackContainerTest.java
+++ b/modules/grafana/src/test/java/org/testcontainers/grafana/LgtmStackContainerTest.java
@@ -5,6 +5,19 @@
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.registry.otlp.OtlpConfig;
import io.micrometer.registry.otlp.OtlpMeterRegistry;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.logs.Logger;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.exporter.otlp.logs.OtlpGrpcLogRecordExporter;
+import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.logs.SdkLoggerProvider;
+import io.opentelemetry.sdk.logs.export.SimpleLogRecordProcessor;
+import io.opentelemetry.sdk.resources.Resource;
+import io.opentelemetry.sdk.trace.SdkTracerProvider;
+import io.opentelemetry.sdk.trace.export.BatchSpanProcessor;
import io.restassured.RestAssured;
import io.restassured.response.Response;
import org.awaitility.Awaitility;
@@ -12,24 +25,59 @@
import uk.org.webcompere.systemstubs.SystemStubs;
import java.time.Duration;
+import java.util.concurrent.TimeUnit;
import static org.assertj.core.api.Assertions.assertThat;
public class LgtmStackContainerTest {
@Test
- public void shouldPublishMetric() throws Exception {
+ public void shouldPublishMetricsTracesAndLogs() throws Exception {
try ( // container {
- LgtmStackContainer lgtm = new LgtmStackContainer("grafana/otel-lgtm:0.6.0")
+ LgtmStackContainer lgtm = new LgtmStackContainer("grafana/otel-lgtm:0.11.1")
// }
) {
lgtm.start();
+ OtlpGrpcSpanExporter spanExporter = OtlpGrpcSpanExporter
+ .builder()
+ .setTimeout(Duration.ofSeconds(1))
+ .setEndpoint(lgtm.getOtlpGrpcUrl())
+ .build();
+
+ OtlpGrpcLogRecordExporter logExporter = OtlpGrpcLogRecordExporter
+ .builder()
+ .setTimeout(Duration.ofSeconds(1))
+ .setEndpoint(lgtm.getOtlpGrpcUrl())
+ .build();
+
+ BatchSpanProcessor spanProcessor = BatchSpanProcessor
+ .builder(spanExporter)
+ .setScheduleDelay(500, TimeUnit.MILLISECONDS)
+ .build();
+
+ SdkTracerProvider tracerProvider = SdkTracerProvider
+ .builder()
+ .addSpanProcessor(spanProcessor)
+ .setResource(Resource.create(Attributes.of(AttributeKey.stringKey("service.name"), "test-service")))
+ .build();
+
+ SdkLoggerProvider loggerProvider = SdkLoggerProvider
+ .builder()
+ .addLogRecordProcessor(SimpleLogRecordProcessor.create(logExporter))
+ .build();
+
+ OpenTelemetrySdk openTelemetry = OpenTelemetrySdk
+ .builder()
+ .setTracerProvider(tracerProvider)
+ .setLoggerProvider(loggerProvider)
+ .build();
+
String version = RestAssured
.get(String.format("http://%s:%s/api/health", lgtm.getHost(), lgtm.getMappedPort(3000)))
.jsonPath()
.get("version");
- assertThat(version).isEqualTo("11.0.0");
+ assertThat(version).isEqualTo("12.0.0");
OtlpConfig otlpConfig = createOtlpConfig(lgtm);
MeterRegistry meterRegistry = SystemStubs
@@ -37,21 +85,53 @@ public void shouldPublishMetric() throws Exception {
.execute(() -> new OtlpMeterRegistry(otlpConfig, Clock.SYSTEM));
Counter.builder("test.counter").register(meterRegistry).increment(2);
+ Logger logger = openTelemetry.getSdkLoggerProvider().loggerBuilder("test").build();
+ logger
+ .logRecordBuilder()
+ .setBody("Test log!")
+ .setAttribute(AttributeKey.stringKey("job"), "test-job")
+ .emit();
+
+ Tracer tracer = openTelemetry.getTracer("test");
+ Span span = tracer.spanBuilder("test").startSpan();
+ span.end();
+
Awaitility
.given()
.pollInterval(Duration.ofSeconds(2))
.atMost(Duration.ofSeconds(5))
.ignoreExceptions()
.untilAsserted(() -> {
- Response response = RestAssured
+ Response metricResponse = RestAssured
.given()
.queryParam("query", "test_counter_total{job=\"testcontainers\"}")
- .get(String.format("%s/api/v1/query", lgtm.getPromehteusHttpUrl()))
+ .get(String.format("%s/api/v1/query", lgtm.getPrometheusHttpUrl()))
.prettyPeek()
.thenReturn();
- assertThat(response.getStatusCode()).isEqualTo(200);
- assertThat(response.body().jsonPath().getList("data.result[0].value")).contains("2");
+ assertThat(metricResponse.getStatusCode()).isEqualTo(200);
+ assertThat(metricResponse.body().jsonPath().getList("data.result[0].value")).contains("2");
+
+ Response logResponse = RestAssured
+ .given()
+ .queryParam("query", "{service_name=\"unknown_service:java\"}")
+ .get(String.format("%s/loki/api/v1/query_range", lgtm.getLokiUrl()))
+ .prettyPeek()
+ .thenReturn();
+ assertThat(logResponse.getStatusCode()).isEqualTo(200);
+ assertThat(logResponse.body().jsonPath().getString("data.result[0].values[0][1]"))
+ .isEqualTo("Test log!");
+
+ Response traceResponse = RestAssured
+ .given()
+ .get(String.format("%s/api/search", lgtm.getTempoUrl()))
+ .prettyPeek()
+ .thenReturn();
+ assertThat(traceResponse.getStatusCode()).isEqualTo(200);
+ assertThat(traceResponse.body().jsonPath().getString("traces[0].rootServiceName"))
+ .isEqualTo("test-service");
});
+
+ openTelemetry.close();
}
}
diff --git a/modules/hivemq/build.gradle b/modules/hivemq/build.gradle
index 1c748a8d2e3..898f0ba5639 100644
--- a/modules/hivemq/build.gradle
+++ b/modules/hivemq/build.gradle
@@ -2,22 +2,23 @@ description = "Testcontainers :: HiveMQ"
dependencies {
api(project(":testcontainers"))
- api("org.jetbrains:annotations:24.1.0")
+ api("org.jetbrains:annotations:26.0.2")
shaded("org.apache.commons:commons-lang3:3.17.0")
- shaded("commons-io:commons-io:2.17.0")
+ shaded("commons-io:commons-io:2.19.0")
shaded("org.javassist:javassist:3.30.2-GA")
shaded("org.jboss.shrinkwrap:shrinkwrap-api:1.2.6")
shaded("org.jboss.shrinkwrap:shrinkwrap-impl-base:1.2.6")
shaded("net.lingala.zip4j:zip4j:2.11.5")
- testImplementation("org.junit.jupiter:junit-jupiter:5.10.3")
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.3'
testImplementation(project(":junit-jupiter"))
- testImplementation("com.hivemq:hivemq-extension-sdk:4.32.0")
- testImplementation("com.hivemq:hivemq-mqtt-client:1.3.3")
+ testImplementation("com.hivemq:hivemq-extension-sdk:4.41.0")
+ testImplementation("com.hivemq:hivemq-mqtt-client:1.3.7")
testImplementation("org.apache.httpcomponents:httpclient:4.5.14")
- testImplementation("ch.qos.logback:logback-classic:1.5.8")
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation("ch.qos.logback:logback-classic:1.5.18")
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
test {
diff --git a/modules/influxdb/build.gradle b/modules/influxdb/build.gradle
index 9cf1e8d5ab0..7a5aa985beb 100644
--- a/modules/influxdb/build.gradle
+++ b/modules/influxdb/build.gradle
@@ -3,9 +3,15 @@ description = "Testcontainers :: InfluxDB"
dependencies {
api project(':testcontainers')
- compileOnly 'org.influxdb:influxdb-java:2.24'
+ compileOnly 'org.influxdb:influxdb-java:2.25'
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.influxdb:influxdb-java:2.24'
- testImplementation "com.influxdb:influxdb-client-java:6.12.0"
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'org.influxdb:influxdb-java:2.25'
+ testImplementation "com.influxdb:influxdb-client-java:7.3.0"
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.InfluxDBContainer"
+ ]
}
diff --git a/modules/influxdb/src/main/java/org/testcontainers/containers/InfluxDBContainer.java b/modules/influxdb/src/main/java/org/testcontainers/containers/InfluxDBContainer.java
index 8e908e8e229..f54971dfc7a 100644
--- a/modules/influxdb/src/main/java/org/testcontainers/containers/InfluxDBContainer.java
+++ b/modules/influxdb/src/main/java/org/testcontainers/containers/InfluxDBContainer.java
@@ -24,11 +24,6 @@ public class InfluxDBContainer> extends Gen
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("influxdb");
- private static final String DEFAULT_TAG = "1.4.3";
-
- @Deprecated
- public static final String VERSION = DEFAULT_TAG;
-
private static final int NO_CONTENT_STATUS_CODE = 204;
@Getter
@@ -66,14 +61,6 @@ public class InfluxDBContainer> extends Gen
private final boolean isAtLeastMajorVersion2;
- /**
- * @deprecated use {@link #InfluxDBContainer(DockerImageName)} instead
- */
- @Deprecated
- public InfluxDBContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* @deprecated use {@link #InfluxDBContainer(DockerImageName)} instead
*/
diff --git a/modules/jdbc-test/build.gradle b/modules/jdbc-test/build.gradle
index a4c42db38ce..507313e9ad2 100644
--- a/modules/jdbc-test/build.gradle
+++ b/modules/jdbc-test/build.gradle
@@ -1,16 +1,16 @@
dependencies {
api project(':jdbc')
- api 'com.google.guava:guava:33.3.1-jre'
+ api 'com.google.guava:guava:33.4.8-jre'
api 'org.apache.commons:commons-lang3:3.17.0'
api 'com.zaxxer:HikariCP-java6:2.3.13'
api 'commons-dbutils:commons-dbutils:1.8.1'
api 'com.googlecode.junit-toolbox:junit-toolbox:2.4'
- api 'org.assertj:assertj-core:3.26.3'
+ api 'org.assertj:assertj-core:3.27.3'
- api 'org.apache.tomcat:tomcat-jdbc:10.0.27'
- api 'org.vibur:vibur-dbcp:25.0'
+ api 'org.apache.tomcat:tomcat-jdbc:11.0.9'
+ api 'org.vibur:vibur-dbcp:26.0'
api 'mysql:mysql-connector-java:8.0.33'
}
diff --git a/modules/jdbc/build.gradle b/modules/jdbc/build.gradle
index 2b2868ebbc6..c343cd17a4a 100644
--- a/modules/jdbc/build.gradle
+++ b/modules/jdbc/build.gradle
@@ -3,12 +3,12 @@ description = "Testcontainers :: JDBC"
dependencies {
api project(':database-commons')
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
testImplementation 'commons-dbutils:commons-dbutils:1.8.1'
- testImplementation 'org.vibur:vibur-dbcp:25.0'
- testImplementation 'org.apache.tomcat:tomcat-jdbc:10.1.30'
+ testImplementation 'org.vibur:vibur-dbcp:26.0'
+ testImplementation 'org.apache.tomcat:tomcat-jdbc:11.0.9'
testImplementation 'com.zaxxer:HikariCP-java6:2.3.13'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
testImplementation ('org.mockito:mockito-core:4.11.0') {
exclude(module: 'hamcrest-core')
}
diff --git a/modules/jdbc/src/main/java/org/testcontainers/jdbc/JdbcDatabaseDelegate.java b/modules/jdbc/src/main/java/org/testcontainers/jdbc/JdbcDatabaseDelegate.java
index 3c33eba8d5b..24c18944292 100644
--- a/modules/jdbc/src/main/java/org/testcontainers/jdbc/JdbcDatabaseDelegate.java
+++ b/modules/jdbc/src/main/java/org/testcontainers/jdbc/JdbcDatabaseDelegate.java
@@ -6,6 +6,7 @@
import org.testcontainers.exception.ConnectionCreationException;
import org.testcontainers.ext.ScriptUtils;
+import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
@@ -17,6 +18,8 @@ public class JdbcDatabaseDelegate extends AbstractDatabaseDelegate {
private JdbcDatabaseContainer container;
+ private Connection connection;
+
private String queryString;
public JdbcDatabaseDelegate(JdbcDatabaseContainer container, String queryString) {
@@ -27,7 +30,8 @@ public JdbcDatabaseDelegate(JdbcDatabaseContainer container, String queryString)
@Override
protected Statement createNewConnection() {
try {
- return container.createConnection(queryString).createStatement();
+ connection = container.createConnection(queryString);
+ return connection.createStatement();
} catch (SQLException e) {
log.error("Could not obtain JDBC connection");
throw new ConnectionCreationException("Could not obtain JDBC connection", e);
@@ -65,6 +69,7 @@ public void execute(
protected void closeConnectionQuietly(Statement statement) {
try {
statement.close();
+ connection.close();
} catch (Exception e) {
log.error("Could not close JDBC connection", e);
}
diff --git a/modules/jdbc/src/test/java/org/testcontainers/jdbc/JdbcDatabaseDelegateTest.java b/modules/jdbc/src/test/java/org/testcontainers/jdbc/JdbcDatabaseDelegateTest.java
new file mode 100644
index 00000000000..f3cf2d01d40
--- /dev/null
+++ b/modules/jdbc/src/test/java/org/testcontainers/jdbc/JdbcDatabaseDelegateTest.java
@@ -0,0 +1,87 @@
+package org.testcontainers.jdbc;
+
+import lombok.NonNull;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.testcontainers.containers.JdbcDatabaseContainer;
+import org.testcontainers.utility.DockerImageName;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class JdbcDatabaseDelegateTest {
+
+ @Test
+ public void testLeakedConnections() {
+ final JdbcDatabaseContainerStub stub = new JdbcDatabaseContainerStub(DockerImageName.parse("something"));
+ try (JdbcDatabaseDelegate delegate = new JdbcDatabaseDelegate(stub, "")) {
+ delegate.execute("foo", null, 0, false, false);
+ }
+ Assert.assertEquals(0, stub.openConnectionsList.size());
+ }
+
+ static class JdbcDatabaseContainerStub extends JdbcDatabaseContainer {
+
+ List openConnectionsList = new ArrayList<>();
+
+ public JdbcDatabaseContainerStub(@NonNull DockerImageName dockerImageName) {
+ super(dockerImageName);
+ }
+
+ @Override
+ public String getDriverClassName() {
+ return null;
+ }
+
+ @Override
+ public String getJdbcUrl() {
+ return null;
+ }
+
+ @Override
+ public String getUsername() {
+ return null;
+ }
+
+ @Override
+ public String getPassword() {
+ return null;
+ }
+
+ @Override
+ protected String getTestQueryString() {
+ return null;
+ }
+
+ @Override
+ public boolean isRunning() {
+ return true;
+ }
+
+ @Override
+ public Connection createConnection(String queryString) throws NoDriverFoundException, SQLException {
+ final Connection connection = mock(Connection.class);
+ openConnectionsList.add(connection);
+ when(connection.createStatement()).thenReturn(mock(Statement.class));
+ connection.close();
+ Mockito.doAnswer(ignore -> openConnectionsList.remove(connection)).when(connection).close();
+ return connection;
+ }
+
+ @Override
+ protected Logger logger() {
+ return mock(Logger.class);
+ }
+
+ @Override
+ public void setDockerImageName(@NonNull String dockerImageName) {}
+ }
+}
diff --git a/modules/junit-jupiter/build.gradle b/modules/junit-jupiter/build.gradle
index 0351a065c34..99ac3119075 100644
--- a/modules/junit-jupiter/build.gradle
+++ b/modules/junit-jupiter/build.gradle
@@ -2,21 +2,22 @@ description = "Testcontainers :: JUnit Jupiter Extension"
dependencies {
api project(':testcontainers')
- implementation platform('org.junit:junit-bom:5.10.3')
+ implementation platform('org.junit:junit-bom:5.13.3')
implementation 'org.junit.jupiter:junit-jupiter-api'
testImplementation project(':mysql')
testImplementation project(':postgresql')
- testImplementation 'com.zaxxer:HikariCP:4.0.3'
- testImplementation 'redis.clients:jedis:5.1.5'
+ testImplementation 'com.zaxxer:HikariCP:6.3.0'
+ testImplementation 'redis.clients:jedis:6.0.0'
testImplementation 'org.apache.httpcomponents:httpclient:4.5.14'
testImplementation ('org.mockito:mockito-core:4.11.0') {
exclude(module: 'hamcrest-core')
}
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
testImplementation 'org.junit.jupiter:junit-jupiter'
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.3'
+ testRuntimeOnly 'org.postgresql:postgresql:42.7.7'
testRuntimeOnly 'mysql:mysql-connector-java:8.0.33'
}
diff --git a/modules/junit-jupiter/src/test/java/org/testcontainers/junit/jupiter/MixedLifecycleTests.java b/modules/junit-jupiter/src/test/java/org/testcontainers/junit/jupiter/MixedLifecycleTests.java
index e8ce714ab28..7dcd769bf08 100644
--- a/modules/junit-jupiter/src/test/java/org/testcontainers/junit/jupiter/MixedLifecycleTests.java
+++ b/modules/junit-jupiter/src/test/java/org/testcontainers/junit/jupiter/MixedLifecycleTests.java
@@ -12,11 +12,11 @@ class MixedLifecycleTests {
// will be shared between test methods
@Container
- private static final MySQLContainer MY_SQL_CONTAINER = new MySQLContainer();
+ private static final MySQLContainer MY_SQL_CONTAINER = new MySQLContainer("mysql:8.0.36");
// will be started before and stopped after each test method
@Container
- private PostgreSQLContainer postgresqlContainer = new PostgreSQLContainer()
+ private PostgreSQLContainer postgresqlContainer = new PostgreSQLContainer("postgres:9.6.12")
.withDatabaseName("foo")
.withUsername("foo")
.withPassword("secret");
diff --git a/modules/k3s/build.gradle b/modules/k3s/build.gradle
index 3f795d99743..fd52f632cae 100644
--- a/modules/k3s/build.gradle
+++ b/modules/k3s/build.gradle
@@ -3,12 +3,10 @@ description = "Testcontainers :: K3S"
dependencies {
api project(":testcontainers")
- // https://youtu.be/otCpCn0l4Wo
- // The core module depends on jackson-databind 2.8.x for backward compatibility.
- // Any >2.8 version here is not compatible with jackson-databind 2.8.x.
- shaded 'com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.8.8'
+ // Synchronize with the jackson version, must match major and minor version
+ shaded 'com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.18.4'
- testImplementation 'io.fabric8:kubernetes-client:6.13.1'
- testImplementation 'io.kubernetes:client-java:21.0.1-legacy'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'io.fabric8:kubernetes-client:7.3.1'
+ testImplementation 'io.kubernetes:client-java:24.0.0-legacy'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/k6/build.gradle b/modules/k6/build.gradle
index 5bf8d773da0..0a027d12d55 100644
--- a/modules/k6/build.gradle
+++ b/modules/k6/build.gradle
@@ -3,5 +3,5 @@ description = "Testcontainers :: k6"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/kafka/build.gradle b/modules/kafka/build.gradle
index 5f0658a51d0..d4fb383c3f3 100644
--- a/modules/kafka/build.gradle
+++ b/modules/kafka/build.gradle
@@ -4,7 +4,7 @@ dependencies {
api project(':testcontainers')
testImplementation 'org.apache.kafka:kafka-clients:3.8.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
testImplementation 'com.google.guava:guava:23.0'
- testImplementation 'org.awaitility:awaitility:4.2.2'
+ testImplementation 'org.awaitility:awaitility:4.3.0'
}
diff --git a/modules/kafka/src/main/java/org/testcontainers/kafka/ConfluentKafkaContainer.java b/modules/kafka/src/main/java/org/testcontainers/kafka/ConfluentKafkaContainer.java
index 8c2cce855bc..42942cdd3a5 100644
--- a/modules/kafka/src/main/java/org/testcontainers/kafka/ConfluentKafkaContainer.java
+++ b/modules/kafka/src/main/java/org/testcontainers/kafka/ConfluentKafkaContainer.java
@@ -44,9 +44,6 @@ public ConfluentKafkaContainer(DockerImageName dockerImageName) {
@Override
protected void configure() {
KafkaHelper.resolveListeners(this, this.listeners);
-
- String controllerQuorumVoters = String.format("%s@localhost:9094", getEnvMap().get("KAFKA_NODE_ID"));
- withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters);
}
@Override
diff --git a/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaContainer.java b/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaContainer.java
index 63e3a0d35cf..e946e0a8992 100644
--- a/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaContainer.java
+++ b/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaContainer.java
@@ -2,7 +2,6 @@
import com.github.dockerjava.api.command.InspectContainerResponse;
import org.testcontainers.containers.GenericContainer;
-import org.testcontainers.containers.wait.strategy.Wait;
import org.testcontainers.images.builder.Transferable;
import org.testcontainers.utility.DockerImageName;
@@ -44,16 +43,13 @@ public KafkaContainer(DockerImageName dockerImageName) {
withExposedPorts(KAFKA_PORT);
withEnv(KafkaHelper.envVars());
- withCommand("sh", "-c", "while [ ! -f " + STARTER_SCRIPT + " ]; do sleep 0.1; done; " + STARTER_SCRIPT);
- waitingFor(Wait.forLogMessage(".*Transitioning from RECOVERY to RUNNING.*", 1));
+ withCommand(KafkaHelper.COMMAND);
+ waitingFor(KafkaHelper.WAIT_STRATEGY);
}
@Override
protected void configure() {
KafkaHelper.resolveListeners(this, this.listeners);
-
- String controllerQuorumVoters = String.format("%s@localhost:9094", getEnvMap().get("KAFKA_NODE_ID"));
- withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters);
}
@Override
diff --git a/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaHelper.java b/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaHelper.java
index 00b10a4d519..61e790d474f 100644
--- a/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaHelper.java
+++ b/modules/kafka/src/main/java/org/testcontainers/kafka/KafkaHelper.java
@@ -50,6 +50,10 @@ static Map envVars() {
envVars.put("KAFKA_CONTROLLER_LISTENER_NAMES", "CONTROLLER");
envVars.put("KAFKA_NODE_ID", "1");
+
+ String controllerQuorumVoters = String.format("%s@localhost:9094", envVars.get("KAFKA_NODE_ID"));
+ envVars.put("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters);
+
envVars.put("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", DEFAULT_INTERNAL_TOPIC_RF);
envVars.put("KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS", DEFAULT_INTERNAL_TOPIC_RF);
envVars.put("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", DEFAULT_INTERNAL_TOPIC_RF);
diff --git a/modules/kafka/src/test/java/org/testcontainers/KCatContainer.java b/modules/kafka/src/test/java/org/testcontainers/KCatContainer.java
index 84ad97b52b7..79532ae9f22 100644
--- a/modules/kafka/src/test/java/org/testcontainers/KCatContainer.java
+++ b/modules/kafka/src/test/java/org/testcontainers/KCatContainer.java
@@ -6,7 +6,7 @@
public class KCatContainer extends GenericContainer {
public KCatContainer() {
- super("confluentinc/cp-kcat:7.4.1");
+ super("confluentinc/cp-kcat:7.9.0");
withCreateContainerCmdModifier(cmd -> {
cmd.withEntrypoint("sh");
});
diff --git a/modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java b/modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java
index 6d3578f7833..49b21108da8 100644
--- a/modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java
+++ b/modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java
@@ -189,13 +189,11 @@ public void testKraftPrecedenceOverEmbeddedZookeeper() throws Exception {
public void testUsageWithListener() throws Exception {
try (
Network network = Network.newNetwork();
- // registerListener {
KafkaContainer kafka = new KafkaContainer(KAFKA_KRAFT_TEST_IMAGE)
.withListener(() -> "kafka:19092")
.withNetwork(network);
- // }
// createKCatContainer {
- GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.4.1")
+ GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.9.0")
.withCreateContainerCmdModifier(cmd -> {
cmd.withEntrypoint("sh");
})
diff --git a/modules/kafka/src/test/java/org/testcontainers/kafka/KafkaContainerTest.java b/modules/kafka/src/test/java/org/testcontainers/kafka/KafkaContainerTest.java
index b1764d3b522..e81b52574ef 100644
--- a/modules/kafka/src/test/java/org/testcontainers/kafka/KafkaContainerTest.java
+++ b/modules/kafka/src/test/java/org/testcontainers/kafka/KafkaContainerTest.java
@@ -25,9 +25,11 @@ public void testUsage() throws Exception {
public void testUsageWithListener() throws Exception {
try (
Network network = Network.newNetwork();
+ // registerListener {
KafkaContainer kafka = new KafkaContainer("apache/kafka-native:3.8.0")
.withListener("kafka:19092")
.withNetwork(network);
+ // }
KCatContainer kcat = new KCatContainer().withNetwork(network)
) {
kafka.start();
diff --git a/modules/ldap/build.gradle b/modules/ldap/build.gradle
new file mode 100644
index 00000000000..5045bf79266
--- /dev/null
+++ b/modules/ldap/build.gradle
@@ -0,0 +1,8 @@
+description = "Testcontainers :: LDAP"
+
+dependencies {
+ api project(':testcontainers')
+
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'com.unboundid:unboundid-ldapsdk:7.0.3'
+}
diff --git a/modules/ldap/src/main/java/org/testcontainers/ldap/LLdapContainer.java b/modules/ldap/src/main/java/org/testcontainers/ldap/LLdapContainer.java
new file mode 100644
index 00000000000..c811bffa1ec
--- /dev/null
+++ b/modules/ldap/src/main/java/org/testcontainers/ldap/LLdapContainer.java
@@ -0,0 +1,90 @@
+package org.testcontainers.ldap;
+
+import com.github.dockerjava.api.command.InspectContainerResponse;
+import lombok.extern.slf4j.Slf4j;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;
+
+/**
+ * Testcontainers implementation for LLDAP.
+ *
+ * Supported image: {@code lldap/lldap}
+ *
+ * Exposed ports:
+ *
+ * - LDAP: 3890
+ * - UI: 17170
+ *
+ */
+@Slf4j
+public class LLdapContainer extends GenericContainer {
+
+ private static final String IMAGE_VERSION = "lldap/lldap";
+
+ private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse(IMAGE_VERSION);
+
+ private static final int LDAP_PORT = 3890;
+
+ private static final int LDAPS_PORT = 6360;
+
+ private static final int UI_PORT = 17170;
+
+ public LLdapContainer(String image) {
+ this(DockerImageName.parse(image));
+ }
+
+ public LLdapContainer(DockerImageName image) {
+ super(image);
+ image.assertCompatibleWith(DEFAULT_IMAGE_NAME);
+ addExposedPorts(LDAP_PORT, UI_PORT);
+
+ waitingFor(Wait.forHttp("/health").forPort(UI_PORT).forStatusCode(200));
+ }
+
+ @Override
+ protected void containerIsStarted(InspectContainerResponse containerInfo) {
+ log.info("LLDAP container is ready! UI available at http://{}:{}", getHost(), getMappedPort(UI_PORT));
+ }
+
+ public LLdapContainer withBaseDn(String baseDn) {
+ withEnv("LLDAP_LDAP_BASE_DN", baseDn);
+ return this;
+ }
+
+ public LLdapContainer withUserPass(String userPass) {
+ withEnv("LLDAP_LDAP_USER_PASS", userPass);
+ return this;
+ }
+
+ public int getLdapPort() {
+ int port = getEnvMap().getOrDefault("LLDAP_LDAPS_OPTIONS__ENABLED", "false").equals("true")
+ ? LDAPS_PORT
+ : LDAP_PORT;
+ return getMappedPort(port);
+ }
+
+ public String getLdapUrl() {
+ String protocol = getEnvMap().getOrDefault("LLDAP_LDAPS_OPTIONS__ENABLED", "false").equals("true")
+ ? "ldaps"
+ : "ldap";
+ return String.format("%s://%s:%d", protocol, getHost(), getLdapPort());
+ }
+
+ public String getBaseDn() {
+ return getEnvMap().getOrDefault("LLDAP_LDAP_BASE_DN", "dc=example,dc=com");
+ }
+
+ public String getUser() {
+ return String.format("cn=admin,ou=people,%s", getBaseDn());
+ }
+
+ @Deprecated
+ public String getUserPass() {
+ return getEnvMap().getOrDefault("LLDAP_LDAP_USER_PASS", "password");
+ }
+
+ public String getPassword() {
+ return getEnvMap().getOrDefault("LLDAP_LDAP_USER_PASS", "password");
+ }
+}
diff --git a/modules/ldap/src/test/java/org/testcontainers/ldap/LLdapContainerTest.java b/modules/ldap/src/test/java/org/testcontainers/ldap/LLdapContainerTest.java
new file mode 100644
index 00000000000..d8914c793c2
--- /dev/null
+++ b/modules/ldap/src/test/java/org/testcontainers/ldap/LLdapContainerTest.java
@@ -0,0 +1,66 @@
+package org.testcontainers.ldap;
+
+import com.unboundid.ldap.sdk.BindResult;
+import com.unboundid.ldap.sdk.LDAPConnection;
+import com.unboundid.ldap.sdk.LDAPException;
+import com.unboundid.ldap.sdk.LDAPURL;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class LLdapContainerTest {
+
+ @Test
+ public void test() throws LDAPException {
+ try ( // container {
+ LLdapContainer lldap = new LLdapContainer("lldap/lldap:v0.6.1-alpine")
+ // }
+ ) {
+ lldap.start();
+ LDAPConnection connection = new LDAPConnection(lldap.getHost(), lldap.getLdapPort());
+ BindResult result = connection.bind(lldap.getUser(), lldap.getPassword());
+ assertThat(result).isNotNull();
+ }
+ }
+
+ @Test
+ public void testUsingLdapUrl() throws LDAPException {
+ try (LLdapContainer lldap = new LLdapContainer("lldap/lldap:v0.6.1-alpine")) {
+ lldap.start();
+
+ LDAPURL ldapUrl = new LDAPURL(lldap.getLdapUrl());
+ LDAPConnection connection = new LDAPConnection(ldapUrl.getHost(), ldapUrl.getPort());
+ BindResult result = connection.bind(lldap.getUser(), lldap.getPassword());
+ assertThat(result).isNotNull();
+ }
+ }
+
+ @Test
+ public void testWithCustomBaseDn() throws LDAPException {
+ try (
+ LLdapContainer lldap = new LLdapContainer("lldap/lldap:v0.6.1-alpine")
+ .withBaseDn("dc=testcontainers,dc=org")
+ ) {
+ lldap.start();
+
+ assertThat(lldap.getBaseDn()).isEqualTo("dc=testcontainers,dc=org");
+
+ LDAPURL ldapUrl = new LDAPURL(lldap.getLdapUrl());
+ LDAPConnection connection = new LDAPConnection(ldapUrl.getHost(), ldapUrl.getPort());
+ BindResult result = connection.bind(lldap.getUser(), lldap.getPassword());
+ assertThat(result).isNotNull();
+ }
+ }
+
+ @Test
+ public void testWithCustomUserPass() throws LDAPException {
+ try (LLdapContainer lldap = new LLdapContainer("lldap/lldap:v0.6.1-alpine").withUserPass("adminPas$word")) {
+ lldap.start();
+
+ LDAPURL ldapUrl = new LDAPURL(lldap.getLdapUrl());
+ LDAPConnection connection = new LDAPConnection(ldapUrl.getHost(), ldapUrl.getPort());
+ BindResult result = connection.bind(lldap.getUser(), lldap.getPassword());
+ assertThat(result).isNotNull();
+ }
+ }
+}
diff --git a/examples/linked-container/src/test/resources/logback-test.xml b/modules/ldap/src/test/resources/logback-test.xml
similarity index 100%
rename from examples/linked-container/src/test/resources/logback-test.xml
rename to modules/ldap/src/test/resources/logback-test.xml
diff --git a/modules/localstack/build.gradle b/modules/localstack/build.gradle
index 2994bb7d2f2..f104d08e63c 100644
--- a/modules/localstack/build.gradle
+++ b/modules/localstack/build.gradle
@@ -9,6 +9,12 @@ dependencies {
testImplementation 'com.amazonaws:aws-java-sdk-logs'
testImplementation 'com.amazonaws:aws-java-sdk-lambda'
testImplementation 'com.amazonaws:aws-java-sdk-core'
- testImplementation 'software.amazon.awssdk:s3:2.28.6'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'software.amazon.awssdk:s3:2.31.77'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.localstack.LocalStackContainer"
+ ]
}
diff --git a/modules/localstack/src/main/java/org/testcontainers/containers/localstack/LocalStackContainer.java b/modules/localstack/src/main/java/org/testcontainers/containers/localstack/LocalStackContainer.java
index fb7d383898a..c38d5a34728 100644
--- a/modules/localstack/src/main/java/org/testcontainers/containers/localstack/LocalStackContainer.java
+++ b/modules/localstack/src/main/java/org/testcontainers/containers/localstack/LocalStackContainer.java
@@ -83,14 +83,6 @@ public class LocalStackContainer extends GenericContainer {
private final boolean isVersion2;
- /**
- * @deprecated use {@link #LocalStackContainer(DockerImageName)} instead
- */
- @Deprecated
- public LocalStackContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* @deprecated use {@link #LocalStackContainer(DockerImageName)} instead
*/
diff --git a/modules/mariadb/build.gradle b/modules/mariadb/build.gradle
index 88734091907..416ae4f2707 100644
--- a/modules/mariadb/build.gradle
+++ b/modules/mariadb/build.gradle
@@ -7,8 +7,14 @@ dependencies {
compileOnly 'org.mariadb:r2dbc-mariadb:1.0.3'
testImplementation project(':jdbc-test')
- testImplementation 'org.mariadb.jdbc:mariadb-java-client:3.4.1'
+ testImplementation 'org.mariadb.jdbc:mariadb-java-client:3.5.4'
testImplementation testFixtures(project(':r2dbc'))
testRuntimeOnly 'org.mariadb:r2dbc-mariadb:1.0.3'
}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.MariaDBContainer"
+ ]
+}
diff --git a/modules/mariadb/src/main/java/org/testcontainers/containers/MariaDBContainer.java b/modules/mariadb/src/main/java/org/testcontainers/containers/MariaDBContainer.java
index fccfde6e4dc..bff0faf9d03 100644
--- a/modules/mariadb/src/main/java/org/testcontainers/containers/MariaDBContainer.java
+++ b/modules/mariadb/src/main/java/org/testcontainers/containers/MariaDBContainer.java
@@ -41,14 +41,6 @@ public class MariaDBContainer> extends JdbcD
private static final String MY_CNF_CONFIG_OVERRIDE_PARAM_NAME = "TC_MY_CNF";
- /**
- * @deprecated use {@link #MariaDBContainer(DockerImageName)} instead
- */
- @Deprecated
- public MariaDBContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public MariaDBContainer(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/mariadb/src/test/java/org/testcontainers/junit/mariadb/SimpleMariaDBTest.java b/modules/mariadb/src/test/java/org/testcontainers/junit/mariadb/SimpleMariaDBTest.java
index dedfb06cf55..d4bcc529cbc 100644
--- a/modules/mariadb/src/test/java/org/testcontainers/junit/mariadb/SimpleMariaDBTest.java
+++ b/modules/mariadb/src/test/java/org/testcontainers/junit/mariadb/SimpleMariaDBTest.java
@@ -79,7 +79,7 @@ public void testMariaDBWithCommandOverride() throws SQLException {
ResultSet resultSet = performQuery(mariadbCustomConfig, "show variables like 'auto_increment_increment'");
String result = resultSet.getString("Value");
- assertThat(result).as("Auto increment increment should be overriden by command line").isEqualTo("10");
+ assertThat(result).as("Auto increment increment should be overridden by command line").isEqualTo("10");
}
}
diff --git a/modules/milvus/build.gradle b/modules/milvus/build.gradle
index a15d24bbbe5..bb7167babfa 100644
--- a/modules/milvus/build.gradle
+++ b/modules/milvus/build.gradle
@@ -3,6 +3,6 @@ description = "Testcontainers :: Milvus"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.milvus:milvus-sdk-java:2.4.4'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.milvus:milvus-sdk-java:2.6.0'
}
diff --git a/modules/minio/build.gradle b/modules/minio/build.gradle
index e4ebe5d0b43..8fae8536fb0 100644
--- a/modules/minio/build.gradle
+++ b/modules/minio/build.gradle
@@ -3,6 +3,6 @@ description = "Testcontainers :: MinIO"
dependencies {
api project(':testcontainers')
- testImplementation("io.minio:minio:8.5.12")
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation("io.minio:minio:8.5.17")
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/mockserver/build.gradle b/modules/mockserver/build.gradle
index 31f4944a572..a41957cf57a 100644
--- a/modules/mockserver/build.gradle
+++ b/modules/mockserver/build.gradle
@@ -4,5 +4,11 @@ dependencies {
api project(':testcontainers')
testImplementation 'org.mock-server:mockserver-client-java:5.15.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.MockServerContainer"
+ ]
}
diff --git a/modules/mockserver/src/main/java/org/testcontainers/containers/MockServerContainer.java b/modules/mockserver/src/main/java/org/testcontainers/containers/MockServerContainer.java
index 9ca936d1656..00937214445 100644
--- a/modules/mockserver/src/main/java/org/testcontainers/containers/MockServerContainer.java
+++ b/modules/mockserver/src/main/java/org/testcontainers/containers/MockServerContainer.java
@@ -16,14 +16,6 @@ public class MockServerContainer extends GenericContainer {
public static final int PORT = 1080;
- /**
- * @deprecated use {@link #MockServerContainer(DockerImageName)} instead
- */
- @Deprecated
- public MockServerContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* @deprecated use {@link #MockServerContainer(DockerImageName)} instead
*/
diff --git a/modules/mongodb/build.gradle b/modules/mongodb/build.gradle
index 0904df92824..ad05dbb2e97 100644
--- a/modules/mongodb/build.gradle
+++ b/modules/mongodb/build.gradle
@@ -4,5 +4,11 @@ dependencies {
api project(':testcontainers')
testImplementation("org.mongodb:mongodb-driver-sync:5.1.4")
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.MongoDBContainer"
+ ]
}
diff --git a/modules/mongodb/src/main/java/org/testcontainers/containers/MongoDBContainer.java b/modules/mongodb/src/main/java/org/testcontainers/containers/MongoDBContainer.java
index 1dbf1fcc74e..e0bb04bb166 100644
--- a/modules/mongodb/src/main/java/org/testcontainers/containers/MongoDBContainer.java
+++ b/modules/mongodb/src/main/java/org/testcontainers/containers/MongoDBContainer.java
@@ -30,8 +30,6 @@ public class MongoDBContainer extends GenericContainer {
"mongodb/mongodb-enterprise-server"
);
- private static final String DEFAULT_TAG = "4.0.10";
-
private static final int CONTAINER_EXIT_CODE_OK = 0;
private static final int AWAIT_INIT_REPLICA_SET_ATTEMPTS = 60;
@@ -42,14 +40,6 @@ public class MongoDBContainer extends GenericContainer {
private boolean shardingEnabled;
- /**
- * @deprecated use {@link #MongoDBContainer(DockerImageName)} instead
- */
- @Deprecated
- public MongoDBContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public MongoDBContainer(@NonNull final String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/mongodb/src/main/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainer.java b/modules/mongodb/src/main/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainer.java
index 52067c01ff2..79e44d3ffb0 100644
--- a/modules/mongodb/src/main/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainer.java
+++ b/modules/mongodb/src/main/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainer.java
@@ -17,6 +17,10 @@ public class MongoDBAtlasLocalContainer extends GenericContainerdatabaseName.
+ *
+ * @param databaseName a database name.
+ * @return a database specific connection string.
+ */
+ public String getDatabaseConnectionString(final String databaseName) {
+ if (!isRunning()) {
+ throw new IllegalStateException("MongoDBContainer should be started first");
+ }
+ return baseConnectionString() + "/" + databaseName + "?" + DIRECT_CONNECTION;
}
}
diff --git a/modules/mongodb/src/test/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainerTest.java b/modules/mongodb/src/test/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainerTest.java
index 2d7664d51fa..720629d214c 100644
--- a/modules/mongodb/src/test/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainerTest.java
+++ b/modules/mongodb/src/test/java/org/testcontainers/mongodb/MongoDBAtlasLocalContainerTest.java
@@ -36,6 +36,26 @@ public void getConnectionString() {
}
}
+ @Test
+ public void getDatabaseConnectionString() {
+ try (
+ MongoDBAtlasLocalContainer container = new MongoDBAtlasLocalContainer("mongodb/mongodb-atlas-local:7.0.9")
+ ) {
+ container.start();
+ String databaseConnectionString = container.getDatabaseConnectionString();
+ assertThat(databaseConnectionString).isNotNull();
+ assertThat(databaseConnectionString).startsWith("mongodb://");
+ assertThat(databaseConnectionString)
+ .isEqualTo(
+ String.format(
+ "mongodb://%s:%d/test?directConnection=true",
+ container.getHost(),
+ container.getFirstMappedPort()
+ )
+ );
+ }
+ }
+
@Test
public void createAtlasIndexAndSearchIt() throws Exception {
try (
diff --git a/modules/mssqlserver/build.gradle b/modules/mssqlserver/build.gradle
index e7541434291..899bbcc88db 100644
--- a/modules/mssqlserver/build.gradle
+++ b/modules/mssqlserver/build.gradle
@@ -7,7 +7,7 @@ dependencies {
compileOnly 'io.r2dbc:r2dbc-mssql:1.0.2.RELEASE'
testImplementation project(':jdbc-test')
- testImplementation 'com.microsoft.sqlserver:mssql-jdbc:12.8.1.jre8'
+ testImplementation 'com.microsoft.sqlserver:mssql-jdbc:13.1.0.jre8-preview'
testImplementation project(':r2dbc')
testRuntimeOnly 'io.r2dbc:r2dbc-mssql:1.0.2.RELEASE'
@@ -15,3 +15,9 @@ dependencies {
// MSSQL's wait strategy requires the JDBC driver
testImplementation testFixtures(project(':r2dbc'))
}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.MSSQLServerContainer"
+ ]
+}
diff --git a/modules/mssqlserver/src/main/java/org/testcontainers/containers/MSSQLServerContainer.java b/modules/mssqlserver/src/main/java/org/testcontainers/containers/MSSQLServerContainer.java
index 6e902656bb2..10ccbff2c36 100644
--- a/modules/mssqlserver/src/main/java/org/testcontainers/containers/MSSQLServerContainer.java
+++ b/modules/mssqlserver/src/main/java/org/testcontainers/containers/MSSQLServerContainer.java
@@ -44,14 +44,6 @@ public class MSSQLServerContainer> exten
Pattern.compile("[^a-zA-Z0-9]+", Pattern.CASE_INSENSITIVE),
};
- /**
- * @deprecated use {@link #MSSQLServerContainer(DockerImageName)} instead
- */
- @Deprecated
- public MSSQLServerContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public MSSQLServerContainer(final String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
@@ -72,7 +64,7 @@ public Set getLivenessCheckPortNumbers() {
@Override
protected void configure() {
- // If license was not accepted programatically, check if it was accepted via resource file
+ // If license was not accepted programmatically, check if it was accepted via resource file
if (!getEnvMap().containsKey("ACCEPT_EULA")) {
LicenseAcceptance.assertLicenseAccepted(this.getDockerImageName());
acceptLicense();
diff --git a/modules/mysql/build.gradle b/modules/mysql/build.gradle
index d31fbcce266..3055cd141fc 100644
--- a/modules/mysql/build.gradle
+++ b/modules/mysql/build.gradle
@@ -4,13 +4,19 @@ dependencies {
api project(':jdbc')
compileOnly project(':r2dbc')
- compileOnly 'io.asyncer:r2dbc-mysql:1.3.0'
+ compileOnly 'io.asyncer:r2dbc-mysql:1.4.1'
testImplementation project(':jdbc-test')
testRuntimeOnly 'mysql:mysql-connector-java:8.0.33'
testImplementation testFixtures(project(':r2dbc'))
- testRuntimeOnly 'io.asyncer:r2dbc-mysql:1.3.0'
+ testRuntimeOnly 'io.asyncer:r2dbc-mysql:1.4.1'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.MySQLContainer"
+ ]
}
diff --git a/modules/mysql/src/main/java/org/testcontainers/containers/MySQLContainer.java b/modules/mysql/src/main/java/org/testcontainers/containers/MySQLContainer.java
index 76ff754b616..54f3952b6a2 100644
--- a/modules/mysql/src/main/java/org/testcontainers/containers/MySQLContainer.java
+++ b/modules/mysql/src/main/java/org/testcontainers/containers/MySQLContainer.java
@@ -41,14 +41,6 @@ public class MySQLContainer> extends JdbcDatab
private static final String MYSQL_ROOT_USER = "root";
- /**
- * @deprecated use {@link #MySQLContainer(DockerImageName)} instead
- */
- @Deprecated
- public MySQLContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public MySQLContainer(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/mysql/src/test/java/org/testcontainers/junit/mysql/SimpleMySQLTest.java b/modules/mysql/src/test/java/org/testcontainers/junit/mysql/SimpleMySQLTest.java
index 3db8da76673..5de23504584 100644
--- a/modules/mysql/src/test/java/org/testcontainers/junit/mysql/SimpleMySQLTest.java
+++ b/modules/mysql/src/test/java/org/testcontainers/junit/mysql/SimpleMySQLTest.java
@@ -91,7 +91,7 @@ public void testCommandOverride() throws SQLException {
ResultSet resultSet = performQuery(mysqlCustomConfig, "show variables like 'auto_increment_increment'");
String result = resultSet.getString("Value");
- assertThat(result).as("Auto increment increment should be overriden by command line").isEqualTo("42");
+ assertThat(result).as("Auto increment increment should be overridden by command line").isEqualTo("42");
}
}
diff --git a/modules/neo4j/build.gradle b/modules/neo4j/build.gradle
index d70292361d1..9c7a70454ff 100644
--- a/modules/neo4j/build.gradle
+++ b/modules/neo4j/build.gradle
@@ -33,6 +33,13 @@ dependencies {
api project(":testcontainers")
- testImplementation 'org.neo4j.driver:neo4j-java-driver:4.4.18'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.neo4j.driver:neo4j-java-driver:4.4.20'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.Neo4jContainer",
+ "org.testcontainers.containers.Neo4jLabsPlugin"
+ ]
}
diff --git a/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jContainer.java b/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jContainer.java
index e59e2ea2820..bd3499567d4 100644
--- a/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jContainer.java
+++ b/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jContainer.java
@@ -31,7 +31,7 @@
* - HTTPS: 7473
*
*/
-public class Neo4jContainer> extends GenericContainer {
+public class Neo4jContainer extends GenericContainer {
/**
* The image defaults to the official Neo4j image: Neo4j.
@@ -83,15 +83,6 @@ public class Neo4jContainer> extends GenericContaine
.forPort(DEFAULT_HTTP_PORT)
.forStatusCodeMatching(response -> response == HttpURLConnection.HTTP_OK);
- /**
- * Creates a Neo4jContainer using the official Neo4j docker image.
- * @deprecated use {@link #Neo4jContainer(DockerImageName)} instead
- */
- @Deprecated
- public Neo4jContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* Creates a Neo4jContainer using a specific docker image.
*
@@ -214,7 +205,7 @@ public String getHttpsUrl() {
*
* @return This container.
*/
- public S withEnterpriseEdition() {
+ public Neo4jContainer withEnterpriseEdition() {
if (!standardImage) {
throw new IllegalStateException(
String.format("Cannot use enterprise version with alternative image %s.", getDockerImageName())
@@ -236,7 +227,7 @@ public S withEnterpriseEdition() {
* @param adminPassword The admin password for the default database account.
* @return This container.
*/
- public S withAdminPassword(final String adminPassword) {
+ public Neo4jContainer withAdminPassword(final String adminPassword) {
if (adminPassword != null && adminPassword.length() < 8) {
logger().warn("Your provided admin password is too short and will not work with Neo4j 5.3+.");
}
@@ -249,7 +240,7 @@ public S withAdminPassword(final String adminPassword) {
*
* @return This container.
*/
- public S withoutAuthentication() {
+ public Neo4jContainer withoutAuthentication() {
return withAdminPassword(null);
}
@@ -273,7 +264,7 @@ public S withoutAuthentication() {
* @throws IllegalArgumentException If the database version is not 3.5.
* @return This container.
*/
- public S withDatabase(MountableFile graphDb) {
+ public Neo4jContainer withDatabase(MountableFile graphDb) {
if (!isNeo4jDatabaseVersionSupportingDbCopy()) {
throw new IllegalArgumentException(
"Copying database folder is not supported for Neo4j instances with version 4.0 or higher."
@@ -292,7 +283,7 @@ public S withDatabase(MountableFile graphDb) {
* @param plugins
* @return This container.
*/
- public S withPlugins(MountableFile plugins) {
+ public Neo4jContainer withPlugins(MountableFile plugins) {
return withCopyFileToContainer(plugins, "/var/lib/neo4j/plugins/");
}
@@ -304,7 +295,7 @@ public S withPlugins(MountableFile plugins) {
* @param value The value to set
* @return This container.
*/
- public S withNeo4jConfig(String key, String value) {
+ public Neo4jContainer withNeo4jConfig(String key, String value) {
addEnv(formatConfigurationKey(key), value);
return self();
}
@@ -316,31 +307,6 @@ public String getAdminPassword() {
return adminPassword;
}
- /**
- * Registers one or more {@link Neo4jLabsPlugin} for download and server startup.
- *
- * @param neo4jLabsPlugins The Neo4j plugins that should get started with the server.
- * @return This container.
- * @deprecated {@link Neo4jLabsPlugin} were deprecated due to naming changes that cannot be solved by this enumeration.
- * Please use the {@link Neo4jContainer#withPlugins(String...)} method.
- */
- public S withLabsPlugins(Neo4jLabsPlugin... neo4jLabsPlugins) {
- List pluginNames = Arrays
- .stream(neo4jLabsPlugins)
- .map(plugin -> plugin.pluginName)
- .collect(Collectors.toList());
-
- this.labsPlugins.addAll(pluginNames);
- return self();
- }
-
- /**
- * @deprecated Please use {@link Neo4jContainer#withPlugins(String...)} for named plugins.
- */
- public S withLabsPlugins(String... neo4jLabsPlugins) {
- return this.withPlugins(neo4jLabsPlugins);
- }
-
/**
* Registers one or more Neo4j plugins for server startup.
* The plugins are listed here
@@ -352,7 +318,7 @@ public S withLabsPlugins(String... neo4jLabsPlugins) {
* @param plugins The Neo4j plugins that should get started with the server.
* @return This container.
*/
- public S withPlugins(String... plugins) {
+ public Neo4jContainer withPlugins(String... plugins) {
this.labsPlugins.addAll(Arrays.asList(plugins));
return self();
}
@@ -385,7 +351,7 @@ private boolean isNeo4jDatabaseVersionSupportingDbCopy() {
return false;
}
- public S withRandomPassword() {
+ public Neo4jContainer withRandomPassword() {
return withAdminPassword(UUID.randomUUID().toString());
}
}
diff --git a/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jLabsPlugin.java b/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jLabsPlugin.java
deleted file mode 100644
index 0e3bf634140..00000000000
--- a/modules/neo4j/src/main/java/org/testcontainers/containers/Neo4jLabsPlugin.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package org.testcontainers.containers;
-
-/**
- * Reflects a plugin from the official Neo4j 4.4.
- * Neo4j Labs Plugin list.
- * There might be plugins not supported by your selected version of Neo4j.
- *
- * @deprecated Please use {@link Neo4jContainer#withLabsPlugins(String...)} with the matching plugin name for your Neo4j version.
- * Due to some renaming of the (Docker image) plugin names, there is no naming consistency across versions.
- * The plugins are listed here
- *
- */
-public enum Neo4jLabsPlugin {
- APOC("apoc"),
- APOC_CORE("apoc-core"),
- BLOOM("bloom"),
- STREAMS("streams"),
- GRAPH_DATA_SCIENCE("graph-data-science"),
- NEO_SEMANTICS("n10s");
-
- final String pluginName;
-
- Neo4jLabsPlugin(String pluginName) {
- this.pluginName = pluginName;
- }
-}
diff --git a/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerJUnitIntegrationTest.java b/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerJUnitIntegrationTest.java
index b5f9c2ad867..868d7e29f2d 100644
--- a/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerJUnitIntegrationTest.java
+++ b/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerJUnitIntegrationTest.java
@@ -18,7 +18,7 @@
public class Neo4jContainerJUnitIntegrationTest {
@ClassRule
- public static Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4");
+ public static Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4");
@Test
public void shouldStart() {
diff --git a/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerTest.java b/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerTest.java
index d2497cd900b..9827eb09900 100644
--- a/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerTest.java
+++ b/modules/neo4j/src/test/java/org/testcontainers/containers/Neo4jContainerTest.java
@@ -39,7 +39,7 @@ public void shouldDisableAuthentication() {
try (
// spotless:off
// withoutAuthentication {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withoutAuthentication()
// }
// spotless:on
@@ -58,7 +58,7 @@ public void shouldCopyDatabase() {
assumeThat(DockerClientFactory.instance().getInfo().getArchitecture()).isNotEqualTo("aarch64");
try (
// copyDatabase {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:3.5.30")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:3.5.30")
.withDatabase(MountableFile.forClasspathResource("/test-graph.db"))
// }
) {
@@ -74,7 +74,9 @@ public void shouldCopyDatabase() {
@Test
public void shouldFailOnCopyDatabaseForDefaultNeo4j4Image() {
assertThatIllegalArgumentException()
- .isThrownBy(() -> new Neo4jContainer<>().withDatabase(MountableFile.forClasspathResource("/test-graph.db")))
+ .isThrownBy(() -> {
+ new Neo4jContainer("neo4j:4.4.1").withDatabase(MountableFile.forClasspathResource("/test-graph.db"));
+ })
.withMessage("Copying database folder is not supported for Neo4j instances with version 4.0 or higher.");
}
@@ -82,7 +84,7 @@ public void shouldFailOnCopyDatabaseForDefaultNeo4j4Image() {
public void shouldFailOnCopyDatabaseForCustomNeo4j4Image() {
assertThatIllegalArgumentException()
.isThrownBy(() -> {
- new Neo4jContainer<>("neo4j:4.4.1").withDatabase(MountableFile.forClasspathResource("/test-graph.db"));
+ new Neo4jContainer("neo4j:4.4.1").withDatabase(MountableFile.forClasspathResource("/test-graph.db"));
})
.withMessage("Copying database folder is not supported for Neo4j instances with version 4.0 or higher.");
}
@@ -91,7 +93,7 @@ public void shouldFailOnCopyDatabaseForCustomNeo4j4Image() {
public void shouldFailOnCopyDatabaseForCustomNonSemverNeo4j4Image() {
assertThatIllegalArgumentException()
.isThrownBy(() -> {
- new Neo4jContainer<>("neo4j:latest").withDatabase(MountableFile.forClasspathResource("/test-graph.db"));
+ new Neo4jContainer("neo4j:latest").withDatabase(MountableFile.forClasspathResource("/test-graph.db"));
})
.withMessage("Copying database folder is not supported for Neo4j instances with version 4.0 or higher.");
}
@@ -100,7 +102,7 @@ public void shouldFailOnCopyDatabaseForCustomNonSemverNeo4j4Image() {
public void shouldCopyPlugins() {
try (
// registerPluginsPath {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withPlugins(MountableFile.forClasspathResource("/custom-plugins"))
// }
) {
@@ -115,7 +117,7 @@ public void shouldCopyPlugins() {
public void shouldCopyPlugin() {
try (
// registerPluginsJar {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withPlugins(MountableFile.forClasspathResource("/custom-plugins/hello-world.jar"))
// }
) {
@@ -140,7 +142,7 @@ public void shouldCheckEnterpriseLicense() {
String expectedImageName = "neo4j:4.4-enterprise";
assertThatExceptionOfType(IllegalStateException.class)
- .isThrownBy(() -> new Neo4jContainer<>("neo4j:4.4").withEnterpriseEdition())
+ .isThrownBy(() -> new Neo4jContainer("neo4j:4.4").withEnterpriseEdition())
.withMessageContaining("The image " + expectedImageName + " requires you to accept a license agreement.");
}
@@ -150,7 +152,7 @@ public void shouldRunEnterprise() {
try (
// enterpriseEdition {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withEnterpriseEdition()
// }
.withAdminPassword("Picard123")
@@ -170,7 +172,7 @@ public void shouldRunEnterprise() {
@Test
public void shouldAddConfigToEnvironment() {
// neo4jConfiguration {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withNeo4jConfig("dbms.security.procedures.unrestricted", "apoc.*,algo.*")
.withNeo4jConfig("dbms.tx_log.rotation.size", "42M");
// }
@@ -182,7 +184,7 @@ public void shouldAddConfigToEnvironment() {
@Test
public void shouldRespectEnvironmentAuth() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withEnv("NEO4J_AUTH", "neo4j/secret");
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withEnv("NEO4J_AUTH", "neo4j/secret");
neo4jContainer.configure();
@@ -192,7 +194,7 @@ public void shouldRespectEnvironmentAuth() {
@Test
public void shouldSetCustomPasswordCorrectly() {
// withAdminPassword {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withAdminPassword("verySecret");
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withAdminPassword("verySecret");
// }
neo4jContainer.configure();
@@ -202,7 +204,7 @@ public void shouldSetCustomPasswordCorrectly() {
@Test
public void containerAdminPasswordOverrulesEnvironmentAuth() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withEnv("NEO4J_AUTH", "neo4j/secret")
.withAdminPassword("anotherSecret");
@@ -213,7 +215,7 @@ public void containerAdminPasswordOverrulesEnvironmentAuth() {
@Test
public void containerWithoutAuthenticationOverrulesEnvironmentAuth() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4")
.withEnv("NEO4J_AUTH", "neo4j/secret")
.withoutAuthentication();
@@ -224,7 +226,7 @@ public void containerWithoutAuthenticationOverrulesEnvironmentAuth() {
@Test
public void shouldRespectAlreadyDefinedPortMappingsBolt() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withExposedPorts(7687);
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withExposedPorts(7687);
neo4jContainer.configure();
@@ -233,7 +235,7 @@ public void shouldRespectAlreadyDefinedPortMappingsBolt() {
@Test
public void shouldRespectAlreadyDefinedPortMappingsHttp() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withExposedPorts(7474);
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withExposedPorts(7474);
neo4jContainer.configure();
@@ -242,7 +244,7 @@ public void shouldRespectAlreadyDefinedPortMappingsHttp() {
@Test
public void shouldRespectAlreadyDefinedPortMappingsWithoutHttps() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withExposedPorts(7687, 7474);
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withExposedPorts(7687, 7474);
neo4jContainer.configure();
@@ -251,7 +253,7 @@ public void shouldRespectAlreadyDefinedPortMappingsWithoutHttps() {
@Test
public void shouldDefaultExportBoltHttpAndHttps() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4");
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4");
neo4jContainer.configure();
@@ -260,67 +262,16 @@ public void shouldDefaultExportBoltHttpAndHttps() {
@Test
public void shouldRespectCustomWaitStrategy() {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").waitingFor(new CustomDummyWaitStrategy());
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").waitingFor(new CustomDummyWaitStrategy());
neo4jContainer.configure();
assertThat(neo4jContainer.getWaitStrategy()).isInstanceOf(CustomDummyWaitStrategy.class);
}
- // Test for deprecated functionality to be still alive, if `Neo4jLabsPlugin` gets removed, remove this test.
- @Test
- public void shouldConfigureSingleLabsPluginByType() {
- try (
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withLabsPlugins(Neo4jLabsPlugin.APOC)
- ) {
- // needs to get called explicitly for setup
- neo4jContainer.configure();
-
- assertThat(neo4jContainer.getEnvMap()).containsEntry("NEO4JLABS_PLUGINS", "[\"apoc\"]");
- }
- }
-
- // Test for deprecated functionality to be still alive, if `Neo4jLabsPlugin` gets removed, remove this test.
- @Test
- public void shouldConfigureMultipleLabsPluginsByType() {
- try (
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4")
- .withLabsPlugins(Neo4jLabsPlugin.APOC, Neo4jLabsPlugin.BLOOM);
- ) {
- // needs to get called explicitly for setup
- neo4jContainer.configure();
-
- assertThat(neo4jContainer.getEnvMap().get("NEO4JLABS_PLUGINS"))
- .containsAnyOf("[\"apoc\",\"bloom\"]", "[\"bloom\",\"apoc\"]");
- }
- }
-
- // Test for deprecated functionality to be still alive, if `Neo4jContainer#withLabsPlugins` gets removed, remove this test.
- @Test
- public void shouldConfigureSingleLabsPlugin() {
- try (Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withLabsPlugins("apoc")) {
- // needs to get called explicitly for setup
- neo4jContainer.configure();
-
- assertThat(neo4jContainer.getEnvMap()).containsEntry("NEO4JLABS_PLUGINS", "[\"apoc\"]");
- }
- }
-
- // Test for deprecated functionality to be still alive, if `Neo4jContainer#withLabsPlugins` gets removed, remove this test.
- @Test
- public void shouldConfigureMultipleLabsPlugins() {
- try (Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withLabsPlugins("apoc", "bloom");) {
- // needs to get called explicitly for setup
- neo4jContainer.configure();
-
- assertThat(neo4jContainer.getEnvMap().get("NEO4JLABS_PLUGINS"))
- .containsAnyOf("[\"apoc\",\"bloom\"]", "[\"bloom\",\"apoc\"]");
- }
- }
-
@Test
public void shouldConfigureSinglePluginByName() {
- try (Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withPlugins("apoc")) {
+ try (Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withPlugins("apoc")) {
// needs to get called explicitly for setup
neo4jContainer.configure();
@@ -332,7 +283,7 @@ public void shouldConfigureSinglePluginByName() {
public void shouldConfigureMultiplePluginsByName() {
try (
// configureLabsPlugins {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4") //
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4") //
.withPlugins("apoc", "bloom");
// }
) {
@@ -348,7 +299,7 @@ public void shouldConfigureMultiplePluginsByName() {
public void shouldCreateRandomUuidBasedPasswords() {
try (
// withRandomPassword {
- Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4").withRandomPassword();
+ Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4").withRandomPassword();
// }
) {
// It will throw an exception if it's not UUID parsable.
@@ -361,7 +312,7 @@ public void shouldCreateRandomUuidBasedPasswords() {
@Test
public void shouldWarnOnPasswordTooShort() {
- try (Neo4jContainer> neo4jContainer = new Neo4jContainer<>("neo4j:4.4");) {
+ try (Neo4jContainer neo4jContainer = new Neo4jContainer("neo4j:4.4");) {
Logger logger = (Logger) DockerLoggerFactory.getLogger("neo4j:4.4");
TestLogAppender testLogAppender = new TestLogAppender();
logger.addAppender(testLogAppender);
@@ -401,7 +352,7 @@ protected void append(ILoggingEvent eventObject) {
}
}
- private static Driver getDriver(Neo4jContainer> container) {
+ private static Driver getDriver(Neo4jContainer container) {
AuthToken authToken = AuthTokens.none();
if (container.getAdminPassword() != null) {
authToken = AuthTokens.basic("neo4j", container.getAdminPassword());
diff --git a/modules/nginx/build.gradle b/modules/nginx/build.gradle
index 13232b4bfce..c05010c45e9 100644
--- a/modules/nginx/build.gradle
+++ b/modules/nginx/build.gradle
@@ -2,6 +2,12 @@ description = "Testcontainers :: Nginx"
dependencies {
api project(':testcontainers')
- compileOnly 'org.jetbrains:annotations:24.1.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.NginxContainer"
+ ]
}
diff --git a/modules/nginx/src/main/java/org/testcontainers/containers/NginxContainer.java b/modules/nginx/src/main/java/org/testcontainers/containers/NginxContainer.java
index 9a8687aaf8c..7f6b5b0f74c 100644
--- a/modules/nginx/src/main/java/org/testcontainers/containers/NginxContainer.java
+++ b/modules/nginx/src/main/java/org/testcontainers/containers/NginxContainer.java
@@ -1,31 +1,18 @@
package org.testcontainers.containers;
import org.jetbrains.annotations.NotNull;
-import org.testcontainers.containers.traits.LinkableContainer;
import org.testcontainers.utility.DockerImageName;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Set;
-public class NginxContainer>
- extends GenericContainer
- implements LinkableContainer {
+public class NginxContainer extends GenericContainer {
private static final int NGINX_DEFAULT_PORT = 80;
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("nginx");
- private static final String DEFAULT_TAG = "1.9.4";
-
- /**
- * @deprecated use {@link #NginxContainer(DockerImageName)} instead
- */
- @Deprecated
- public NginxContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public NginxContainer(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
@@ -59,7 +46,7 @@ public void setCustomContent(String htmlContentPath) {
}
@Deprecated
- public SELF withCustomContent(String htmlContentPath) {
+ public NginxContainer withCustomContent(String htmlContentPath) {
this.setCustomContent(htmlContentPath);
return self();
}
diff --git a/modules/nginx/src/test/java/org/testcontainers/junit/SimpleNginxTest.java b/modules/nginx/src/test/java/org/testcontainers/junit/SimpleNginxTest.java
index d520419b902..05a5f5767f1 100644
--- a/modules/nginx/src/test/java/org/testcontainers/junit/SimpleNginxTest.java
+++ b/modules/nginx/src/test/java/org/testcontainers/junit/SimpleNginxTest.java
@@ -28,7 +28,7 @@ public class SimpleNginxTest {
// creatingContainer {
@Rule
- public NginxContainer> nginx = new NginxContainer<>(NGINX_IMAGE)
+ public NginxContainer nginx = new NginxContainer(NGINX_IMAGE)
.withCopyFileToContainer(MountableFile.forHostPath(tmpDirectory), "/usr/share/nginx/html")
.waitingFor(new HttpWaitStrategy());
@@ -64,7 +64,7 @@ public void testSimple() throws Exception {
assertHasCorrectExposedAndLivenessCheckPorts(nginx);
}
- private void assertHasCorrectExposedAndLivenessCheckPorts(NginxContainer> nginxContainer) throws Exception {
+ private void assertHasCorrectExposedAndLivenessCheckPorts(NginxContainer nginxContainer) {
assertThat(nginxContainer.getExposedPorts()).containsExactly(80);
assertThat(nginxContainer.getLivenessCheckPortNumbers()).containsExactly(nginxContainer.getMappedPort(80));
}
diff --git a/modules/ollama/build.gradle b/modules/ollama/build.gradle
index b6054a1d3c2..eda3bf5bb7a 100644
--- a/modules/ollama/build.gradle
+++ b/modules/ollama/build.gradle
@@ -3,6 +3,6 @@ description = "Testcontainers :: Ollama"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.rest-assured:rest-assured:5.5.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.rest-assured:rest-assured:5.5.5'
}
diff --git a/modules/openfga/build.gradle b/modules/openfga/build.gradle
index 6cbbaa91e68..45f4cf1244c 100644
--- a/modules/openfga/build.gradle
+++ b/modules/openfga/build.gradle
@@ -3,8 +3,8 @@ description = "Testcontainers :: OpenFGA"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'dev.openfga:openfga-sdk:0.7.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'dev.openfga:openfga-sdk:0.8.2'
}
test {
diff --git a/modules/oracle-free/build.gradle b/modules/oracle-free/build.gradle
index ac81dc66f6e..424f6c48616 100644
--- a/modules/oracle-free/build.gradle
+++ b/modules/oracle-free/build.gradle
@@ -4,13 +4,13 @@ dependencies {
api project(':jdbc')
compileOnly project(':r2dbc')
- compileOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.2.0'
+ compileOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.3.0'
testImplementation project(':jdbc-test')
- testImplementation 'com.oracle.database.jdbc:ojdbc11:23.5.0.24.07'
+ testImplementation 'com.oracle.database.jdbc:ojdbc11:23.8.0.25.04'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
testImplementation testFixtures(project(':r2dbc'))
- testRuntimeOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.2.0'
+ testRuntimeOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.3.0'
}
diff --git a/modules/oracle-xe/build.gradle b/modules/oracle-xe/build.gradle
index 3de6202e869..ff04ca864d1 100644
--- a/modules/oracle-xe/build.gradle
+++ b/modules/oracle-xe/build.gradle
@@ -4,13 +4,19 @@ dependencies {
api project(':jdbc')
compileOnly project(':r2dbc')
- compileOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.2.0'
+ compileOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.3.0'
testImplementation project(':jdbc-test')
- testImplementation 'com.oracle.database.jdbc:ojdbc11:23.5.0.24.07'
+ testImplementation 'com.oracle.database.jdbc:ojdbc11:23.8.0.25.04'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
testImplementation testFixtures(project(':r2dbc'))
- testRuntimeOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.2.0'
+ testRuntimeOnly 'com.oracle.database.r2dbc:oracle-r2dbc:1.3.0'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.OracleContainer"
+ ]
}
diff --git a/modules/oracle-xe/src/main/java/org/testcontainers/containers/OracleContainer.java b/modules/oracle-xe/src/main/java/org/testcontainers/containers/OracleContainer.java
index 75b16779f40..07db62a7b2e 100644
--- a/modules/oracle-xe/src/main/java/org/testcontainers/containers/OracleContainer.java
+++ b/modules/oracle-xe/src/main/java/org/testcontainers/containers/OracleContainer.java
@@ -63,14 +63,6 @@ public class OracleContainer extends JdbcDatabaseContainer {
private boolean usingSid = false;
- /**
- * @deprecated use {@link #OracleContainer(DockerImageName)} instead
- */
- @Deprecated
- public OracleContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public OracleContainer(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/orientdb/build.gradle b/modules/orientdb/build.gradle
index d353be80783..587ee684b68 100644
--- a/modules/orientdb/build.gradle
+++ b/modules/orientdb/build.gradle
@@ -3,9 +3,15 @@ description = "Testcontainers :: Orientdb"
dependencies {
api project(":testcontainers")
- api "com.orientechnologies:orientdb-client:3.2.33"
+ api "com.orientechnologies:orientdb-client:3.2.42"
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.apache.tinkerpop:gremlin-driver:3.7.2'
- testImplementation "com.orientechnologies:orientdb-gremlin:3.2.33"
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'org.apache.tinkerpop:gremlin-driver:3.7.3'
+ testImplementation "com.orientechnologies:orientdb-gremlin:3.2.42"
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.OrientDBContainer"
+ ]
}
diff --git a/modules/orientdb/src/main/java/org/testcontainers/containers/OrientDBContainer.java b/modules/orientdb/src/main/java/org/testcontainers/containers/OrientDBContainer.java
index aeaf053ff0a..67c81be7e90 100644
--- a/modules/orientdb/src/main/java/org/testcontainers/containers/OrientDBContainer.java
+++ b/modules/orientdb/src/main/java/org/testcontainers/containers/OrientDBContainer.java
@@ -36,8 +36,6 @@ public class OrientDBContainer extends GenericContainer {
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("orientdb");
- private static final String DEFAULT_TAG = "3.0.24-tp3";
-
private static final String DEFAULT_USERNAME = "admin";
private static final String DEFAULT_PASSWORD = "admin";
@@ -60,14 +58,6 @@ public class OrientDBContainer extends GenericContainer {
private ODatabaseSession session;
- /**
- * @deprecated use {@link #OrientDBContainer(DockerImageName)} instead
- */
- @Deprecated
- public OrientDBContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public OrientDBContainer(@NonNull String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/pinecone/build.gradle b/modules/pinecone/build.gradle
new file mode 100644
index 00000000000..d293456d91a
--- /dev/null
+++ b/modules/pinecone/build.gradle
@@ -0,0 +1,8 @@
+description = "Testcontainers :: Pinecone"
+
+dependencies {
+ api project(':testcontainers')
+
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.pinecone:pinecone-client:3.1.0'
+}
diff --git a/modules/pinecone/src/main/java/org/testcontainers/pinecone/PineconeLocalContainer.java b/modules/pinecone/src/main/java/org/testcontainers/pinecone/PineconeLocalContainer.java
new file mode 100644
index 00000000000..56c846872de
--- /dev/null
+++ b/modules/pinecone/src/main/java/org/testcontainers/pinecone/PineconeLocalContainer.java
@@ -0,0 +1,34 @@
+package org.testcontainers.pinecone;
+
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.utility.DockerImageName;
+
+/**
+ * Testcontainers implementation for Pinecone.
+ *
+ * Exposed port: 5080
+ */
+public class PineconeLocalContainer extends GenericContainer {
+
+ private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse(
+ "ghcr.io/pinecone-io/pinecone-local"
+ );
+
+ private static final int PORT = 5080;
+
+ public PineconeLocalContainer(String dockerImageName) {
+ this(DockerImageName.parse(dockerImageName));
+ }
+
+ public PineconeLocalContainer(DockerImageName dockerImageName) {
+ super(dockerImageName);
+ dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
+
+        withEnv("PORT", String.valueOf(PORT));
+        withExposedPorts(PORT);
+ }
+
+ public String getEndpoint() {
+ return "http://" + getHost() + ":" + getMappedPort(PORT);
+ }
+}
diff --git a/modules/pinecone/src/test/java/org/testcontainers/pinecone/PineconeLocalContainerTest.java b/modules/pinecone/src/test/java/org/testcontainers/pinecone/PineconeLocalContainerTest.java
new file mode 100644
index 00000000000..5843dfef81c
--- /dev/null
+++ b/modules/pinecone/src/test/java/org/testcontainers/pinecone/PineconeLocalContainerTest.java
@@ -0,0 +1,33 @@
+package org.testcontainers.pinecone;
+
+import io.pinecone.clients.Pinecone;
+import org.junit.Test;
+import org.openapitools.db_control.client.model.DeletionProtection;
+import org.openapitools.db_control.client.model.IndexModel;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class PineconeLocalContainerTest {
+
+ @Test
+ public void testSimple() {
+ try ( // container {
+ PineconeLocalContainer container = new PineconeLocalContainer("ghcr.io/pinecone-io/pinecone-local:v0.7.0")
+ // }
+ ) {
+ container.start();
+
+ // client {
+ Pinecone pinecone = new Pinecone.Builder("pclocal")
+ .withHost(container.getEndpoint())
+ .withTlsEnabled(false)
+ .build();
+ // }
+
+ String indexName = "example-index";
+ pinecone.createServerlessIndex(indexName, "cosine", 2, "aws", "us-east-1", DeletionProtection.DISABLED);
+ IndexModel indexModel = pinecone.describeIndex(indexName);
+ assertThat(indexModel.getDeletionProtection()).isEqualTo(DeletionProtection.DISABLED);
+ }
+ }
+}
diff --git a/modules/dynalite/src/test/resources/logback-test.xml b/modules/pinecone/src/test/resources/logback-test.xml
similarity index 100%
rename from modules/dynalite/src/test/resources/logback-test.xml
rename to modules/pinecone/src/test/resources/logback-test.xml
diff --git a/modules/postgresql/build.gradle b/modules/postgresql/build.gradle
index 23ad800186b..bbe8e596b95 100644
--- a/modules/postgresql/build.gradle
+++ b/modules/postgresql/build.gradle
@@ -7,10 +7,16 @@ dependencies {
compileOnly 'io.r2dbc:r2dbc-postgresql:0.8.13.RELEASE'
testImplementation project(':jdbc-test')
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
+ testRuntimeOnly 'org.postgresql:postgresql:42.7.7'
testImplementation testFixtures(project(':r2dbc'))
testRuntimeOnly 'io.r2dbc:r2dbc-postgresql:0.8.13.RELEASE'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.PostgreSQLContainer"
+ ]
}
diff --git a/modules/postgresql/src/main/java/org/testcontainers/containers/PostgreSQLContainer.java b/modules/postgresql/src/main/java/org/testcontainers/containers/PostgreSQLContainer.java
index 27b3679b626..d96f30864d9 100644
--- a/modules/postgresql/src/main/java/org/testcontainers/containers/PostgreSQLContainer.java
+++ b/modules/postgresql/src/main/java/org/testcontainers/containers/PostgreSQLContainer.java
@@ -41,14 +41,6 @@ public class PostgreSQLContainer> extends
private static final String FSYNC_OFF_OPTION = "fsync=off";
- /**
- * @deprecated use {@link #PostgreSQLContainer(DockerImageName)} or {@link #PostgreSQLContainer(String)} instead
- */
- @Deprecated
- public PostgreSQLContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public PostgreSQLContainer(final String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/postgresql/src/test/java/org/testcontainers/containers/TimescaleDBContainerTest.java b/modules/postgresql/src/test/java/org/testcontainers/containers/TimescaleDBContainerTest.java
index f49c7dc6aa7..90f3113c7c0 100644
--- a/modules/postgresql/src/test/java/org/testcontainers/containers/TimescaleDBContainerTest.java
+++ b/modules/postgresql/src/test/java/org/testcontainers/containers/TimescaleDBContainerTest.java
@@ -35,7 +35,7 @@ public void testCommandOverride() throws SQLException {
"SELECT current_setting('max_connections')"
);
String result = resultSet.getString(1);
- assertThat(result).as("max_connections should be overriden").isEqualTo("42");
+ assertThat(result).as("max_connections should be overridden").isEqualTo("42");
}
}
@@ -54,7 +54,7 @@ public void testUnsetCommand() throws SQLException {
"SELECT current_setting('max_connections')"
);
String result = resultSet.getString(1);
- assertThat(result).as("max_connections should not be overriden").isNotEqualTo("42");
+ assertThat(result).as("max_connections should not be overridden").isNotEqualTo("42");
}
}
diff --git a/modules/postgresql/src/test/java/org/testcontainers/junit/postgresql/SimplePostgreSQLTest.java b/modules/postgresql/src/test/java/org/testcontainers/junit/postgresql/SimplePostgreSQLTest.java
index 93e49d2d24c..250f19d87a0 100644
--- a/modules/postgresql/src/test/java/org/testcontainers/junit/postgresql/SimplePostgreSQLTest.java
+++ b/modules/postgresql/src/test/java/org/testcontainers/junit/postgresql/SimplePostgreSQLTest.java
@@ -41,7 +41,7 @@ public void testCommandOverride() throws SQLException {
ResultSet resultSet = performQuery(postgres, "SELECT current_setting('max_connections')");
String result = resultSet.getString(1);
- assertThat(result).as("max_connections should be overriden").isEqualTo("42");
+ assertThat(result).as("max_connections should be overridden").isEqualTo("42");
}
}
@@ -56,7 +56,7 @@ public void testUnsetCommand() throws SQLException {
ResultSet resultSet = performQuery(postgres, "SELECT current_setting('max_connections')");
String result = resultSet.getString(1);
- assertThat(result).as("max_connections should not be overriden").isNotEqualTo("42");
+ assertThat(result).as("max_connections should not be overridden").isNotEqualTo("42");
}
}
diff --git a/modules/presto/build.gradle b/modules/presto/build.gradle
index ad35cd6ec25..988b00630d8 100644
--- a/modules/presto/build.gradle
+++ b/modules/presto/build.gradle
@@ -5,5 +5,5 @@ dependencies {
testImplementation project(':jdbc-test')
testRuntimeOnly 'io.prestosql:presto-jdbc:350'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
}
diff --git a/modules/pulsar/build.gradle b/modules/pulsar/build.gradle
index 7af1bc777e2..9aed0587d84 100644
--- a/modules/pulsar/build.gradle
+++ b/modules/pulsar/build.gradle
@@ -3,8 +3,14 @@ description = "Testcontainers :: Pulsar"
dependencies {
api project(':testcontainers')
- testImplementation platform("org.apache.pulsar:pulsar-bom:3.3.1")
+ testImplementation platform("org.apache.pulsar:pulsar-bom:4.0.5")
testImplementation 'org.apache.pulsar:pulsar-client'
testImplementation 'org.apache.pulsar:pulsar-client-admin'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.PulsarContainer"
+ ]
}
diff --git a/modules/pulsar/src/main/java/org/testcontainers/containers/PulsarContainer.java b/modules/pulsar/src/main/java/org/testcontainers/containers/PulsarContainer.java
index 36485ad76a0..027e030c0de 100644
--- a/modules/pulsar/src/main/java/org/testcontainers/containers/PulsarContainer.java
+++ b/modules/pulsar/src/main/java/org/testcontainers/containers/PulsarContainer.java
@@ -21,12 +21,6 @@ public class PulsarContainer extends GenericContainer {
public static final int BROKER_HTTP_PORT = 8080;
- /**
- * @deprecated The metrics endpoint is no longer being used for the WaitStrategy.
- */
- @Deprecated
- public static final String METRICS_ENDPOINT = "/metrics";
-
private static final String ADMIN_CLUSTERS_ENDPOINT = "/admin/v2/clusters";
/**
@@ -37,23 +31,12 @@ public class PulsarContainer extends GenericContainer {
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("apachepulsar/pulsar");
- @Deprecated
- private static final String DEFAULT_TAG = "3.0.0";
-
private final WaitAllStrategy waitAllStrategy = new WaitAllStrategy();
private boolean functionsWorkerEnabled = false;
private boolean transactionsEnabled = false;
- /**
- * @deprecated use {@link #PulsarContainer(DockerImageName)} instead
- */
- @Deprecated
- public PulsarContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* @deprecated use {@link #PulsarContainer(DockerImageName)} instead
*/
diff --git a/modules/qdrant/build.gradle b/modules/qdrant/build.gradle
index 790b22520ae..479928a5e0e 100644
--- a/modules/qdrant/build.gradle
+++ b/modules/qdrant/build.gradle
@@ -3,9 +3,9 @@ description = "Testcontainers :: Qdrant"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.qdrant:client:1.10.0'
- testImplementation platform('io.grpc:grpc-bom:1.68.0')
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.qdrant:client:1.14.1'
+ testImplementation platform('io.grpc:grpc-bom:1.73.0')
testImplementation 'io.grpc:grpc-stub'
testImplementation 'io.grpc:grpc-protobuf'
testImplementation 'io.grpc:grpc-netty-shaded'
diff --git a/modules/questdb/build.gradle b/modules/questdb/build.gradle
index 01bc5473247..7364293bbcc 100644
--- a/modules/questdb/build.gradle
+++ b/modules/questdb/build.gradle
@@ -4,10 +4,10 @@ dependencies {
api project(':testcontainers')
api project(':jdbc')
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
+ testRuntimeOnly 'org.postgresql:postgresql:42.7.7'
testImplementation project(':jdbc-test')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.questdb:questdb:7.3.9'
- testImplementation 'org.awaitility:awaitility:4.2.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'org.questdb:questdb:9.0.0'
+ testImplementation 'org.awaitility:awaitility:4.3.0'
testImplementation 'org.apache.httpcomponents:httpclient:4.5.14'
}
diff --git a/modules/questdb/src/test/java/org/testcontainers/junit/questdb/SimpleQuestDBTest.java b/modules/questdb/src/test/java/org/testcontainers/junit/questdb/SimpleQuestDBTest.java
index c38524018c3..16198740c0e 100644
--- a/modules/questdb/src/test/java/org/testcontainers/junit/questdb/SimpleQuestDBTest.java
+++ b/modules/questdb/src/test/java/org/testcontainers/junit/questdb/SimpleQuestDBTest.java
@@ -57,7 +57,7 @@ public void testRest() throws IOException {
}
private static void populateByInfluxLineProtocol(QuestDBContainer questdb, int rowCount) {
- try (Sender sender = Sender.builder().address(questdb.getIlpUrl()).build()) {
+ try (Sender sender = Sender.builder(Sender.Transport.TCP).address(questdb.getIlpUrl()).build()) {
for (int i = 0; i < rowCount; i++) {
sender
.table(TABLE_NAME)
diff --git a/modules/r2dbc/build.gradle b/modules/r2dbc/build.gradle
index 1415df161fa..b506f5a2515 100644
--- a/modules/r2dbc/build.gradle
+++ b/modules/r2dbc/build.gradle
@@ -8,10 +8,10 @@ dependencies {
api project(':testcontainers')
api 'io.r2dbc:r2dbc-spi:0.9.0.RELEASE'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
testImplementation 'io.r2dbc:r2dbc-postgresql:0.8.13.RELEASE'
testImplementation project(':postgresql')
- testFixturesImplementation 'io.projectreactor:reactor-core:3.6.10'
- testFixturesImplementation 'org.assertj:assertj-core:3.26.3'
+ testFixturesImplementation 'io.projectreactor:reactor-core:3.7.8'
+ testFixturesImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/rabbitmq/build.gradle b/modules/rabbitmq/build.gradle
index 42a12d68759..3137ec7f7e1 100644
--- a/modules/rabbitmq/build.gradle
+++ b/modules/rabbitmq/build.gradle
@@ -2,7 +2,13 @@ description = "Testcontainers :: RabbitMQ"
dependencies {
api project(":testcontainers")
- testImplementation 'com.rabbitmq:amqp-client:5.22.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ testImplementation 'com.rabbitmq:amqp-client:5.25.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.RabbitMQContainer"
+ ]
}
diff --git a/modules/rabbitmq/src/main/java/org/testcontainers/containers/RabbitMQContainer.java b/modules/rabbitmq/src/main/java/org/testcontainers/containers/RabbitMQContainer.java
index 9eff3fa83f2..3bb45632c54 100644
--- a/modules/rabbitmq/src/main/java/org/testcontainers/containers/RabbitMQContainer.java
+++ b/modules/rabbitmq/src/main/java/org/testcontainers/containers/RabbitMQContainer.java
@@ -35,8 +35,6 @@ public class RabbitMQContainer extends GenericContainer {
*/
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("rabbitmq");
- private static final String DEFAULT_TAG = "3.7.25-management-alpine";
-
private static final int DEFAULT_AMQP_PORT = 5672;
private static final int DEFAULT_AMQPS_PORT = 5671;
@@ -51,15 +49,6 @@ public class RabbitMQContainer extends GenericContainer {
private final List> values = new ArrayList<>();
- /**
- * Creates a RabbitMQ container using the official RabbitMQ docker image.
- * @deprecated use {@link #RabbitMQContainer(DockerImageName)} instead
- */
- @Deprecated
- public RabbitMQContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
/**
* Creates a RabbitMQ container using a specific docker image.
*
diff --git a/modules/redpanda/build.gradle b/modules/redpanda/build.gradle
index 2db469936f6..e3795d93656 100644
--- a/modules/redpanda/build.gradle
+++ b/modules/redpanda/build.gradle
@@ -2,10 +2,10 @@ description = "Testcontainers :: Redpanda"
dependencies {
api project(':testcontainers')
- shaded 'org.freemarker:freemarker:2.3.33'
+ shaded 'org.freemarker:freemarker:2.3.34'
- testImplementation 'org.apache.kafka:kafka-clients:3.8.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.rest-assured:rest-assured:5.5.0'
- testImplementation 'org.awaitility:awaitility:4.2.2'
+ testImplementation 'org.apache.kafka:kafka-clients:4.0.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.rest-assured:rest-assured:5.5.5'
+ testImplementation 'org.awaitility:awaitility:4.3.0'
}
diff --git a/modules/redpanda/src/main/resources/testcontainers/bootstrap.yaml.ftl b/modules/redpanda/src/main/resources/testcontainers/bootstrap.yaml.ftl
index f066cf428aa..616415c359e 100644
--- a/modules/redpanda/src/main/resources/testcontainers/bootstrap.yaml.ftl
+++ b/modules/redpanda/src/main/resources/testcontainers/bootstrap.yaml.ftl
@@ -1,7 +1,7 @@
# Injected by testcontainers
# This file contains cluster properties which will only be considered when
# starting the cluster for the first time. Afterwards, you can configure cluster
-# properties via the Redpanda Admi n API.
+# properties via the Redpanda Admin API.
superusers:
<#if kafkaApi.superusers?has_content >
<#list kafkaApi.superusers as superuser>
diff --git a/modules/redpanda/src/test/java/org/testcontainers/redpanda/RedpandaContainerTest.java b/modules/redpanda/src/test/java/org/testcontainers/redpanda/RedpandaContainerTest.java
index 605ad76510a..d07f33d8b00 100644
--- a/modules/redpanda/src/test/java/org/testcontainers/redpanda/RedpandaContainerTest.java
+++ b/modules/redpanda/src/test/java/org/testcontainers/redpanda/RedpandaContainerTest.java
@@ -111,7 +111,7 @@ public void testUsageWithListener() throws Exception {
RedpandaContainer redpanda = new RedpandaContainer("docker.redpanda.com/redpandadata/redpanda:v23.1.7")
.withListener(() -> "redpanda:19092")
.withNetwork(network);
- GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.4.1")
+ GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.9.0")
.withCreateContainerCmdModifier(cmd -> {
cmd.withEntrypoint("sh");
})
@@ -141,7 +141,7 @@ public void testUsageWithListenerInTheSameNetwork() throws Exception {
.withNetwork(network);
// }
// createKCatContainer {
- GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.4.1")
+ GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.9.0")
.withCreateContainerCmdModifier(cmd -> {
cmd.withEntrypoint("sh");
})
@@ -200,7 +200,7 @@ public void testUsageWithListenerAndSasl() throws Exception {
.withSuperuser("panda")
.withListener("my-panda:29092")
.withNetwork(network);
- GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.4.1")
+ GenericContainer> kcat = new GenericContainer<>("confluentinc/cp-kcat:7.9.0")
.withCreateContainerCmdModifier(cmd -> {
cmd.withEntrypoint("sh");
})
diff --git a/modules/scylladb/build.gradle b/modules/scylladb/build.gradle
new file mode 100644
index 00000000000..4136702010d
--- /dev/null
+++ b/modules/scylladb/build.gradle
@@ -0,0 +1,9 @@
+description = "Testcontainers :: ScyllaDB"
+
+dependencies {
+ api project(":testcontainers")
+
+ testImplementation 'com.scylladb:java-driver-core:4.19.0.1'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'software.amazon.awssdk:dynamodb:2.31.77'
+}
diff --git a/modules/scylladb/src/main/java/org/testcontainers/scylladb/ScyllaDBContainer.java b/modules/scylladb/src/main/java/org/testcontainers/scylladb/ScyllaDBContainer.java
new file mode 100644
index 00000000000..f181b8b9f00
--- /dev/null
+++ b/modules/scylladb/src/main/java/org/testcontainers/scylladb/ScyllaDBContainer.java
@@ -0,0 +1,109 @@
+package org.testcontainers.scylladb;
+
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;
+import org.testcontainers.utility.MountableFile;
+
+import java.net.InetSocketAddress;
+import java.util.Optional;
+
+/**
+ * Testcontainers implementation for ScyllaDB.
+ *
+ * Supported image: {@code scylladb/scylla}
+ *
+ * Exposed ports:
+ *
+ * - CQL Port: 9042
+ * - Shard Aware Port: 19042
+ * - Alternator Port: 8000
+ *
+ */
+public class ScyllaDBContainer extends GenericContainer {
+
+ private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("scylladb/scylla");
+
+ private static final Integer CQL_PORT = 9042;
+
+ private static final Integer SHARD_AWARE_PORT = 19042;
+
+ private static final Integer ALTERNATOR_PORT = 8000;
+
+ private static final String COMMAND = "--developer-mode=1 --overprovisioned=1";
+
+ private static final String CONTAINER_CONFIG_LOCATION = "/etc/scylla";
+
+ private boolean alternatorEnabled = false;
+
+ private String configLocation;
+
+ public ScyllaDBContainer(String dockerImageName) {
+ this(DockerImageName.parse(dockerImageName));
+ }
+
+ public ScyllaDBContainer(DockerImageName dockerImageName) {
+ super(dockerImageName);
+ dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
+
+ withExposedPorts(CQL_PORT, SHARD_AWARE_PORT);
+
+ withCommand(COMMAND);
+ waitingFor(Wait.forLogMessage(".*initialization completed..*", 1));
+ }
+
+ @Override
+ protected void configure() {
+ if (this.alternatorEnabled) {
+ addExposedPort(8000);
+ String newCommand =
+ COMMAND + " --alternator-port=" + ALTERNATOR_PORT + " --alternator-write-isolation=always";
+ withCommand(newCommand);
+ }
+
+ // Map (effectively replace) directory in Docker with the content of resourceLocation if resource location is
+ // not null.
+ Optional
+ .ofNullable(configLocation)
+ .map(MountableFile::forClasspathResource)
+ .ifPresent(mountableFile -> withCopyFileToContainer(mountableFile, CONTAINER_CONFIG_LOCATION));
+ }
+
+ public ScyllaDBContainer withConfigurationOverride(String configLocation) {
+ this.configLocation = configLocation;
+ return this;
+ }
+
+ public ScyllaDBContainer withSsl(MountableFile certificate, MountableFile keyfile, MountableFile truststore) {
+ withCopyFileToContainer(certificate, "/etc/scylla/scylla.cer.pem");
+ withCopyFileToContainer(keyfile, "/etc/scylla/scylla.key.pem");
+ withCopyFileToContainer(truststore, "/etc/scylla/scylla.truststore");
+ withEnv("SSL_CERTFILE", "/etc/scylla/scylla.cer.pem");
+ return this;
+ }
+
+ public ScyllaDBContainer withAlternator() {
+ this.alternatorEnabled = true;
+ return this;
+ }
+
+ /**
+ * Retrieve an {@link InetSocketAddress} for connecting to the ScyllaDB container via the driver.
+ *
+ * @return A InetSocketAddress representation of this ScyllaDB container's host and port.
+ */
+ public InetSocketAddress getContactPoint() {
+ return new InetSocketAddress(getHost(), getMappedPort(CQL_PORT));
+ }
+
+ public InetSocketAddress getShardAwareContactPoint() {
+ return new InetSocketAddress(getHost(), getMappedPort(SHARD_AWARE_PORT));
+ }
+
+ public String getAlternatorEndpoint() {
+ if (!this.alternatorEnabled) {
+ throw new IllegalStateException("Alternator is not enabled");
+ }
+ return "http://" + getHost() + ":" + getMappedPort(ALTERNATOR_PORT);
+ }
+}
diff --git a/modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java b/modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java
new file mode 100644
index 00000000000..d151424fd20
--- /dev/null
+++ b/modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java
@@ -0,0 +1,203 @@
+package org.testcontainers.scylladb;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import org.junit.Test;
+import org.testcontainers.containers.Container;
+import org.testcontainers.utility.DockerImageName;
+import org.testcontainers.utility.MountableFile;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition;
+import software.amazon.awssdk.services.dynamodb.model.BillingMode;
+import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest;
+import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement;
+import software.amazon.awssdk.services.dynamodb.model.KeyType;
+import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.security.KeyManagementException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.UnrecoverableKeyException;
+import java.security.cert.CertificateException;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+public class ScyllaDBContainerTest {
+
+ private static final DockerImageName SCYLLADB_IMAGE = DockerImageName.parse("scylladb/scylla:6.2");
+
+ private static final String BASIC_QUERY = "SELECT release_version FROM system.local";
+
+ @Test
+ public void testSimple() {
+ try ( // container {
+ ScyllaDBContainer scylladb = new ScyllaDBContainer("scylladb/scylla:6.2")
+ // }
+ ) {
+ scylladb.start();
+ // session {
+ CqlSession session = CqlSession
+ .builder()
+ .addContactPoint(scylladb.getContactPoint())
+ .withLocalDatacenter("datacenter1")
+ .build();
+ // }
+ ResultSet resultSet = session.execute(BASIC_QUERY);
+ assertThat(resultSet.wasApplied()).isTrue();
+ assertThat(resultSet.one().getString(0)).isNotNull();
+ assertThat(session.getMetadata().getNodes().values()).hasSize(1);
+ }
+ }
+
+ @Test
+ public void testSimpleSsl()
+ throws NoSuchAlgorithmException, KeyStoreException, IOException, CertificateException, UnrecoverableKeyException, KeyManagementException {
+ try (
+ // customConfiguration {
+ ScyllaDBContainer scylladb = new ScyllaDBContainer("scylladb/scylla:6.2")
+ .withConfigurationOverride("scylla-test-ssl")
+ .withSsl(
+ MountableFile.forClasspathResource("keys/scylla.cer.pem"),
+ MountableFile.forClasspathResource("keys/scylla.key.pem"),
+ MountableFile.forClasspathResource("keys/scylla.truststore")
+ )
+ // }
+ ) {
+ // sslContext {
+ String testResourcesDir = getClass().getClassLoader().getResource("keys/").getPath();
+
+ KeyStore keyStore = KeyStore.getInstance("PKCS12");
+ keyStore.load(
+ Files.newInputStream(Paths.get(testResourcesDir + "scylla.keystore")),
+ "scylla".toCharArray()
+ );
+
+ KeyStore trustStore = KeyStore.getInstance("PKCS12");
+ trustStore.load(
+ Files.newInputStream(Paths.get(testResourcesDir + "scylla.truststore")),
+ "scylla".toCharArray()
+ );
+
+ KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(
+ KeyManagerFactory.getDefaultAlgorithm()
+ );
+ keyManagerFactory.init(keyStore, "scylla".toCharArray());
+
+ TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(
+ TrustManagerFactory.getDefaultAlgorithm()
+ );
+ trustManagerFactory.init(trustStore);
+
+ SSLContext sslContext = SSLContext.getInstance("TLS");
+ sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);
+ // }
+
+ scylladb.start();
+
+ CqlSession session = CqlSession
+ .builder()
+ .addContactPoint(scylladb.getContactPoint())
+ .withLocalDatacenter("datacenter1")
+ .withSslContext(sslContext)
+ .build();
+ ResultSet resultSet = session.execute(BASIC_QUERY);
+ assertThat(resultSet.wasApplied()).isTrue();
+ assertThat(resultSet.one().getString(0)).isNotNull();
+ assertThat(session.getMetadata().getNodes().values()).hasSize(1);
+ }
+ }
+
+ @Test
+ public void testSimpleSslCqlsh() throws IllegalStateException, InterruptedException, IOException {
+ try (
+ ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)
+ .withConfigurationOverride("scylla-test-ssl")
+ .withSsl(
+ MountableFile.forClasspathResource("keys/scylla.cer.pem"),
+ MountableFile.forClasspathResource("keys/scylla.key.pem"),
+ MountableFile.forClasspathResource("keys/scylla.truststore")
+ )
+ ) {
+ scylladb.start();
+
+ Container.ExecResult execResult = scylladb.execInContainer(
+ "cqlsh",
+ "--ssl",
+ "-e",
+ "select * from system_schema.keyspaces;"
+ );
+ assertThat(execResult.getStdout()).contains("keyspace_name");
+ }
+ }
+
+ @Test
+ public void testShardAwareness() {
+ try (ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)) {
+ scylladb.start();
+ // shardAwarenessSession {
+ CqlSession session = CqlSession
+ .builder()
+ .addContactPoint(scylladb.getShardAwareContactPoint())
+ .withLocalDatacenter("datacenter1")
+ .build();
+ // }
+ ResultSet resultSet = session.execute("SELECT driver_name FROM system.clients");
+ assertThat(resultSet.one().getString(0)).isNotNull();
+ assertThat(session.getMetadata().getNodes().values()).hasSize(1);
+ }
+ }
+
+ @Test
+ public void testAlternator() {
+ try ( // alternator {
+ ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE).withAlternator()
+ // }
+ ) {
+ scylladb.start();
+
+ // dynamodDbClient {
+ DynamoDbClient client = DynamoDbClient
+ .builder()
+ .endpointOverride(URI.create(scylladb.getAlternatorEndpoint()))
+ .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test")))
+ .region(Region.US_EAST_1)
+ .build();
+ // }
+ client.createTable(
+ CreateTableRequest
+ .builder()
+ .tableName("demo_table")
+ .keySchema(KeySchemaElement.builder().attributeName("id").keyType(KeyType.HASH).build())
+ .attributeDefinitions(
+ AttributeDefinition.builder().attributeName("id").attributeType(ScalarAttributeType.S).build()
+ )
+ .billingMode(BillingMode.PAY_PER_REQUEST)
+ .build()
+ );
+ assertThat(client.listTables().tableNames()).containsExactly(("demo_table"));
+ }
+ }
+
+ @Test
+ public void throwExceptionWhenAlternatorDisabled() {
+ try (ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)) {
+ scylladb.start();
+ assertThatThrownBy(scylladb::getAlternatorEndpoint)
+ .isInstanceOf(IllegalStateException.class)
+ .hasMessageContaining("Alternator is not enabled");
+ }
+ }
+}
diff --git a/modules/scylladb/src/test/resources/keys/node0.cer b/modules/scylladb/src/test/resources/keys/node0.cer
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/modules/scylladb/src/test/resources/keys/node0.p12 b/modules/scylladb/src/test/resources/keys/node0.p12
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/modules/scylladb/src/test/resources/keys/scylla.cer.pem b/modules/scylladb/src/test/resources/keys/scylla.cer.pem
new file mode 100644
index 00000000000..8f538448288
--- /dev/null
+++ b/modules/scylladb/src/test/resources/keys/scylla.cer.pem
@@ -0,0 +1,30 @@
+Bag Attributes
+ friendlyName: node0
+ localKeyID: 54 69 6D 65 20 31 37 33 35 39 34 30 37 38 39 31 39 34
+subject=C=None, L=None, O=None, OU=None, CN=None
+issuer=C=None, L=None, O=None, OU=None, CN=None
+-----BEGIN CERTIFICATE-----
+MIIEOzCCAqOgAwIBAgIIY4iVNsJSWiEwDQYJKoZIhvcNAQEMBQAwSzENMAsGA1UE
+BhMETm9uZTENMAsGA1UEBxMETm9uZTENMAsGA1UEChMETm9uZTENMAsGA1UECxME
+Tm9uZTENMAsGA1UEAxMETm9uZTAgFw0yNTAxMDMyMTM5MzFaGA8yMTI0MTIxMDIx
+MzkzMVowSzENMAsGA1UEBhMETm9uZTENMAsGA1UEBxMETm9uZTENMAsGA1UEChME
+Tm9uZTENMAsGA1UECxMETm9uZTENMAsGA1UEAxMETm9uZTCCAaIwDQYJKoZIhvcN
+AQEBBQADggGPADCCAYoCggGBAJuC18n+jlDcmR8CWxSK3fR2t1Am8P7IK5FY3ky8
+vEJSCMh+GoiqXVq67zhpOJnlgvEEZIDJGzBmJ/nIZvQwIAMxs792fHIEpEI2GTpf
+oaMf/9AAuPXuscg+5i4us1eVyVbrq3sREJ2NXHIPylcjtbwLjuepvmXTLp1d7oOJ
+Ad0X0W3UN/uwrlV3NPBuVLjJiCvJijWrCv1lFTuIcclqs478ozllp8UfcwJ57OH2
+Hq1ee9Ex9y7HouDPfFzmMRp1/jEcb0xbefpdW3Am6P9AXQuw2JMempwt5KbrAE+Z
+V1JnZCjSYSkspwid2bt5To/o60ypZUUswElasgAV/k8AxxDOkJGZusEqqVH7EFvk
+h3FiY/jb9cM1t5eLcpjx0wA+GOuErW3dgH5/WYugY2iiYjP1IQTb8Pk+gfAvq+2p
+SX3wISDCAh53j+aceUvNf+lItXsz66V9e+VH1xcOZcyO4gAMUVNYQFv/2wZ9knK4
+o30Aiqir1g2Hd5F/rWYNum+UbQIDAQABoyEwHzAdBgNVHQ4EFgQUqAWcYa3l/OHI
+JACasy+bZUwHP9kwDQYJKoZIhvcNAQEMBQADggGBAJQo55VJd8aEv6uiC5bKdACo
+M1GMvxWXUFzTdh2XKTOMF5GWwGJ3WRuW9o9wMZwXjvRihPfnx+DnfCCgZBOTGLXB
+3ObsogR9rij4uquUIkGJsshggY2gO82NVD7dRwGClncwTI+/RU7qGUym4SEdg6GP
+yfad3eTvqscQU1mNTxkaH0IDzPm0SWF8lcgGnrdHWlN+Nb8MJSHL5NFc9DA9pZck
+5/4MG1X8Hsk/UT04ln+8VrhYFkxkDv4fSKlr65slrst5721J0j+VLEwnuEl1onpW
+WHTTTIcOTDR5asrN9ZACCUsBxST8yfoJQ5G4HMO+UI1/1d928Ug6kHNWw2WR5FGG
+pJVu9vpTdA01MNkSeCuZhaPe2XgZcNPyHXcVxslNvFFZ0FVt6pSIhtmZ+4a8dRsm
+eU4NQ+PJ24En/8dErxaPqmi31wRZBg5Y9YlugJV4GQszCKHr0OYNK+Lpdq9dboUj
+6lxX7+gshUgKMzunUl/rTvddG7e/WuZbi9IvmJ4MYw==
+-----END CERTIFICATE-----
diff --git a/modules/scylladb/src/test/resources/keys/scylla.key.pem b/modules/scylladb/src/test/resources/keys/scylla.key.pem
new file mode 100644
index 00000000000..26ff1a8b80d
--- /dev/null
+++ b/modules/scylladb/src/test/resources/keys/scylla.key.pem
@@ -0,0 +1,44 @@
+Bag Attributes
+ friendlyName: node0
+ localKeyID: 54 69 6D 65 20 31 37 33 35 39 34 30 37 38 39 31 39 34
+Key Attributes:
+-----BEGIN PRIVATE KEY-----
+MIIG/AIBADANBgkqhkiG9w0BAQEFAASCBuYwggbiAgEAAoIBgQCbgtfJ/o5Q3Jkf
+AlsUit30drdQJvD+yCuRWN5MvLxCUgjIfhqIql1auu84aTiZ5YLxBGSAyRswZif5
+yGb0MCADMbO/dnxyBKRCNhk6X6GjH//QALj17rHIPuYuLrNXlclW66t7ERCdjVxy
+D8pXI7W8C47nqb5l0y6dXe6DiQHdF9Ft1Df7sK5VdzTwblS4yYgryYo1qwr9ZRU7
+iHHJarOO/KM5ZafFH3MCeezh9h6tXnvRMfcux6Lgz3xc5jEadf4xHG9MW3n6XVtw
+Juj/QF0LsNiTHpqcLeSm6wBPmVdSZ2Qo0mEpLKcIndm7eU6P6OtMqWVFLMBJWrIA
+Ff5PAMcQzpCRmbrBKqlR+xBb5IdxYmP42/XDNbeXi3KY8dMAPhjrhK1t3YB+f1mL
+oGNoomIz9SEE2/D5PoHwL6vtqUl98CEgwgIed4/mnHlLzX/pSLV7M+ulfXvlR9cX
+DmXMjuIADFFTWEBb/9sGfZJyuKN9AIqoq9YNh3eRf61mDbpvlG0CAwEAAQKCAYAT
+SMt3qhB96I04cjNXPc0+ZoZe8yVJgwscEBgpDfKOitu5+SFTN0UyXiISLcIuG278
+cl4ANnAftVtZt0dFGr6thrlSkd/mx7qS12CTg45oyywO4DgPj1UOjvY+Xd4xi0qX
+c8wlC72yu/ft0RV3bt83fXtwMPWCbQjHzQEp4JCRmUWISBvVI1jLEmhHNHdfHua6
+/1gbRaWsPJ/AbTAnGQtBPQUEth1y7W52rSX582pkd2YFUBvl+i2xkSlL3+PQ8zar
+5giPYZrGh5pCu/bflAsBGZyRx9keSsRK/bzqE0xeRAwTOir2V6g7LbSKLC04xKNc
+06/rHf1gslHNNOC3SjHvPyPfTJFHG9Tm+J5OoGo/Rr/W+GNgFMsFJ1fIq1VedpTt
+ov4CBnBgew8uHTwCoiL6T7f/ttd206A6nhEZ9tWFf8v0o6+y6Z7g0VniU9IuLRLr
+hXuKkxbBDZQRO8equlAKtbkqv6YFbGImmF/1YwP1/Ct1TR1BDM3m1UB6eez7BWEC
+gcEAx2RL8dJCVbKoRMjsKqNNh0R3vIz0+S8PTi3yjFjhggUCWOzlwMVFv/y0ztGf
+pj6Y41eaIdTwQu76uZra748Uj1Vwj5zAKXhb/THWoAidONFRj+qJ3ylDobrO5Fme
+RiCFlIfjNc6wYQiGqSMXTF02O67to44G+4zsrz+syIZO3ANOR+uB+LUNqvFKL5Kk
+BUDtU+r9poIoXkgYylzRb/6H0J+D0fcPGg+LHeRvp3DL6uueDN7eGXxdy7hF/q3L
+DqHlAoHBAMepUZUe5m6h6wIYWoaXPwvSeuBSHWUiGEqoNCrA/1tBI49AOjfn6ccy
+vu51ng/hEI/XpQ+QXvM/MNk3wyKe3HMjaPKiRbro9EFtva3pz3SrLoRHHzSGkzW3
+iTavg8RKo76Pz7MNEVqfkFn0pYr85EMIe4hmmrdR6nwd1oJY1CEMf4wllhWG+v1y
+901xLisuRZFE/X4ASvyDyY0Nh+9Cfd+80QS9fpZwuCR+mHQvIpp89F/Ohqyhk9CU
+HLncQD2f6QKBwBJZUX/UeJRIV6HU157o3kaXb2ljk1unEAKCyfJOb5o2ecvTKSV/
+Qfbz+3OY6Nc0pX8uXZnFbcLLGTmhXYp0IVE7bJtasnhegiCfyH97q3RCFv5md/+Y
+XYfxl/59nMoZThGoG6mk9qhHT5UbDJbTcR028Nl/RXc6tcE+29isO2+VwktuCczo
+ZHSZtdkA5qUxH2X8lxEOo0Zh3h4pQoDK7JavR0M4OCSOz5+VmQzQnYNl4WqPy+KO
+hlcsAwz301rqXQKBwHkY2+9q924gbM4vgTBiqY19EqPdihCd1kfprwJDXl21q2Cm
+HulrkqILyDwPQFf3NLlZnLZM5Rn5uKH2rTbhTWnUD0IiY9KSmhrY+ZNy3S2w6Zy3
+GlkcSkrpT6LIX039y0S4Ksw5X84sOzwkIweijLuPeIVpXetUFrlCy6jxQW/uCaox
+3c6euLpiMVZaEBuGjBEo2+rBOLnhIKyZiVn3ZSr/dXK/j/ik0zrnQYYuVHmI0hsN
+wycPNPzr6GReDuSRiQKBwChoS1Vvv49agWjyViIohGm6GHsY1Y1FNIqddHN5KgfA
+LGZRm8JhlTBPX89KgWUpemDjRHw84vqF46Md9+eeuovr697/fEVQ1W4FWJs9JLej
+2zmRlZqQgFnR6hdeeg1l7V8bPLR1zfl0R7+UkguP1xuI55fZc9H5icMCrOOCo1ug
+vdBrhNl4Swzn+wTVY62J/GX86Rfeybvn+BJQW4RCuKFqcxqctPuR5i+wMOxKWZP3
+fMq1U6czbhYvEjp3Y42Exw==
+-----END PRIVATE KEY-----
diff --git a/modules/scylladb/src/test/resources/keys/scylla.keystore b/modules/scylladb/src/test/resources/keys/scylla.keystore
new file mode 100644
index 00000000000..7f027beaf64
Binary files /dev/null and b/modules/scylladb/src/test/resources/keys/scylla.keystore differ
diff --git a/modules/scylladb/src/test/resources/keys/scylla.truststore b/modules/scylladb/src/test/resources/keys/scylla.truststore
new file mode 100644
index 00000000000..a798f8dcce4
Binary files /dev/null and b/modules/scylladb/src/test/resources/keys/scylla.truststore differ
diff --git a/modules/scylladb/src/test/resources/logback-test.xml b/modules/scylladb/src/test/resources/logback-test.xml
new file mode 100644
index 00000000000..83ef7a1a3ef
--- /dev/null
+++ b/modules/scylladb/src/test/resources/logback-test.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+ %d{HH:mm:ss.SSS} %-5level %logger - %msg%n
+
+
+
+
+
+
+
+
+
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/scylla.yaml b/modules/scylladb/src/test/resources/scylla-test-ssl/scylla.yaml
new file mode 100644
index 00000000000..7d79fabb70e
--- /dev/null
+++ b/modules/scylladb/src/test/resources/scylla-test-ssl/scylla.yaml
@@ -0,0 +1,662 @@
+# Scylla storage config YAML
+
+#######################################
+# This file is split to two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories. All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: ",,"
+ - seeds: "172.17.0.3,127.0.0.1,172.17.0.2,172.17.0.4,172.17.0.5"
+
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
+listen_address: localhost
+
+# Address to broadcast to other Scylla nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# When using multiple physical network interfaces, set this to true to listen on broadcast_address
+# in addition to the listen_address, allowing nodes to communicate in both interfaces.
+# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2.
+#
+# listen_on_broadcast_address: false
+
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# To disable the CQL native transport, remove this option and configure native_transport_port_ssl.
+native_transport_port: 9042
+
+# Like native_transport_port, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+native_shard_aware_transport_port: 19042
+
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+#native_transport_port_ssl: 9142
+
+# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+#native_shard_aware_transport_port_ssl: 19142
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# how long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Scylla enough about your network topology to route
+# requests efficiently
+# - it allows Scylla to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Scylla will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Scylla provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+# - GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Scylla will switch to the private IP after
+# establishing a connection.)
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# The address or interface to bind the native transport server to.
+#
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address: localhost
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for REST API server
+api_port: 10000
+
+# IP for the REST API server
+api_address: 127.0.0.1
+
+# Log WARN on any batch size exceeding this value. 128 kiB per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 128
+
+# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 1024
+
+ # Authentication backend, identifying users
+ # Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+ # PasswordAuthenticator}.
+ #
+ # - AllowAllAuthenticator performs no checks - set it to disable authentication.
+ # - PasswordAuthenticator relies on username/password pairs to authenticate
+ # users. It keeps usernames and hashed passwords in system_auth.credentials table.
+ # Please increase system_auth keyspace replication factor if you use this authenticator.
+ # - com.scylladb.auth.TransitionalAuthenticator requires username/password pair
+ # to authenticate in the same manner as PasswordAuthenticator, but improper credentials
+ # result in being logged in as an anonymous user. Use for upgrading clusters' auth.
+ # authenticator: AllowAllAuthenticator
+
+ # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+ # Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+ # CassandraAuthorizer}.
+ #
+ # - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+ # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+ # increase system_auth keyspace replication factor if you use this authorizer.
+ # - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+ # authorizing permission management. Otherwise, it allows all. Use for upgrading
+ # clusters' auth.
+ # authorizer: AllowAllAuthorizer
+
+ # initial_token allows you to specify tokens manually. While you can use # it with
+ # vnodes (num_tokens > 1, above) -- in which case you should provide a
+ # comma-separated list -- it's primarily used when adding nodes # to legacy clusters
+ # that do not have vnodes enabled.
+ # initial_token:
+
+ # RPC address to broadcast to drivers and other Scylla nodes. This cannot
+ # be set to 0.0.0.0. If left blank, this will be set to the value of
+ # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+ # be set.
+ # broadcast_rpc_address: 1.2.3.4
+
+ # Uncomment to enable experimental features
+ # experimental_features:
+ # - udf
+ # - alternator-streams
+ # - broadcast-tables
+ # - keyspace-storage-options
+
+ # The directory where hints files are stored if hinted handoff is enabled.
+ # hints_directory: /var/lib/scylla/hints
+
+# The directory where hints files are stored for materialized-view updates
+# view_hints_directory: /var/lib/scylla/view_hints
+
+# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# hinted_handoff_enabled: true
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+# max_hint_window_in_ms: 10800000 # 3 hours
+
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 10000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+# permissions_validity_in_ms: 10000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this also must have
+# a non-zero value. Defaults to 2000. It's recommended to set this value to
+# be at least 3 times smaller than the permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Murmur3Partitioner is currently the only supported partitioner,
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Total space to use for commitlogs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Scylla will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+#
+# A value of -1 (default) will automatically equate it to the total amount of memory
+# available for Scylla.
+commitlog_total_space_in_mb: -1
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# ssl_storage_port: 7001
+
+# listen_interface: eth0
+# listen_interface_prefer_ipv6: false
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+# start_native_transport: true
+
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# enable or disable keepalive on rpc/native connections
+# rpc_keepalive: true
+
+# Set to true to have Scylla create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+# incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Scylla won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+# snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+# auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+# tombstone_warn_threshold: 1000
+# tombstone_failure_threshold: 100000
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+# 1) a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# 2) but, Scylla will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+# column_index_size_in_kb: 64
+
+# Auto-scaling of the promoted index prevents running out of memory
+# when the promoted index grows too large (due to partitions with many rows
+# vs. too small column_index_size_in_kb). When the serialized representation
+# of the promoted index grows by this threshold, the desired block size
+# for this partition (initialized to column_index_size_in_kb)
+# is doubled, to decrease the sampling resolution by half.
+#
+# To disable promoted index auto-scaling, set the threshold to 0.
+# column_index_auto_scale_threshold_in_kb: 10240
+
+# Log a warning when writing partitions larger than this value
+# compaction_large_partition_warning_threshold_mb: 1000
+
+# Log a warning when writing rows larger than this value
+# compaction_large_row_warning_threshold_mb: 10
+
+# Log a warning when writing cells larger than this value
+# compaction_large_cell_warning_threshold_mb: 1
+
+# Log a warning when row number is larger than this value
+# compaction_rows_count_warning_threshold: 100000
+
+# Log a warning when writing a collection containing more elements than this value
+# compaction_collection_elements_count_warning_threshold: 10000
+
+# How long the coordinator should wait for seq or index scans to complete
+# range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+# counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+# cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+# truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+# request_timeout_in_ms: 10000
+
+# Enable or disable inter-node encryption.
+# You must also generate keys and provide the appropriate key and trust store locations and passwords.
+#
+# The available internode options are : all, none, dc, rack
+# If set to dc scylla will encrypt the traffic between the DCs
+# If set to rack scylla will encrypt the traffic between the racks
+#
+# SSL/TLS algorithm and ciphers used can be controlled by
+# the priority_string parameter. Info on priority string
+# syntax and values is available at:
+# https://gnutls.org/manual/html_node/Priority-Strings.html
+#
+# The require_client_auth parameter allows you to
+# restrict access to service based on certificate
+# validation. Client must provide a certificate
+# accepted by the used trust store to connect.
+#
+# server_encryption_options:
+# internode_encryption: none
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: true
+ certificate: /etc/scylla/scylla.cer.pem
+ keyfile: /etc/scylla/scylla.key.pem
+ truststore: /etc/scylla/scylla.truststore
+ truststore_password: scylla
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+# internode_compression: none
+
+# Enables inter-node traffic compression metrics (`scylla_rpc_compression_...`)
+# and enables a new implementation of inter-node traffic compressors,
+# capable of using zstd (in addition to the default lz4)
+# and shared dictionaries.
+# (Those features must still be enabled by other settings).
+# Has minor CPU cost.
+#
+# internode_compression_enable_advanced: false
+
+# Enables training of shared compression dictionaries on inter-node traffic.
+# New dictionaries are distributed throughout the cluster via Raft,
+# and used to improve the effectiveness of inter-node traffic compression
+# when `internode_compression_enable_advanced` is enabled.
+#
+# WARNING: this may leak unencrypted data to disk. The trained dictionaries
+# contain randomly-selected pieces of data written to the cluster.
+# When the Raft log is unencrypted, those pieces of data will be
+# written to disk unencrypted. At the moment of writing, there is no
+# way to encrypt the Raft log.
+# This problem is tracked by https://github.com/scylladb/scylla-enterprise/issues/4717.
+#
+# Can be: never - Dictionaries aren't trained by this node.
+# when_leader - New dictionaries are trained by this node only if
+# it's the current Raft leader.
+# always - Dictionaries are trained by this node unconditionally.
+#
+# For efficiency reasons, training shouldn't be enabled on more than one node.
+# To enable it on a single node, one can let the cluster pick the trainer
+# by setting `when_leader` on all nodes, or specify one manually by setting `always`
+# on one node and `never` on others.
+#
+# rpc_dict_training_when: never
+
+# A number in range [0.0, 1.0] specifying the share of CPU which can be spent
+# by this node on compressing inter-node traffic with zstd.
+#
+# Depending on the workload, enabling zstd might have a drastic negative
+# effect on performance, so it shouldn't be done lightly.
+#
+# internode_compression_zstd_max_cpu_fraction: 0.0
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+# inter_dc_tcp_nodelay: false
+
+# Relaxation of environment checks.
+#
+# Scylla places certain requirements on its environment. If these requirements are
+# not met, performance and reliability can be degraded.
+#
+# These requirements include:
+# - A filesystem with good support for asynchronous I/O (AIO). Currently,
+# this means XFS.
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Use a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall-back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task started internally is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# Time for which task manager task started by user is kept in memory after it completes.
+# user_task_ttl_in_seconds: 3600
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <workdir>/cql.m,
+#   where <workdir> is a path defined by the workdir configuration option,
+# * <socket_name>: the node will open the maintenance socket on the path <socket_name>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# Enable tablets for new keyspaces.
+# When enabled, newly created keyspaces will have tablets enabled by default.
+# That can be explicitly disabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': false}` replication option.
+#
+# Correspondingly, when disabled, newly created keyspaces will use vnodes
+# unless tablets are explicitly enabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': true}` replication option.
+#
+# Note that creating keyspaces with tablets enabled or disabled is irreversible.
+# The `tablets` option cannot be changed using `ALTER KEYSPACE`.
+enable_tablets: true
diff --git a/modules/selenium/build.gradle b/modules/selenium/build.gradle
index da8ad97f933..10dbcd6752d 100644
--- a/modules/selenium/build.gradle
+++ b/modules/selenium/build.gradle
@@ -13,7 +13,7 @@ dependencies {
testImplementation 'org.mortbay.jetty:jetty:6.1.26'
testImplementation project(':nginx')
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
}
diff --git a/modules/solace/build.gradle b/modules/solace/build.gradle
index fb83eede28c..aae6c28eefd 100644
--- a/modules/solace/build.gradle
+++ b/modules/solace/build.gradle
@@ -3,10 +3,10 @@ description = "Testcontainers :: Solace"
dependencies {
api project(':testcontainers')
- shaded 'org.awaitility:awaitility:4.2.0'
+ shaded 'org.awaitility:awaitility:4.3.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'com.solacesystems:sol-jcsmp:10.24.1'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'com.solacesystems:sol-jcsmp:10.27.2'
testImplementation 'org.apache.qpid:qpid-jms-client:0.61.0'
testImplementation 'org.eclipse.paho:org.eclipse.paho.client.mqttv3:1.2.5'
testImplementation 'org.apache.httpcomponents:fluent-hc:4.5.14'
diff --git a/modules/solace/src/main/java/org/testcontainers/solace/Service.java b/modules/solace/src/main/java/org/testcontainers/solace/Service.java
index 6ec7de44d08..9c9342ef789 100644
--- a/modules/solace/src/main/java/org/testcontainers/solace/Service.java
+++ b/modules/solace/src/main/java/org/testcontainers/solace/Service.java
@@ -4,10 +4,25 @@
* Services that are supported by Testcontainers implementation
*/
public enum Service {
+ /**
+ * Advanced Message Queuing Protocol
+ */
AMQP("amqp", 5672, "amqp", false),
+ /**
+ * Message Queuing Telemetry Transport
+ */
MQTT("mqtt", 1883, "tcp", false),
+ /**
+ * Representational State Transfer
+ */
REST("rest", 9000, "http", false),
+ /**
+ * Solace Message Format
+ */
SMF("smf", 55555, "tcp", true),
+ /**
+ * Solace Message Format with SSL
+ */
SMF_SSL("smf", 55443, "tcps", true);
private final String name;
diff --git a/modules/solace/src/main/java/org/testcontainers/solace/SolaceContainer.java b/modules/solace/src/main/java/org/testcontainers/solace/SolaceContainer.java
index 64b4365081f..00db0606536 100644
--- a/modules/solace/src/main/java/org/testcontainers/solace/SolaceContainer.java
+++ b/modules/solace/src/main/java/org/testcontainers/solace/SolaceContainer.java
@@ -65,11 +65,19 @@ public SolaceContainer(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
+ /**
+ * Create a new solace container with the specified docker image.
+ *
+ * @param dockerImageName the image name that should be used.
+ */
public SolaceContainer(DockerImageName dockerImageName) {
super(dockerImageName);
dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
withCreateContainerCmdModifier(cmd -> {
- cmd.getHostConfig().withShmSize(SHM_SIZE).withUlimits(new Ulimit[] { new Ulimit("nofile", 2448L, 6592L) });
+ cmd
+ .getHostConfig()
+ .withShmSize(SHM_SIZE)
+ .withUlimits(new Ulimit[] { new Ulimit("nofile", 2448L, 1048576L) });
});
this.waitStrategy = Wait.forLogMessage(SOLACE_READY_MESSAGE, 1).withStartupTimeout(Duration.ofSeconds(60));
withExposedPorts(8080);
@@ -103,6 +111,17 @@ private Transferable createConfigurationScript() {
updateConfigScript(scriptBuilder, "create message-vpn " + vpn);
updateConfigScript(scriptBuilder, "no shutdown");
updateConfigScript(scriptBuilder, "exit");
+ updateConfigScript(scriptBuilder, "client-profile default message-vpn " + vpn);
+ updateConfigScript(scriptBuilder, "message-spool");
+ updateConfigScript(scriptBuilder, "allow-guaranteed-message-send");
+ updateConfigScript(scriptBuilder, "allow-guaranteed-message-receive");
+ updateConfigScript(scriptBuilder, "allow-guaranteed-endpoint-create");
+ updateConfigScript(scriptBuilder, "allow-guaranteed-endpoint-create-durability all");
+ updateConfigScript(scriptBuilder, "exit");
+ updateConfigScript(scriptBuilder, "exit");
+ updateConfigScript(scriptBuilder, "message-spool message-vpn " + vpn);
+ updateConfigScript(scriptBuilder, "max-spool-usage 60000");
+ updateConfigScript(scriptBuilder, "exit");
}
// Configure username and password
@@ -260,7 +279,7 @@ public SolaceContainer withVpn(String vpn) {
* Sets the solace server ceritificates
*
* @param certFile Server certificate
- * @param caFile Certified Authority ceritificate
+ * @param caFile Certified Authority certificate
* @return This container.
*/
public SolaceContainer withClientCert(final MountableFile certFile, final MountableFile caFile) {
diff --git a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerAMQPTest.java b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerAMQPTest.java
index 6c68d27d358..f7266fe8212 100644
--- a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerAMQPTest.java
+++ b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerAMQPTest.java
@@ -31,7 +31,7 @@ public class SolaceContainerAMQPTest {
@Test
public void testSolaceContainer() throws JMSException {
try (
- SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.2")
+ SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.25.0")
.withTopic(TOPIC_NAME, Service.AMQP)
.withVpn("amqp-vpn")
) {
diff --git a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerMQTTTest.java b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerMQTTTest.java
index ab7d5ed56bb..07c08d32416 100644
--- a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerMQTTTest.java
+++ b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerMQTTTest.java
@@ -27,7 +27,7 @@ public class SolaceContainerMQTTTest {
@Test
public void testSolaceContainer() {
try (
- SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.2")
+ SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.25.0")
.withTopic(TOPIC_NAME, Service.MQTT)
.withVpn("mqtt-vpn")
) {
diff --git a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerRESTTest.java b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerRESTTest.java
index f2722b986ca..7f54cec3f74 100644
--- a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerRESTTest.java
+++ b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerRESTTest.java
@@ -28,7 +28,7 @@ public class SolaceContainerRESTTest {
@Test
public void testSolaceContainer() throws IOException {
try (
- SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.2")
+ SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.25.0")
.withTopic(TOPIC_NAME, Service.REST)
.withVpn("rest-vpn")
) {
diff --git a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerSMFTest.java b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerSMFTest.java
index d7ccd94998e..d7e0e8ea981 100644
--- a/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerSMFTest.java
+++ b/modules/solace/src/test/java/org/testcontainers/solace/SolaceContainerSMFTest.java
@@ -1,11 +1,14 @@
package org.testcontainers.solace;
import com.solacesystems.jcsmp.BytesXMLMessage;
+import com.solacesystems.jcsmp.ConsumerFlowProperties;
+import com.solacesystems.jcsmp.EndpointProperties;
import com.solacesystems.jcsmp.JCSMPException;
import com.solacesystems.jcsmp.JCSMPFactory;
import com.solacesystems.jcsmp.JCSMPProperties;
import com.solacesystems.jcsmp.JCSMPSession;
import com.solacesystems.jcsmp.JCSMPStreamingPublishCorrelatingEventHandler;
+import com.solacesystems.jcsmp.Queue;
import com.solacesystems.jcsmp.TextMessage;
import com.solacesystems.jcsmp.Topic;
import com.solacesystems.jcsmp.XMLMessageConsumer;
@@ -30,40 +33,75 @@ public class SolaceContainerSMFTest {
private static final Topic TOPIC = JCSMPFactory.onlyInstance().createTopic("Topic/ActualTopic");
+ private static final Queue QUEUE = JCSMPFactory.onlyInstance().createQueue("Queue");
+
@Test
public void testSolaceContainerWithSimpleAuthentication() {
try (
// solaceContainerSetup {
- SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.2")
+ SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.25.0")
.withCredentials("user", "pass")
- .withTopic("Topic/ActualTopic", Service.SMF)
+ .withTopic(TOPIC.getName(), Service.SMF)
.withVpn("test_vpn")
// }
) {
solaceContainer.start();
JCSMPSession session = createSessionWithBasicAuth(solaceContainer);
assertThat(session).isNotNull();
- assertThat(consumeMessageFromSolace(session)).isEqualTo(MESSAGE);
+ consumeMessageFromTopics(session);
session.closeSession();
}
}
+ @Test
+ public void testSolaceContainerWithCreateFlow() {
+ try (
+ SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.25.0")
+ .withCredentials("user", "pass")
+ .withTopic(TOPIC.getName(), Service.SMF)
+ .withVpn("test_vpn")
+ ) {
+ solaceContainer.start();
+ JCSMPSession session = createSessionWithBasicAuth(solaceContainer);
+ assertThat(session).isNotNull();
+ testCreateFlow(session);
+ session.closeSession();
+ }
+ }
+
+ private static void testCreateFlow(JCSMPSession session) {
+ try {
+ EndpointProperties endpointProperties = new EndpointProperties();
+ endpointProperties.setAccessType(EndpointProperties.ACCESSTYPE_NONEXCLUSIVE);
+ endpointProperties.setQuota(1000);
+ session.provision(QUEUE, endpointProperties, JCSMPSession.FLAG_IGNORE_ALREADY_EXISTS);
+ session.addSubscription(QUEUE, TOPIC, JCSMPSession.WAIT_FOR_CONFIRM);
+ ConsumerFlowProperties flowProperties = new ConsumerFlowProperties().setEndpoint(QUEUE);
+ TestConsumer listener = new TestConsumer();
+ session.createFlow(listener, flowProperties).start();
+ publishMessageToSolaceTopic(session);
+ listener.waitForMessage();
+ } catch (Exception e) {
+ throw new RuntimeException("Cannot process message using solace topic/queue: " + e.getMessage(), e);
+ }
+ }
+
@Test
public void testSolaceContainerWithCertificates() {
try (
// solaceContainerUsageSSL {
- SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.6")
+ SolaceContainer solaceContainer = new SolaceContainer("solace/solace-pubsub-standard:10.25.0")
.withClientCert(
MountableFile.forClasspathResource("solace.pem"),
MountableFile.forClasspathResource("rootCA.crt")
)
- .withTopic("Topic/ActualTopic", Service.SMF_SSL)
+ .withTopic(TOPIC.getName(), Service.SMF_SSL)
// }
) {
solaceContainer.start();
JCSMPSession session = createSessionWithCertificates(solaceContainer);
assertThat(session).isNotNull();
- assertThat(consumeMessageFromSolace(session)).isEqualTo(MESSAGE);
+ consumeMessageFromTopics(session);
session.closeSession();
}
}
@@ -112,7 +150,7 @@ private static JCSMPSession createSession(JCSMPProperties properties) {
}
}
- private void publishMessageToSolace(JCSMPSession session) throws JCSMPException {
+ private static void publishMessageToSolaceTopic(JCSMPSession session) throws JCSMPException {
XMLMessageProducer producer = session.getMessageProducer(
new JCSMPStreamingPublishCorrelatingEventHandler() {
@Override
@@ -131,37 +169,49 @@ public void handleErrorEx(Object o, JCSMPException e, long l) {
producer.send(msg, TOPIC);
}
- private String consumeMessageFromSolace(JCSMPSession session) {
- CountDownLatch latch = new CountDownLatch(1);
+ private static void consumeMessageFromTopics(JCSMPSession session) {
try {
- String[] result = new String[1];
- XMLMessageConsumer cons = session.getMessageConsumer(
- new XMLMessageListener() {
- @Override
- public void onReceive(BytesXMLMessage msg) {
- if (msg instanceof TextMessage) {
- TextMessage textMessage = (TextMessage) msg;
- String message = textMessage.getText();
- result[0] = message;
- LOGGER.info("TextMessage received: " + message);
- }
- latch.countDown();
- }
-
- @Override
- public void onException(JCSMPException e) {
- LOGGER.error("Exception received: " + e.getMessage());
- latch.countDown();
- }
- }
- );
+ TestConsumer listener = new TestConsumer();
+ XMLMessageConsumer cons = session.getMessageConsumer(listener);
session.addSubscription(TOPIC);
cons.start();
- publishMessageToSolace(session);
- assertThat(latch.await(10L, TimeUnit.SECONDS)).isTrue();
- return result[0];
+ publishMessageToSolaceTopic(session);
+ listener.waitForMessage();
} catch (Exception e) {
- throw new RuntimeException("Cannot receive message from solace", e);
+ throw new RuntimeException("Cannot process message using solace: " + e.getMessage(), e);
+ }
+ }
+
+ static class TestConsumer implements XMLMessageListener {
+
+ private final CountDownLatch latch = new CountDownLatch(1);
+
+ private String result;
+
+ @Override
+ public void onReceive(BytesXMLMessage msg) {
+ if (msg instanceof TextMessage) {
+ TextMessage textMessage = (TextMessage) msg;
+ String message = textMessage.getText();
+ result = message;
+ LOGGER.info("Message received: " + message);
+ }
+ latch.countDown();
+ }
+
+ @Override
+ public void onException(JCSMPException e) {
+ LOGGER.error("Exception received: " + e.getMessage());
+ latch.countDown();
+ }
+
+ private void waitForMessage() {
+ try {
+ assertThat(latch.await(10L, TimeUnit.SECONDS)).isTrue();
+ assertThat(result).isEqualTo(MESSAGE);
+ } catch (Exception e) {
+ throw new RuntimeException("Cannot receive message from solace: " + e.getMessage(), e);
+ }
}
}
}
diff --git a/modules/solr/build.gradle b/modules/solr/build.gradle
index d8ff7aeba39..f4ac079772d 100644
--- a/modules/solr/build.gradle
+++ b/modules/solr/build.gradle
@@ -5,6 +5,12 @@ dependencies {
// TODO use JDK's HTTP client and/or Apache HttpClient5
shaded 'com.squareup.okhttp3:okhttp:4.12.0'
- testImplementation 'org.apache.solr:solr-solrj:8.11.3'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.apache.solr:solr-solrj:8.11.4'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.SolrContainer"
+ ]
}
diff --git a/modules/solr/src/main/java/org/testcontainers/containers/SolrContainer.java b/modules/solr/src/main/java/org/testcontainers/containers/SolrContainer.java
index 18aca269235..16c3a0d62d4 100644
--- a/modules/solr/src/main/java/org/testcontainers/containers/SolrContainer.java
+++ b/modules/solr/src/main/java/org/testcontainers/containers/SolrContainer.java
@@ -4,6 +4,7 @@
import lombok.SneakyThrows;
import org.apache.commons.lang3.StringUtils;
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy;
+import org.testcontainers.utility.ComparableVersion;
import org.testcontainers.utility.DockerImageName;
import java.net.URL;
@@ -27,29 +28,14 @@ public class SolrContainer extends GenericContainer {
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("solr");
- @Deprecated
- public static final String IMAGE = DEFAULT_IMAGE_NAME.getUnversionedPart();
-
- @Deprecated
- public static final String DEFAULT_TAG = "8.3.0";
-
public static final Integer ZOOKEEPER_PORT = 9983;
public static final Integer SOLR_PORT = 8983;
private SolrContainerConfiguration configuration;
- /**
- * @deprecated use {@link #SolrContainer(DockerImageName)} instead
- */
- @Deprecated
- public SolrContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
+ private final ComparableVersion imageVersion;
- /**
- * @deprecated use {@link #SolrContainer(DockerImageName)} instead
- */
public SolrContainer(final String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
@@ -63,6 +49,7 @@ public SolrContainer(final DockerImageName dockerImageName) {
.withRegEx(".*o\\.e\\.j\\.s\\.Server Started.*")
.withStartupTimeout(Duration.of(60, ChronoUnit.SECONDS));
this.configuration = new SolrContainerConfiguration();
+ this.imageVersion = new ComparableVersion(dockerImageName.getVersionPart());
}
public SolrContainer withZookeeper(boolean zookeeper) {
@@ -107,14 +94,18 @@ protected void configure() {
throw new IllegalStateException("Solr needs to have a configuration if you want to use a schema");
}
// Generate Command Builder
- String command = "solr -f";
+ String command = "solr start -f";
// Add Default Ports
this.addExposedPort(SOLR_PORT);
// Configure Zookeeper
if (configuration.isZookeeper()) {
this.addExposedPort(ZOOKEEPER_PORT);
- command = "-DzkRun -h localhost";
+ if (this.imageVersion.isGreaterThanOrEqualTo("9.7.0")) {
+ command = "-DzkRun --host localhost";
+ } else {
+ command = "-DzkRun -h localhost";
+ }
}
// Apply generated Command
@@ -135,7 +126,7 @@ protected void waitUntilContainerStarted() {
@SneakyThrows
protected void containerIsStarted(InspectContainerResponse containerInfo) {
if (!configuration.isZookeeper()) {
- ExecResult result = execInContainer("solr", "create_core", "-c", configuration.getCollectionName());
+ ExecResult result = execInContainer("solr", "create", "-c", configuration.getCollectionName());
if (result.getExitCode() != 0) {
throw new IllegalStateException(
"Unable to create solr core:\nStdout: " + result.getStdout() + "\nStderr:" + result.getStderr()
diff --git a/modules/solr/src/test/java/org/testcontainers/containers/SolrContainerTest.java b/modules/solr/src/test/java/org/testcontainers/containers/SolrContainerTest.java
index 6bb4fabb1a4..f678155b3c1 100644
--- a/modules/solr/src/test/java/org/testcontainers/containers/SolrContainerTest.java
+++ b/modules/solr/src/test/java/org/testcontainers/containers/SolrContainerTest.java
@@ -6,15 +6,23 @@
import org.apache.solr.client.solrj.response.SolrPingResponse;
import org.junit.After;
import org.junit.Test;
-import org.testcontainers.utility.DockerImageName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
import java.io.IOException;
import static org.assertj.core.api.Assertions.assertThat;
+@RunWith(Parameterized.class)
public class SolrContainerTest {
- private static final DockerImageName SOLR_IMAGE = DockerImageName.parse("solr:8.3.0");
+ @Parameterized.Parameters(name = "{0}")
+ public static String[] getVersionsToTest() {
+ return new String[] { "solr:8.11.4", "solr:9.8.0" };
+ }
+
+ @Parameterized.Parameter
+ public String solrImage;
private SolrClient client = null;
@@ -28,7 +36,7 @@ public void stopRestClient() throws IOException {
@Test
public void solrCloudTest() throws IOException, SolrServerException {
- try (SolrContainer container = new SolrContainer(SOLR_IMAGE)) {
+ try (SolrContainer container = new SolrContainer(solrImage)) {
container.start();
SolrPingResponse response = getClient(container).ping("dummy");
assertThat(response.getStatus()).isZero();
@@ -38,7 +46,7 @@ public void solrCloudTest() throws IOException, SolrServerException {
@Test
public void solrStandaloneTest() throws IOException, SolrServerException {
- try (SolrContainer container = new SolrContainer(SOLR_IMAGE).withZookeeper(false)) {
+ try (SolrContainer container = new SolrContainer(solrImage).withZookeeper(false)) {
container.start();
SolrPingResponse response = getClient(container).ping("dummy");
assertThat(response.getStatus()).isZero();
@@ -50,7 +58,7 @@ public void solrStandaloneTest() throws IOException, SolrServerException {
public void solrCloudPingTest() throws IOException, SolrServerException {
// solrContainerUsage {
// Create the solr container.
- SolrContainer container = new SolrContainer(SOLR_IMAGE);
+ SolrContainer container = new SolrContainer(solrImage);
// Start the container. This step might take some time...
container.start();
diff --git a/modules/spock/build.gradle b/modules/spock/build.gradle
index d6af49fecbe..7a5c6e0e0fc 100644
--- a/modules/spock/build.gradle
+++ b/modules/spock/build.gradle
@@ -6,21 +6,27 @@ description = "Testcontainers :: Spock-Extension"
dependencies {
api project(':testcontainers')
- api 'org.spockframework:spock-core:2.3-groovy-4.0'
+ implementation 'org.spockframework:spock-core:2.3-groovy-4.0'
testImplementation project(':selenium')
testImplementation project(':mysql')
testImplementation project(':postgresql')
- testImplementation 'com.zaxxer:HikariCP:4.0.3'
+ testImplementation 'com.zaxxer:HikariCP:6.3.0'
testImplementation 'org.apache.httpcomponents:httpclient:4.5.14'
- testRuntimeOnly 'org.postgresql:postgresql:42.7.4'
+ testRuntimeOnly 'org.postgresql:postgresql:42.7.7'
testRuntimeOnly 'mysql:mysql-connector-java:8.0.33'
- testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.3'
- testRuntimeOnly 'org.junit.platform:junit-platform-testkit:1.11.0'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.13.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-testkit:1.13.3'
- testCompileOnly 'org.jetbrains:annotations:24.1.0'
+ testCompileOnly 'org.jetbrains:annotations:26.0.2'
+}
+
+tasks.withType(GroovyCompile) {
+ sourceCompatibility = '1.8'
+ targetCompatibility = '1.8'
+ options.encoding = 'UTF-8'
}
sourceJar {
diff --git a/modules/spock/src/main/groovy/org/testcontainers/spock/DockerAvailableDetector.groovy b/modules/spock/src/main/groovy/org/testcontainers/spock/DockerAvailableDetector.groovy
new file mode 100644
index 00000000000..b64299ffa87
--- /dev/null
+++ b/modules/spock/src/main/groovy/org/testcontainers/spock/DockerAvailableDetector.groovy
@@ -0,0 +1,15 @@
+package org.testcontainers.spock
+
+import org.testcontainers.DockerClientFactory
+
+class DockerAvailableDetector {
+
+ boolean isDockerAvailable() {
+ try {
+ DockerClientFactory.instance().client();
+ return true;
+ } catch (Throwable ex) {
+ return false;
+ }
+ }
+}
diff --git a/modules/spock/src/main/groovy/org/testcontainers/spock/Testcontainers.groovy b/modules/spock/src/main/groovy/org/testcontainers/spock/Testcontainers.groovy
index 632b129ec7a..98c2223904b 100644
--- a/modules/spock/src/main/groovy/org/testcontainers/spock/Testcontainers.groovy
+++ b/modules/spock/src/main/groovy/org/testcontainers/spock/Testcontainers.groovy
@@ -54,4 +54,11 @@ import java.lang.annotation.Target
@Target([ElementType.TYPE, ElementType.METHOD])
@ExtensionAnnotation(TestcontainersExtension)
@interface Testcontainers {
+
+ /**
+ * Whether tests should be disabled (rather than failing) when Docker is not available. Defaults to
+ * {@code false}.
+ * @return if the tests should be disabled when Docker is not available
+ */
+ boolean disabledWithoutDocker() default false;
}
diff --git a/modules/spock/src/main/groovy/org/testcontainers/spock/TestcontainersExtension.groovy b/modules/spock/src/main/groovy/org/testcontainers/spock/TestcontainersExtension.groovy
index 40392210f3a..2654408e901 100644
--- a/modules/spock/src/main/groovy/org/testcontainers/spock/TestcontainersExtension.groovy
+++ b/modules/spock/src/main/groovy/org/testcontainers/spock/TestcontainersExtension.groovy
@@ -7,8 +7,23 @@ import org.spockframework.runtime.model.SpecInfo
class TestcontainersExtension extends AbstractAnnotationDrivenExtension {
+ private final DockerAvailableDetector dockerDetector
+
+ TestcontainersExtension() {
+ this(new DockerAvailableDetector())
+ }
+
+ TestcontainersExtension(DockerAvailableDetector dockerDetector) {
+ this.dockerDetector = dockerDetector
+ }
+
@Override
void visitSpecAnnotation(Testcontainers annotation, SpecInfo spec) {
+ if (annotation.disabledWithoutDocker()) {
+ if (!dockerDetector.isDockerAvailable()) {
+ spec.skip("disabledWithoutDocker is true and Docker is not available")
+ }
+ }
def listener = new ErrorListener()
def interceptor = new TestcontainersMethodInterceptor(spec, listener)
spec.addSetupSpecInterceptor(interceptor)
diff --git a/modules/spock/src/test/groovy/org/testcontainers/spock/TestcontainersExtensionTest.groovy b/modules/spock/src/test/groovy/org/testcontainers/spock/TestcontainersExtensionTest.groovy
new file mode 100644
index 00000000000..d8cbdf2e497
--- /dev/null
+++ b/modules/spock/src/test/groovy/org/testcontainers/spock/TestcontainersExtensionTest.groovy
@@ -0,0 +1,39 @@
+package org.testcontainers.spock
+
+import org.spockframework.runtime.model.SpecInfo
+import spock.lang.Specification
+import spock.lang.Unroll
+
+class TestcontainersExtensionTest extends Specification {
+
+ @Unroll
+ def "should handle disabledWithoutDocker=#disabledWithoutDocker and dockerAvailable=#dockerAvailable correctly"() {
+ given:
+ def dockerDetector = Mock(DockerAvailableDetector)
+ dockerDetector.isDockerAvailable() >> dockerAvailable
+ def extension = new TestcontainersExtension(dockerDetector)
+ def specInfo = Mock(SpecInfo)
+ def annotation = disabledWithoutDocker ?
+ TestDisabledWithoutDocker.getAnnotation(Testcontainers) :
+ TestEnabledWithoutDocker.getAnnotation(Testcontainers)
+
+ when:
+ extension.visitSpecAnnotation(annotation, specInfo)
+
+ then:
+ skipCalls * specInfo.skip("disabledWithoutDocker is true and Docker is not available")
+
+ where:
+ disabledWithoutDocker | dockerAvailable | skipCalls
+ true | true | 0
+ true | false | 1
+ false | true | 0
+ false | false | 0
+ }
+
+ @Testcontainers(disabledWithoutDocker = true)
+ static class TestDisabledWithoutDocker {}
+
+ @Testcontainers
+ static class TestEnabledWithoutDocker {}
+}
diff --git a/modules/tidb/build.gradle b/modules/tidb/build.gradle
index 43ef0603e3b..37a528a41ff 100644
--- a/modules/tidb/build.gradle
+++ b/modules/tidb/build.gradle
@@ -6,5 +6,5 @@ dependencies {
testImplementation project(':jdbc-test')
testRuntimeOnly 'mysql:mysql-connector-java:8.0.33'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
}
diff --git a/modules/timeplus/build.gradle b/modules/timeplus/build.gradle
index 007b46e23e4..72da2ed182f 100644
--- a/modules/timeplus/build.gradle
+++ b/modules/timeplus/build.gradle
@@ -5,6 +5,6 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
- testRuntimeOnly 'com.timeplus:timeplus-native-jdbc:2.0.5'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testRuntimeOnly 'com.timeplus:timeplus-native-jdbc:2.0.10'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/toxiproxy/build.gradle b/modules/toxiproxy/build.gradle
index 211d5e38ae3..fecefb33421 100644
--- a/modules/toxiproxy/build.gradle
+++ b/modules/toxiproxy/build.gradle
@@ -4,6 +4,12 @@ dependencies {
api project(':testcontainers')
api 'eu.rekawek.toxiproxy:toxiproxy-java:2.1.7'
- testImplementation 'redis.clients:jedis:5.1.5'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'redis.clients:jedis:6.0.0'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+}
+
+tasks.japicmp {
+ classExcludes = [
+ "org.testcontainers.containers.ToxiproxyContainer"
+ ]
}
diff --git a/modules/toxiproxy/src/main/java/org/testcontainers/containers/ToxiproxyContainer.java b/modules/toxiproxy/src/main/java/org/testcontainers/containers/ToxiproxyContainer.java
index a2a85a95d88..c28c4e93ddf 100644
--- a/modules/toxiproxy/src/main/java/org/testcontainers/containers/ToxiproxyContainer.java
+++ b/modules/toxiproxy/src/main/java/org/testcontainers/containers/ToxiproxyContainer.java
@@ -31,8 +31,6 @@ public class ToxiproxyContainer extends GenericContainer {
private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("shopify/toxiproxy");
- private static final String DEFAULT_TAG = "2.1.0";
-
private static final DockerImageName GHCR_IMAGE_NAME = DockerImageName.parse("ghcr.io/shopify/toxiproxy");
private static final int TOXIPROXY_CONTROL_PORT = 8474;
@@ -47,14 +45,6 @@ public class ToxiproxyContainer extends GenericContainer {
private final AtomicInteger nextPort = new AtomicInteger(FIRST_PROXIED_PORT);
- /**
- * @deprecated use {@link #ToxiproxyContainer(DockerImageName)} instead
- */
- @Deprecated
- public ToxiproxyContainer() {
- this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_TAG));
- }
-
public ToxiproxyContainer(String dockerImageName) {
this(DockerImageName.parse(dockerImageName));
}
diff --git a/modules/trino/build.gradle b/modules/trino/build.gradle
index ce37aa68718..f8d3a4f1307 100644
--- a/modules/trino/build.gradle
+++ b/modules/trino/build.gradle
@@ -4,6 +4,6 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
- testRuntimeOnly 'io.trino:trino-jdbc:458'
- compileOnly 'org.jetbrains:annotations:24.1.0'
+ testRuntimeOnly 'io.trino:trino-jdbc:476'
+ compileOnly 'org.jetbrains:annotations:26.0.2'
}
diff --git a/modules/typesense/build.gradle b/modules/typesense/build.gradle
index 1a639ba2677..b82022ed9ab 100644
--- a/modules/typesense/build.gradle
+++ b/modules/typesense/build.gradle
@@ -4,5 +4,5 @@ dependencies {
api project(':testcontainers')
testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'org.typesense:typesense-java:0.9.0'
+ testImplementation 'org.typesense:typesense-java:1.3.0'
}
diff --git a/modules/vault/build.gradle b/modules/vault/build.gradle
index 263dff37fbf..af9f27c505f 100644
--- a/modules/vault/build.gradle
+++ b/modules/vault/build.gradle
@@ -4,7 +4,7 @@ dependencies {
api project(':testcontainers')
testImplementation 'com.bettercloud:vault-java-driver:5.1.0'
- testImplementation 'io.rest-assured:rest-assured:5.5.0'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'io.rest-assured:rest-assured:5.5.5'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}
diff --git a/modules/vault/src/main/java/org/testcontainers/vault/VaultContainer.java b/modules/vault/src/main/java/org/testcontainers/vault/VaultContainer.java
index 2e6396f7b93..f29595b7ebb 100644
--- a/modules/vault/src/main/java/org/testcontainers/vault/VaultContainer.java
+++ b/modules/vault/src/main/java/org/testcontainers/vault/VaultContainer.java
@@ -171,7 +171,7 @@ public SELF withLogLevel(VaultLogLevel level) {
* {@link #addSecrets() addSecrets}, called from {@link #containerIsStarted(InspectContainerResponse) containerIsStarted}
*
* @param path specific Vault path to store specified secrets
- * @param firstSecret first secret to add to specifed path
+ * @param firstSecret first secret to add to specified path
* @param remainingSecrets var args list of secrets to add to specified path
* @return this
* @deprecated use {@link #withInitCommand(String...)} instead
diff --git a/modules/weaviate/build.gradle b/modules/weaviate/build.gradle
index 1281fa120d0..0a2be01a079 100644
--- a/modules/weaviate/build.gradle
+++ b/modules/weaviate/build.gradle
@@ -3,6 +3,6 @@ description = "Testcontainers :: Weaviate"
dependencies {
api project(':testcontainers')
- testImplementation 'org.assertj:assertj-core:3.26.3'
- testImplementation 'io.weaviate:client:4.8.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testImplementation 'io.weaviate:client:5.3.0'
}
diff --git a/modules/weaviate/src/test/java/org/testcontainers/weaviate/WeaviateContainerTest.java b/modules/weaviate/src/test/java/org/testcontainers/weaviate/WeaviateContainerTest.java
index 53096182839..535b81126fd 100644
--- a/modules/weaviate/src/test/java/org/testcontainers/weaviate/WeaviateContainerTest.java
+++ b/modules/weaviate/src/test/java/org/testcontainers/weaviate/WeaviateContainerTest.java
@@ -19,7 +19,7 @@ public class WeaviateContainerTest {
@Test
public void testWeaviate() {
try ( // container {
- WeaviateContainer weaviate = new WeaviateContainer("cr.weaviate.io/semitechnologies/weaviate:1.25.5")
+ WeaviateContainer weaviate = new WeaviateContainer("cr.weaviate.io/semitechnologies/weaviate:1.29.0")
// }
) {
weaviate.start();
@@ -27,7 +27,7 @@ public void testWeaviate() {
config.setGRPCHost(weaviate.getGrpcHostAddress());
WeaviateClient client = new WeaviateClient(config);
Result meta = client.misc().metaGetter().run();
- assertThat(meta.getResult().getVersion()).isEqualTo("1.25.5");
+ assertThat(meta.getResult().getVersion()).isEqualTo("1.29.0");
}
}
@@ -43,13 +43,13 @@ public void testWeaviateWithModules() {
Map env = new HashMap<>();
env.put("ENABLE_MODULES", String.join(",", enableModules));
env.put("BACKUP_FILESYSTEM_PATH", "/tmp/backups");
- try (WeaviateContainer weaviate = new WeaviateContainer("semitechnologies/weaviate:1.25.5").withEnv(env)) {
+ try (WeaviateContainer weaviate = new WeaviateContainer("semitechnologies/weaviate:1.29.0").withEnv(env)) {
weaviate.start();
Config config = new Config("http", weaviate.getHttpHostAddress());
config.setGRPCHost(weaviate.getGrpcHostAddress());
WeaviateClient client = new WeaviateClient(config);
Result meta = client.misc().metaGetter().run();
- assertThat(meta.getResult().getVersion()).isEqualTo("1.25.5");
+ assertThat(meta.getResult().getVersion()).isEqualTo("1.29.0");
Object modules = meta.getResult().getModules();
assertThat(modules)
.isNotNull()
diff --git a/modules/yugabytedb/build.gradle b/modules/yugabytedb/build.gradle
index cb5d583b4ca..15540f0020a 100644
--- a/modules/yugabytedb/build.gradle
+++ b/modules/yugabytedb/build.gradle
@@ -4,7 +4,7 @@ dependencies {
api project(':jdbc')
testImplementation project(':jdbc-test')
// YCQL driver
- testImplementation 'com.yugabyte:java-driver-core:4.15.0-yb-2-TESTFIX.0'
+ testImplementation 'com.yugabyte:java-driver-core:4.19.0-yb-1'
// YSQL driver
- testRuntimeOnly 'com.yugabyte:jdbc-yugabytedb:42.3.5-yb-6'
+ testRuntimeOnly 'com.yugabyte:jdbc-yugabytedb:42.7.3-yb-4'
}
diff --git a/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYCQLContainer.java b/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYCQLContainer.java
index d193d19eef1..d79812f247f 100644
--- a/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYCQLContainer.java
+++ b/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYCQLContainer.java
@@ -74,7 +74,7 @@ public Set getLivenessCheckPortNumbers() {
* Configures the environment variables. Setting up these variables would create the
* custom objects. Setting {@link #withKeyspaceName(String)},
* {@link #withUsername(String)}, {@link #withPassword(String)} these parameters will
- * initilaize the database with those custom values
+ * initialize the database with those custom values
*/
@Override
protected void configure() {
@@ -123,7 +123,7 @@ public YugabyteDBYCQLContainer withPassword(final String password) {
}
/**
- * Executes the initilization script
+ * Executes the initialization script
* @param containerInfo containerInfo
*/
@Override
diff --git a/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYSQLContainer.java b/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYSQLContainer.java
index e3b39e780f0..aae3f67b313 100644
--- a/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYSQLContainer.java
+++ b/modules/yugabytedb/src/main/java/org/testcontainers/containers/YugabyteDBYSQLContainer.java
@@ -70,7 +70,7 @@ public Set getLivenessCheckPortNumbers() {
* Configures the environment variables. Setting up these variables would create the
* custom objects. Setting {@link #withDatabaseName(String)},
* {@link #withUsername(String)}, {@link #withPassword(String)} these parameters will
- * initilaize the database with those custom values
+ * initialize the database with those custom values
*/
@Override
diff --git a/settings.gradle b/settings.gradle
index 401cb2be959..0099b899082 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -5,8 +5,8 @@ buildscript {
}
}
dependencies {
- classpath "com.gradle.enterprise:com.gradle.enterprise.gradle.plugin:3.18.2"
- classpath "com.gradle:common-custom-user-data-gradle-plugin:2.0.2"
+ classpath "com.gradle.enterprise:com.gradle.enterprise.gradle.plugin:3.19.2"
+ classpath "com.gradle:common-custom-user-data-gradle-plugin:2.3"
classpath "org.gradle.toolchains:foojay-resolver:0.8.0"
}
}
diff --git a/smoke-test/build.gradle b/smoke-test/build.gradle
index 197452e43c5..781d6e87792 100644
--- a/smoke-test/build.gradle
+++ b/smoke-test/build.gradle
@@ -25,7 +25,7 @@ subprojects {
}
checkstyle {
- toolVersion = "10.12.4"
+ toolVersion = "10.23.0"
configFile = rootProject.file('../config/checkstyle/checkstyle.xml')
}
}
diff --git a/smoke-test/gradle/wrapper/gradle-wrapper.jar b/smoke-test/gradle/wrapper/gradle-wrapper.jar
index 2c3521197d7..1b33c55baab 100644
Binary files a/smoke-test/gradle/wrapper/gradle-wrapper.jar and b/smoke-test/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/smoke-test/gradle/wrapper/gradle-wrapper.properties b/smoke-test/gradle/wrapper/gradle-wrapper.properties
index 68e8816d71c..78cb6e16a49 100644
--- a/smoke-test/gradle/wrapper/gradle-wrapper.properties
+++ b/smoke-test/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionSha256Sum=d725d707bfabd4dfdc958c624003b3c80accc03f7037b5122c4b1d0ef15cecab
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-bin.zip
+distributionSha256Sum=bd71102213493060956ec229d946beee57158dbd89d0e62b91bca0fa2c5f3531
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.3-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/smoke-test/gradlew b/smoke-test/gradlew
index f5feea6d6b1..23d15a93670 100755
--- a/smoke-test/gradlew
+++ b/smoke-test/gradlew
@@ -86,8 +86,7 @@ done
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
-APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
-' "$PWD" ) || exit
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum
@@ -115,7 +114,7 @@ case "$( uname )" in #(
NONSTOP* ) nonstop=true ;;
esac
-CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+CLASSPATH="\\\"\\\""
# Determine the Java command to use to start the JVM.
@@ -206,7 +205,7 @@ fi
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Collect all arguments for the java command:
-# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
# and any embedded shellness will be escaped.
# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
# treated as '${Hostname}' itself on the command line.
@@ -214,7 +213,7 @@ DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
- org.gradle.wrapper.GradleWrapperMain \
+ -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \
"$@"
# Stop when "xargs" is not available.
diff --git a/smoke-test/gradlew.bat b/smoke-test/gradlew.bat
index 9b42019c791..5eed7ee8452 100644
--- a/smoke-test/gradlew.bat
+++ b/smoke-test/gradlew.bat
@@ -70,11 +70,11 @@ goto fail
:execute
@rem Setup the command line
-set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+set CLASSPATH=
@rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %*
:end
@rem End local scope for the variables with windows NT shell
diff --git a/smoke-test/turbo-mode/build.gradle b/smoke-test/turbo-mode/build.gradle
index 8a47c80aeee..1408842ee00 100644
--- a/smoke-test/turbo-mode/build.gradle
+++ b/smoke-test/turbo-mode/build.gradle
@@ -4,9 +4,10 @@ plugins {
dependencies {
testImplementation 'org.testcontainers:testcontainers'
- testImplementation 'org.junit.jupiter:junit-jupiter:5.11.0'
+ testImplementation 'org.junit.jupiter:junit-jupiter:5.13.3'
testImplementation 'ch.qos.logback:logback-classic:1.3.14'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.13.3'
}
test {
diff --git a/test-support/build.gradle b/test-support/build.gradle
index 1df20ae6fba..5c39ac72ac9 100644
--- a/test-support/build.gradle
+++ b/test-support/build.gradle
@@ -1,5 +1,5 @@
dependencies {
implementation 'junit:junit:4.13.2'
implementation 'org.slf4j:slf4j-api:2.0.16'
- testImplementation 'org.assertj:assertj-core:3.26.3'
+ testImplementation 'org.assertj:assertj-core:3.27.3'
}