diff --git a/docs/configuration.md b/docs/configuration.md index 33dbf2e6d..779fbb08e 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -33,7 +33,7 @@ Configuration of the Docker daemon: Configuration of Testcontainers and its behaviours: | Variable | Example | Description | -| ------------------------------------- | -------------------------- | -------------------------------------------- | +| ------------------------------------- |----------------------------| -------------------------------------------- | | TESTCONTAINERS_HOST_OVERRIDE | tcp://docker:2375 | Docker's host on which ports are exposed | | TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE | /var/run/docker.sock | Path to Docker's socket used by ryuk | | TESTCONTAINERS_RYUK_PRIVILEGED | true | Run ryuk as a privileged container | @@ -41,7 +41,7 @@ Configuration of Testcontainers and its behaviours: | TESTCONTAINERS_RYUK_PORT | 65515 | Set ryuk host port (not recommended) | | TESTCONTAINERS_SSHD_PORT | 65515 | Set SSHd host port (not recommended) | | TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX | mycompany.com/registry | Set default image registry | -| RYUK_CONTAINER_IMAGE | testcontainers/ryuk:0.11.0 | Custom image for ryuk | -| SSHD_CONTAINER_IMAGE | testcontainers/sshd:1.1.0 | Custom image for SSHd | +| RYUK_CONTAINER_IMAGE | testcontainers/ryuk:0.12.0 | Custom image for ryuk | +| SSHD_CONTAINER_IMAGE | testcontainers/sshd:1.3.0 | Custom image for SSHd | | TESTCONTAINERS_REUSE_ENABLE | true | Enable reusable containers | | TESTCONTAINERS_RYUK_VERBOSE | true | Sets RYUK_VERBOSE env var in ryuk container | diff --git a/docs/features/advanced.md b/docs/features/advanced.md index c2af420cb..3cf56a6f4 100644 --- a/docs/features/advanced.md +++ b/docs/features/advanced.md @@ -2,7 +2,7 @@ ## Container Runtime Client -Testcontainers configures an underlying container runtime to perform its tasks. 
This runtime works automatically with several providers like Docker, Podman, Colima, Rancher Desktop and Testcontainers Desktop. There are too many usage examples to list here, but here are some common examples: +Testcontainers configures an underlying container runtime to perform its tasks. This runtime works automatically with several providers like Docker, Podman, Colima, Rancher Desktop and Testcontainers Desktop. There are too many usage examples to list here, but here are some common examples. ### Fetch container runtime information diff --git a/docs/features/compose.md b/docs/features/compose.md index 85aedbebd..2f22ddef7 100644 --- a/docs/features/compose.md +++ b/docs/features/compose.md @@ -4,7 +4,7 @@ Create and start a Docker Compose environment: -```javascript +```js const { DockerComposeEnvironment } = require("testcontainers"); const composeFilePath = "/path/to/build-context"; @@ -15,7 +15,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF You can override by providing [multiple compose files](https://docs.docker.com/compose/extends/#multiple-compose-files): -```javascript +```js const environment = await new DockerComposeEnvironment( composeFilePath, [ @@ -27,14 +27,14 @@ const environment = await new DockerComposeEnvironment( Provide a list of service names to only start those services: -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .up(["redis-1", "postgres-1"]); ``` ### With wait strategy -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withWaitStrategy("redis-1", Wait.forLogMessage("Ready to accept connections")) .withWaitStrategy("postgres-1", Wait.forHealthCheck()) @@ -46,7 +46,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF By default Testcontainers uses the "listening ports" wait strategy for all containers. 
If you'd like to override the default wait strategy for all services, you can do so: -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withDefaultWaitStrategy(Wait.forHealthCheck()) .up(); @@ -56,7 +56,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF Testcontainers will automatically pull an image if it doesn't exist. This is configurable: -```javascript +```js const { DockerComposeEnvironment, PullPolicy } = require("testcontainers"); const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) @@ -66,7 +66,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF Create a custom pull policy: -```typescript +```ts const { GenericContainer, ImagePullPolicy } = require("testcontainers"); class CustomPullPolicy implements ImagePullPolicy { @@ -82,7 +82,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF ### With rebuild -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withBuild() .up(); @@ -92,7 +92,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF See [environment file](https://docs.docker.com/compose/environment-variables/#using-the---env-file--option). -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withEnvironmentFile(".env.custom") .up(); @@ -102,7 +102,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF See [profiles](https://docs.docker.com/compose/profiles/). 
-```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withProfiles("profile1", "profile2") .up(); @@ -110,7 +110,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF ### With no recreate -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withNoRecreate() .up(); @@ -126,7 +126,7 @@ services: image: redis:${TAG} ``` -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withEnvironment({ "TAG": "VALUE" }) .up(); @@ -136,7 +136,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF See [project name](https://docs.docker.com/compose/project-name/). -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withProjectName("test") .up(); @@ -146,7 +146,7 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF See [docker-compose](https://github.com/PDMLab/docker-compose/) library. -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile) .withClientOptions({ executable: { standalone: true, executablePath: "/path/to/docker-compose" } }) .up(); @@ -157,21 +157,21 @@ const environment = await new DockerComposeEnvironment(composeFilePath, composeF Testcontainers by default will not wait until the environment has downed. It will simply issue the down command and return immediately. This is to save time when running tests. 
-```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile).up(); await environment.down(); ``` If you need to wait for the environment to be downed, you can provide a timeout: -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile).up(); await environment.down({ timeout: 10_000 }); // 10 seconds ``` Volumes created by the environment are removed when stopped. This is configurable: -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile).up(); await environment.down({ removeVolumes: false }); ``` @@ -180,7 +180,7 @@ await environment.down({ removeVolumes: false }); If you have multiple docker-compose environments which share dependencies such as networks, you can stop the environment instead of downing it: -```javascript +```js const environment = await new DockerComposeEnvironment(composeFilePath, composeFile).up(); await environment.stop(); ``` @@ -189,6 +189,6 @@ await environment.stop(); Interact with the containers in your compose environment as you would any other Generic Container. Note that the container name suffix has changed from `_` to `-` between docker-compose v1 and v2 respectively. 
-```javascript +```js const container = environment.getContainer("alpine-1"); ``` diff --git a/docs/features/containers.md b/docs/features/containers.md index 80a66ad68..55331e8c9 100644 --- a/docs/features/containers.md +++ b/docs/features/containers.md @@ -4,7 +4,7 @@ Create and start any container using a Generic Container: -```javascript +```js const { GenericContainer } = require("testcontainers"); const container = await new GenericContainer("alpine").start(); @@ -12,7 +12,7 @@ const container = await new GenericContainer("alpine").start(); To use a specific image version: -```javascript +```js const container = await new GenericContainer("alpine:3.10").start(); ``` @@ -20,7 +20,7 @@ const container = await new GenericContainer("alpine:3.10").start(); Testcontainers will automatically pull an image if it doesn't exist. This is configurable: -```javascript +```js const { GenericContainer, PullPolicy } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -30,7 +30,7 @@ const container = await new GenericContainer("alpine") Create a custom pull policy: -```typescript +```ts const { GenericContainer, ImagePullPolicy } = require("testcontainers"); class CustomPullPolicy implements ImagePullPolicy { @@ -46,7 +46,7 @@ const container = await new GenericContainer("alpine") ### With a command -```javascript +```js const container = await new GenericContainer("alpine") .withCommand(["sleep", "infinity"]) .start(); @@ -54,7 +54,7 @@ const container = await new GenericContainer("alpine") ### With an entrypoint -```javascript +```js const container = await new GenericContainer("alpine") .withEntrypoint(["cat"]) .start(); @@ -62,7 +62,7 @@ const container = await new GenericContainer("alpine") ### With environment variables -```javascript +```js const container = await new GenericContainer("alpine") .withEnvironment({ ENV: "VALUE" }) .start(); @@ -70,7 +70,7 @@ const container = await new GenericContainer("alpine") ### With a platform 
-```javascript +```js const container = await new GenericContainer("alpine") .withPlatform("linux/arm64") // similar to `--platform linux/arm64` .start(); @@ -82,7 +82,7 @@ const container = await new GenericContainer("alpine") Bind mounts are not portable. They do not work with Docker in Docker or in cases where the Docker agent is remote. It is preferred to [copy files/directories/content into the container](../containers#with-filesdirectoriescontent) instead. -```javascript +```js const container = await new GenericContainer("alpine") .withBindMounts([{ source: "/local/file.txt", @@ -97,7 +97,7 @@ const container = await new GenericContainer("alpine") ### With labels -```javascript +```js const container = await new GenericContainer("alpine") .withLabels({ label: "value" }) .start(); @@ -109,7 +109,7 @@ const container = await new GenericContainer("alpine") If a container with the same name already exists, Docker will raise a conflict. If you are specifying a name to enable container to container communication, look into creating a network and using [network aliases](../networking#network-aliases). 
-```javascript +```js const container = await new GenericContainer("alpine") .withName("custom-container-name") .start(); @@ -119,7 +119,7 @@ const container = await new GenericContainer("alpine") Copy files/directories or content to a container before it starts: -```javascript +```js const container = await new GenericContainer("alpine") .withCopyFilesToContainer([{ source: "/local/file.txt", @@ -142,7 +142,7 @@ const container = await new GenericContainer("alpine") Or after it starts: -```javascript +```js const container = await new GenericContainer("alpine").start(); container.copyFilesToContainer([{ @@ -162,7 +162,7 @@ container.copyArchiveToContainer(nodeReadable, "/some/nested/remotedir"); An optional `mode` can be specified in octal for setting file permissions: -```javascript +```js const container = await new GenericContainer("alpine") .withCopyFilesToContainer([{ source: "/local/file.txt", @@ -186,14 +186,14 @@ const container = await new GenericContainer("alpine") Files and directories can be fetched from a started or stopped container as a tar archive. 
The archive is returned as a readable stream: -```javascript +```js const container = await new GenericContainer("alpine").start(); const tarArchiveStream = await container.copyArchiveFromContainer("/var/log") ``` And when a container is stopped but not removed: -```javascript +```js const container = await new GenericContainer("alpine").start(); const stoppedContainer = await container.stop({ remove: false }); const tarArchiveStream = await stoppedContainer.copyArchiveFromContainer("/var/log/syslog") @@ -201,7 +201,7 @@ const tarArchiveStream = await stoppedContainer.copyArchiveFromContainer("/var/l ### With working directory -```javascript +```js const container = await new GenericContainer("alpine") .withWorkingDir("/opt") .start(); @@ -213,7 +213,7 @@ May be necessary when the driver of your docker host does not support reading lo See [log drivers](https://docs.docker.com/config/containers/logging/configure/#configure-the-logging-driver-for-a-container). -```javascript +```js const container = await new GenericContainer("alpine") .withDefaultLogDriver() .start(); @@ -221,7 +221,7 @@ const container = await new GenericContainer("alpine") ### With a tmpfs mount -```javascript +```js const container = await new GenericContainer("alpine") .withTmpFs({ "/temp_pgdata": "rw,noexec,nosuid,size=65536k" }) .start(); @@ -231,7 +231,7 @@ const container = await new GenericContainer("alpine") Value can be a username or UID (format: `[:]`). -```javascript +```js const container = await new GenericContainer("alpine") .withUser("bob") .start(); @@ -239,7 +239,7 @@ const container = await new GenericContainer("alpine") ### With privileged mode -```javascript +```js const container = await new GenericContainer("alpine") .withPrivilegedMode() .start(); @@ -249,7 +249,7 @@ const container = await new GenericContainer("alpine") See [capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html). 
-```javascript +```js const container = await new GenericContainer("alpine") .withAddedCapabilities("NET_ADMIN", "IPC_LOCK") .start(); @@ -259,7 +259,7 @@ const container = await new GenericContainer("alpine") See [capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html). -```javascript +```js const container = await new GenericContainer("alpine") .withDroppedCapabilities("NET_ADMIN", "IPC_LOCK") .start(); @@ -269,7 +269,7 @@ const container = await new GenericContainer("alpine") **Not supported in rootless container runtimes.** -```javascript +```js const container = await new GenericContainer("aline") .withUlimits({ memlock: { @@ -284,7 +284,7 @@ const container = await new GenericContainer("aline") See [IPC mode](https://docs.docker.com/engine/reference/run/#ipc-settings---ipc). -```javascript +```js const container = await new GenericContainer("alpine") .withIpcMode("host") .start(); @@ -299,7 +299,7 @@ See [NanoCpu and Memory in ContainerCreate](https://docs.docker.com/engine/api/v - Memory – Limit in Gigabytes - CPU – Quota in units of CPUs -```javascript +```js const container = await new GenericContainer("alpine") .withResourcesQuota({ memory: 0.5, cpu: 1 }) .start(); @@ -307,7 +307,7 @@ const container = await new GenericContainer("alpine") ### With shared memory size -```javascript +```js const container = await new GenericContainer("alpine") .withSharedMemorySize(512 * 1024 * 1024) .start(); @@ -319,7 +319,7 @@ const container = await new GenericContainer("alpine") See this [Docker blog post on Testcontainers best practices](https://www.docker.com/blog/testcontainers-best-practices/#:~:text=Don't%20hardcode%20the%20hostname) -```javascript +```js const container = await new GenericContainer("alpine") .withHostname("my-hostname") .start(); @@ -329,28 +329,28 @@ const container = await new GenericContainer("alpine") Testcontainers by default will not wait until the container has stopped. 
It will simply issue the stop command and return immediately. This is to save time when running tests. -```javascript +```js const container = await new GenericContainer("alpine").start(); await container.stop(); ``` If you need to wait for the container to be stopped, you can provide a timeout: -```javascript +```js const container = await new GenericContainer("alpine").start(); await container.stop({ timeout: 10_000 }); // 10 seconds ``` You can disable automatic removal of the container, which is useful for debugging, or if for example you want to copy content from the container once it has stopped: -```javascript +```js const container = await new GenericContainer("alpine").start(); await container.stop({ remove: false }); ``` Alternatively, you can disable automatic removal while configuring the container: -```javascript +```js const container = await new GenericContainer("alpine") .withAutoRemove(false) .start(); @@ -360,7 +360,7 @@ await container.stop(); The value specified to `.withAutoRemove()` can be overridden by `.stop()`: -```javascript +```js const container = await new GenericContainer("alpine") .withAutoRemove(false) .start(); @@ -373,21 +373,21 @@ Keep in mind that disabling ryuk (set `TESTCONTAINERS_RYUK_DISABLED` to `true`) Volumes created by the container are removed when stopped. 
This is configurable: -```javascript +```js const container = await new GenericContainer("alpine").start(); await container.stop({ removeVolumes: false }); ``` ## Restarting a container -```javascript +```js const container = await new GenericContainer("alpine").start(); await container.restart(); ``` ## Committing a container to an image -```javascript +```js const container = await new GenericContainer("alpine").start(); // Do something with the container await container.exec(["sh", "-c", `echo 'hello world' > /hello-world.txt`]); @@ -400,7 +400,7 @@ const containerFromCommit = await new GenericContainer(newImageId).start(); By default, the image inherits the behavior of being marked for cleanup on exit. You can override this behavior using the `deleteOnExit` option: -```javascript +```js const container = await new GenericContainer("alpine").start(); // Do something with the container await container.exec(["sh", "-c", `echo 'hello world' > /hello-world.txt`]); @@ -414,7 +414,7 @@ Enabling container re-use means that Testcontainers will not start a new contain This is useful for example if you want to share a container across tests without global set up. -```javascript +```js const container1 = await new GenericContainer("alpine") .withCommand(["sleep", "infinity"]) .withReuse() @@ -430,7 +430,7 @@ expect(container1.getId()).toBe(container2.getId()); You can also re-use stopped but not removed containers. -```javascript +```js const container1 = await new GenericContainer("alpine") .withReuse() .withAutoRemove(false) @@ -451,7 +451,7 @@ If this environment variable is not declared, the feature is enabled by default. 
You can create your own Generic Container as follows: -```typescript +```ts import { GenericContainer, TestContainer, @@ -492,7 +492,7 @@ const startedCustomContainer: StartedTestContainer = await customContainer.start Define your own lifecycle callbacks for better control over your custom containers: -```typescript +```ts import { GenericContainer, AbstractStartedContainer, @@ -544,7 +544,7 @@ class CustomStartedContainer extends AbstractStartedContainer { Specify which container ports you want accessible by the host: -```javascript +```js const container = await new GenericContainer("alpine") .withExposedPorts(22, 80, 443) .start(); @@ -554,7 +554,7 @@ Testcontainers will automatically bind an available, random port on the host to Retrieve the mapped port as follows: -```javascript +```js const container = await new GenericContainer("alpine") .withExposedPorts(80) .start(); @@ -564,7 +564,7 @@ const httpPort = container.getMappedPort(80); If a container exposes a single port, you can use the following convenience method: -```javascript +```js const container = await new GenericContainer("alpine") .withExposedPorts(80) .start(); @@ -574,7 +574,7 @@ const httpPort = container.getFirstMappedPort(); Specify a protocol for the exposed port: -```javascript +```js const container = await new GenericContainer("alpine") .withExposedPorts({ container: 80, @@ -587,7 +587,7 @@ const httpPort = container.getMappedPort(80, "udp"); Alternatively, specify the protocol using a string with the format `port/protocol`: -```javascript +```js const container = await new GenericContainer("alpine") .withExposedPorts("80/udp") .start(); @@ -599,7 +599,7 @@ If no protocol is specified, it defaults to `tcp`. 
Specify fixed host port bindings (**not recommended**): -```javascript +```js const container = await new GenericContainer("alpine") .withExposedPorts({ container: 80, @@ -612,7 +612,7 @@ const container = await new GenericContainer("alpine") `SocatContainer` enables any TCP port of another container to be exposed publicly. -```javascript +```js const network = await new Network().start(); const container = await new GenericContainer("testcontainers/helloworld:1.2.0") @@ -643,7 +643,7 @@ To run a command inside an already started container, use the exec method. The command will be run in the container's working directory, returning the combined output (`output`), standard output (`stdout`), standard error (`stderr`), and exit code (`exitCode`). -```javascript +```js const container = await new GenericContainer("alpine") .withCommand(["sleep", "infinity"]) .start(); @@ -659,7 +659,7 @@ The following options can be provided to modify the command execution: 3. **`env`:** A map of environment variables to set inside the container. -```javascript +```js const container = await new GenericContainer("alpine") .withCommand(["sleep", "infinity"]) .start(); @@ -678,7 +678,7 @@ const { output, stdout, stderr, exitCode } = await container.exec(["echo", "hell Logs can be consumed either from a started container: -```javascript +```js const container = await new GenericContainer("alpine").start(); (await container.logs()) @@ -689,7 +689,7 @@ const container = await new GenericContainer("alpine").start(); Or a consumer can be provided before start. 
This is useful for example if your container is failing to start: -```javascript +```js const container = await new GenericContainer("alpine") .withLogConsumer(stream => { stream.on("data", line => console.log(line)); @@ -701,7 +701,7 @@ const container = await new GenericContainer("alpine") You can specify a point in time as a UNIX timestamp from which you want the logs to start: -```javascript +```js const msInSec = 1000; const tenSecondsAgoMs = new Date().getTime() - 10 * msInSec; const since = tenSecondsAgoMs / msInSec; diff --git a/docs/features/images.md b/docs/features/images.md index e4e942846..96af1d58e 100644 --- a/docs/features/images.md +++ b/docs/features/images.md @@ -4,7 +4,7 @@ Build and start your own Docker image: -```javascript +```js const { GenericContainer } = require("testcontainers"); const container = await GenericContainer @@ -16,7 +16,7 @@ const startedContainer = await container.start(); Images are built by default with a randomly generated name and are deleted on exit. If you wish to keep the built images between test runs, you can provide a name and specify not to delete the image: -```javascript +```js const { GenericContainer } = require("testcontainers"); const container = await GenericContainer @@ -26,7 +26,7 @@ const container = await GenericContainer ### With buildkit -```javascript +```js const { GenericContainer } = require("testcontainers"); const container = await GenericContainer @@ -39,7 +39,7 @@ const container = await GenericContainer Testcontainers will automatically pull an image if it doesn't exist. 
This is configurable: -```javascript +```js const { GenericContainer, PullPolicy } = require("testcontainers"); const container = await GenericContainer @@ -50,7 +50,7 @@ const container = await GenericContainer Create a custom pull policy: -```typescript +```ts const { GenericContainer, ImagePullPolicy } = require("testcontainers"); class CustomPullPolicy implements ImagePullPolicy { @@ -67,7 +67,7 @@ const container = await GenericContainer ### With build arguments -```javascript +```js const container = await GenericContainer .fromDockerfile("/path/to/build-context") .withBuildArgs({ ARG: "VALUE" }) @@ -78,7 +78,7 @@ const container = await GenericContainer Stop the build at a specific stage by specifying a target: -```javascript +```js const container = await GenericContainer .fromDockerfile("/path/to/build-context") .withTarget('my-stage') @@ -87,7 +87,7 @@ const container = await GenericContainer ### With custom Dockerfile -```javascript +```js const container = await GenericContainer .fromDockerfile("/path/to/build-context", "my-dockerfile") .build(); @@ -95,7 +95,7 @@ const container = await GenericContainer ### Without cache -```javascript +```js const container = await GenericContainer .fromDockerfile("/path/to/build-context") .withCache(false) @@ -104,7 +104,7 @@ const container = await GenericContainer ### With platform -```javascript +```js const container = await GenericContainer .fromDockerfile("/path/to/build-context") .withPlatform("linux/amd64") diff --git a/docs/features/networking.md b/docs/features/networking.md index f17d0e12a..bb57ea29e 100644 --- a/docs/features/networking.md +++ b/docs/features/networking.md @@ -4,7 +4,7 @@ Create and start a new network. 
Start a container within the network: -```javascript +```js const { GenericContainer, Network } = require("testcontainers"); const network = await new Network().start(); @@ -19,7 +19,7 @@ await network.stop(); Find a container's IP address in a given network: -```javascript +```js const network = await new Network().start(); const container = await new GenericContainer("alpine") @@ -33,7 +33,7 @@ const networkIpAddress = container.getIpAddress(network.getName()); Note that some network modes, for example `host`, only work on Linux machines. -```javascript +```js const container = await new GenericContainer("alpine") .withNetworkMode("bridge") .start(); @@ -41,7 +41,7 @@ const container = await new GenericContainer("alpine") ### With extra hosts -```javascript +```js const container = await new GenericContainer("alpine") .withExtraHosts([{ host: "foo", @@ -60,7 +60,7 @@ expect((await container.exec(["getent", "hosts", "bar"])).exitCode).toBe(0); Network aliases are the preferred option for container communication on the same network: -```javascript +```js const network = await new Network().start(); const container = await new GenericContainer("alpine") @@ -81,7 +81,7 @@ expect((await container.exec(["getent", "hosts", "foo"])).exitCode).toBe(0); You can expose a host port to a container: -```javascript +```js const { GenericContainer, TestContainers } = require("testcontainers"); const { createServer } = require("http"); diff --git a/docs/features/wait-strategies.md b/docs/features/wait-strategies.md index 123e5be68..1e06a9bbb 100644 --- a/docs/features/wait-strategies.md +++ b/docs/features/wait-strategies.md @@ -2,7 +2,7 @@ Note that the startup timeout of all wait strategies is configurable: -```javascript +```js const { GenericContainer } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -14,7 +14,7 @@ const container = await new GenericContainer("alpine") The default wait strategy used by Testcontainers. 
It will wait up to 60 seconds for the container's mapped network ports to be bound. -```javascript +```js const { GenericContainer } = require("testcontainers"); const container = await new GenericContainer("alpine").withExposedPorts(6379).start(); @@ -22,7 +22,7 @@ const container = await new GenericContainer("alpine").withExposedPorts(6379).st It can be set explicitly but is not required: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -35,7 +35,7 @@ const container = await new GenericContainer("alpine") Wait until the container has logged a message: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -45,7 +45,7 @@ const container = await new GenericContainer("alpine") With a regular expression: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -55,7 +55,7 @@ const container = await new GenericContainer("alpine") Wait until the container has logged a message a number of times: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -67,24 +67,26 @@ const container = await new GenericContainer("alpine") Wait until the container's health check is successful: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); -const container = await new GenericContainer("alpine").withWaitStrategy(Wait.forHealthCheck()).start(); +const container = await new GenericContainer("alpine") + .withWaitStrategy(Wait.forHealthCheck()) + .start(); ``` -Define your own health check: +Define your own health check. 
Note that time units are in milliseconds: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") .withHealthCheck({ test: ["CMD-SHELL", "curl -f http://localhost || exit 1"], - interval: 1000, // 1 second - timeout: 3000, // 3 seconds + interval: 1000, + timeout: 3000, retries: 5, - startPeriod: 1000, // 1 second + startPeriod: 1000, }) .withWaitStrategy(Wait.forHealthCheck()) .start(); @@ -92,31 +94,35 @@ const container = await new GenericContainer("alpine") Note that `interval`, `timeout`, `retries` and `startPeriod` are optional as they are inherited from the image or parent image if omitted. +--- + To execute the test with a shell use the form `["CMD-SHELL", "command"]`: -```javascript +```js ["CMD-SHELL", "curl -f http://localhost:8000 || exit 1"]; ``` To execute the test without a shell, use the form: `["CMD", "command", "arg1", "arg2"]`. This may be needed when working with distroless images: -```javascript +```js ["CMD", "/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/hello-world"]; ``` ## HTTP -Wait for an HTTP request to satisfy a condition. By default, it will wait for a 200 response: +Wait for an HTTP request to satisfy a condition. By default, it will wait for a 200 response: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); -const container = await new GenericContainer("redis").withWaitStrategy(Wait.forHttp("/health", 8080)).start(); +const container = await new GenericContainer("redis") + .withWaitStrategy(Wait.forHttp("/health", 8080)) + .start(); ``` Stop waiting after container exited if waiting for container restart not needed. 
-```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("redis") @@ -126,24 +132,24 @@ const container = await new GenericContainer("redis") ### For status code -```javascript +```js .withWaitStrategy(Wait.forHttp("/health", 8080) .forStatusCode(201)) .withWaitStrategy(Wait.forHttp("/health", 8080) - .forStatusCodeMatching(statusCode => statusCode === 201)) + .forStatusCodeMatching(statusCode => `${statusCode}`.startsWith("2"))) ``` ### For response body -```javascript +```js .withWaitStrategy(Wait.forHttp("/health", 8080) .forResponsePredicate(response => response === "OK")) ``` ### Custom request -```javascript +```js .withWaitStrategy(Wait.forHttp("/health", 8080) .withMethod("POST") .withHeaders({ X_CUSTOM_VALUE: "custom" }) @@ -153,14 +159,14 @@ const container = await new GenericContainer("redis") ### Use TLS -```javascript +```js .withWaitStrategy(Wait.forHttp("/health", 8443) .usingTls()) ``` #### Insecure TLS -```javascript +```js .withWaitStrategy(Wait.forHttp("/health", 8443) .usingTls() .insecureTls()) @@ -170,7 +176,7 @@ const container = await new GenericContainer("redis") Wait until a shell command returns a successful exit code: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -182,7 +188,7 @@ const container = await new GenericContainer("alpine") This strategy is intended for use with containers that only run briefly and exit of their own accord. As such, success is deemed to be when the container has stopped with exit code 0. 
-```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -194,7 +200,7 @@ const container = await new GenericContainer("alpine") Multiple wait strategies can be chained together: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("alpine") @@ -204,7 +210,7 @@ const container = await new GenericContainer("alpine") The composite wait strategy by default will respect each individual wait strategy's startup timeout. For example: -```javascript +```js const w1 = Wait.forListeningPorts().withStartupTimeout(1000); // 1 second const w2 = Wait.forLogMessage("READY").withStartupTimeout(2000); // 2 seconds @@ -216,7 +222,7 @@ expect(w2.getStartupTimeout()).toBe(2000); The startup timeout of inner wait strategies that have not defined their own startup timeout can be set by setting the startup timeout on the composite: -```javascript +```js const w1 = Wait.forListeningPorts().withStartupTimeout(1000); // 1 second const w2 = Wait.forLogMessage("READY"); @@ -228,7 +234,7 @@ expect(w2.getStartupTimeout()).toBe(2000); The startup timeout of all wait strategies can be controlled by setting a deadline on the composite. In this case, the composite will throw unless all inner wait strategies have resolved before the deadline. 
-```javascript +```js const w1 = Wait.forListeningPorts(); const w2 = Wait.forLogMessage("READY"); const composite = Wait.forAll([w1, w2]).withDeadline(2000); // 2 seconds @@ -238,7 +244,7 @@ const composite = Wait.forAll([w1, w2]).withDeadline(2000); // 2 seconds If these options do not meet your requirements, you can subclass `StartupCheckStrategy` and use `Dockerode`, which is the underlying Docker client used by Testcontainers: -```javascript +```js const Dockerode = require("dockerode"); const { GenericContainer, diff --git a/docs/modules/arangodb.md b/docs/modules/arangodb.md index 3c93995f2..5ff6667c0 100644 --- a/docs/modules/arangodb.md +++ b/docs/modules/arangodb.md @@ -1,6 +1,4 @@ -# ArangoDB Module - -[ArangoDB](https://www.arangodb.com/) is an open source friendly multi-model database. You can find the documentation of the [ArangoDB JavaScript](https://www.arangodb.com/docs/stable/drivers/js.html) driver here. +# ArangoDB ## Install @@ -8,9 +6,18 @@ npm install @testcontainers/arangodb --save-dev ``` -## Example +## Examples + +These examples use the following libraries: + +- [arangojs](https://www.npmjs.com/package/arangojs/v/6.0.0-alpha.0) + + npm install arangojs + +Choose an image from the [container registry](https://hub.docker.com/_/arangodb) and substitute `IMAGE`. + +### Execute a query -[](../../packages/modules/arangodb/src/arangodb-container.test.ts) inside_block:connect +[](../../packages/modules/arangodb/src/arangodb-container.test.ts) inside_block:example - diff --git a/docs/modules/azurite.md b/docs/modules/azurite.md index e811456eb..b9d69fdbd 100644 --- a/docs/modules/azurite.md +++ b/docs/modules/azurite.md @@ -1,6 +1,4 @@ -# Azurite Module - -[Azurite](https://github.com/Azure/Azurite) is an open source Azure Storage API compatible server (emulator). Based on Node.js, Azurite provides cross platform experiences for developers wanting to try Azure Storage easily in a local environment. 
Azurite simulates most of the commands supported by Azure Storage with minimal dependencies. +# Azurite ## Install @@ -10,26 +8,54 @@ npm install @testcontainers/azurite --save-dev ## Examples +These examples use the following libraries: + +- [@azure/data-tables](https://www.npmjs.com/package/@azure/data-tables) + + npm install @azure/data-tables + +- [@azure/storage-blob](https://www.npmjs.com/package/@azure/storage-blob) + + npm install @azure/storage-blob + +- [@azure/storage-queue](https://www.npmjs.com/package/@azure/storage-queue) + + npm install @azure/storage-queue + +Choose an image from the [container registry](https://hub.docker.com/r/microsoft/azure-storage-azurite) and substitute `IMAGE`. + +### Upload/download a blob + -[Upload and download a blob:](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:uploadAndDownloadBlob +[](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:uploadAndDownloadBlob +### Send/receive queue messages + -[Send and receive queue messages:](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:sendAndReceiveQueue +[](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:sendAndReceiveQueue +### Create/insert/fetch on a table + -[Create and insert on table:](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:createAndInsertOnTable +[](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:createAndInsertOnTable +### In memory persistence + -[Use custom credentials:](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:customCredentials +[](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:inMemoryPersistence +### With credentials + -[Use custom ports:](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:customPorts +[](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:customCredentials +### With ports 
+ -[Enable in-memory persistence:](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:inMemoryPersistence +[](../../packages/modules/azurite/src/azurite-container.test.ts) inside_block:customPorts diff --git a/docs/modules/cassandra.md b/docs/modules/cassandra.md index 77917efc8..c7c32c351 100644 --- a/docs/modules/cassandra.md +++ b/docs/modules/cassandra.md @@ -1,8 +1,4 @@ -# Cassandra Module - -[Cassandra](https://cassandra.apache.org/_/index.html) is a free and open source, distributed NoSQL database management system. It is designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. - - +# Cassandra ## Install @@ -12,18 +8,28 @@ npm install @testcontainers/cassandra --save-dev ## Examples - -[Connect:](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:connectWithDefaultCredentials - +These examples use the following libraries: + +- [cassandra-driver](https://www.npmjs.com/package/cassandra-driver) + + npm install cassandra-driver + +Choose an image from the [container registry](https://hub.docker.com/_/cassandra) and substitute `IMAGE`. 
+ +### Execute a query -[Connect with custom credentials:](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:connectWithCustomCredentials +[](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:connectWithDefaultCredentials +### With credentials + -[With custom datacenter / rack](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:customDataCenterAndRack +[](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:connectWithCustomCredentials +### With datacenter/rack + -[Insert & fetch data:](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:createAndFetchData +[](../../packages/modules/cassandra/src/cassandra-container.test.ts) inside_block:customDataCenterAndRack diff --git a/docs/modules/chromadb.md b/docs/modules/chromadb.md index 3d80ffc2e..05873f0c6 100644 --- a/docs/modules/chromadb.md +++ b/docs/modules/chromadb.md @@ -1,6 +1,4 @@ -# ChromaDB Module - -[ChromaDB](https://www.trychroma.com/) is an AI-native open-source embedding database. +# ChromaDB ## Install @@ -8,37 +6,40 @@ npm install @testcontainers/chromadb --save-dev ``` -## Resources +## Examples -* [GitHub](https://github.com/chroma-core/chroma) -* [Node.js Client](https://www.npmjs.com/package/chromadb) -* [Docs](https://docs.trychroma.com) -* [Discord](https://discord.gg/MMeYNTmh3x) -* [Cookbook](https://cookbook.chromadb.dev) +These examples use the following libraries: -## Examples +- [chromadb](https://www.npmjs.com/package/chromadb) - -[Connect to Chroma:](../../packages/modules/chromadb/src/chromadb-container.test.ts) -inside_block:simpleConnect - + npm install chromadb + +- [ollama](https://www.npmjs.com/package/ollama) + + npm install ollama + +Choose an image from the [container registry](https://hub.docker.com/r/chromadb/chroma) and substitute `IMAGE`. 
+ +### Execute a query -[Create Collection:](../../packages/modules/chromadb/src/chromadb-container.test.ts) -inside_block:createCollection +[](../../packages/modules/chromadb/src/chromadb-container.test.ts) inside_block:chromaCreateCollection +### Embedding function + -[Query Collection with Embedding Function:](../../packages/modules/chromadb/src/chromadb-container.test.ts) -inside_block:queryCollectionWithEmbeddingFunction +[](../../packages/modules/chromadb/src/chromadb-container.test.ts) inside_block:queryCollectionWithEmbeddingFunction +### Persistent directory + -[Work with persistent directory:](../../packages/modules/chromadb/src/chromadb-container.test.ts) -inside_block:persistentData +[](../../packages/modules/chromadb/src/chromadb-container.test.ts) inside_block:persistentData +### Authentication + -[Work with authentication:](../../packages/modules/chromadb/src/chromadb-container.test.ts) inside_block:auth +[](../../packages/modules/chromadb/src/chromadb-container.test.ts) inside_block:chromaAuth - diff --git a/docs/modules/clickhouse.md b/docs/modules/clickhouse.md index f69752717..d26c860a4 100644 --- a/docs/modules/clickhouse.md +++ b/docs/modules/clickhouse.md @@ -1,6 +1,4 @@ -# ClickHouse Module - -[ClickHouse](https://clickhouse.com/) is a column-oriented database management system for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time. +# ClickHouse ## Install @@ -10,46 +8,34 @@ npm install @testcontainers/clickhouse --save-dev ## Examples - -[Connect and execute query:](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:connectWithOptions - +These examples use the following libraries: + +- [@clickhouse/client](https://www.npmjs.com/package/@clickhouse/client) + + npm install @clickhouse/client + +Choose an image from the [container registry](https://hub.docker.com/r/clickhouse/clickhouse-server) and substitute `IMAGE`. 
+ +### Execute a query -[Connect using URL and execute query:](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:connectWithUrl +[](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:connectWithOptions +### Connect with URL + -[Connect with username and password and execute query:](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:connectWithUsernameAndPassword +[](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:connectWithUrl +### With credentials + -[Set database:](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:setDatabase +[](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:connectWithUsernameAndPassword +### With database + -[Set username:](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:setUsername +[](../../packages/modules/clickhouse/src/clickhouse-container.test.ts) inside_block:setDatabase - -### Connection Methods - -The module provides several methods to connect to the ClickHouse container: - -1. `getClientOptions()` - Returns a configuration object suitable for `@clickhouse/client`: - ```typescript - { - url: string; // HTTP URL with host and port - username: string; // Container username - password: string; // Container password - database: string; // Container database - } - ``` -2. `getConnectionUrl()` - Returns a complete HTTP URL including credentials and database: - ``` - http://[username[:password]@][host[:port]]/database - ``` -3. `getHttpUrl()` - Returns the base HTTP URL without credentials: - ``` - http://[host[:port]] - ``` - -These methods can be used with the `@clickhouse/client` package or any other ClickHouse client. 
diff --git a/docs/modules/cockroachdb.md b/docs/modules/cockroachdb.md index 07bf4c73d..95705a060 100644 --- a/docs/modules/cockroachdb.md +++ b/docs/modules/cockroachdb.md @@ -1,7 +1,4 @@ -# CockroachDB Module - -[CockroachDB](https://github.com/cockroachdb/cockroach) is a cloud-native, postgresql compatible, distributed SQL database designed to build, scale, and manage modern, data-intensive applications. - +# CockroachDB ## Install @@ -11,18 +8,35 @@ npm install @testcontainers/cockroachdb --save-dev ## Examples +These examples use the following libraries: + +- [pg](https://www.npmjs.com/package/pg) + + npm install pg + npm install @types/pg --save-dev + +Choose an image from the [container registry](https://hub.docker.com/r/cockroachdb/cockroach) and substitute `IMAGE`. + +### Execute a query + -[Connect and execute query:](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:connect +[](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:cockroachConnect +### Connect with URI + -[Connect and execute query using URI:](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:uriConnect +[](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:uriConnect +### With database + -[Set database:](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:setDatabase +[](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:setDatabase +### With username + -[Set username:](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:setUsername +[](../../packages/modules/cockroachdb/src/cockroachdb-container.test.ts) inside_block:setUsername diff --git a/docs/modules/cosmosdb.md b/docs/modules/cosmosdb.md index 807742104..0613ddbf1 100644 --- a/docs/modules/cosmosdb.md +++ b/docs/modules/cosmosdb.md @@ -1,6 +1,13 @@ -# Cosmos DB Emulator Module (Linux-based) +# CosmosDB -[Azure 
Cosmos DB](https://azure.microsoft.com/en-GB/products/cosmos-db) is a globally distributed, multi-model database service provided by Microsoft. +!!! info + This module uses the **Linux-based** version of the CosmosDB emulator. In general, it: + + - Provides better compatibility on a variety of systems. + - Consumes significantly less resources. + - Comes with much faster startup times. + + However, not all features of a full CosmosDB are implemented yet. Refer to [this overview](https://learn.microsoft.com/en-us/azure/cosmos-db/emulator-linux#feature-support) for a detailed list. ## Install @@ -9,24 +16,23 @@ npm install @testcontainers/azurecosmosdb --save-dev ``` ## Examples - -[Connect to emulator and create a database:](../../packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts) inside_block:httpCreateDB - - -[Using HTTPS:](../../packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts) inside_block:httpsCreateDB - +These examples use the following libraries: + +- [@azure/cosmos](https://www.npmjs.com/package/@azure/cosmos) + + npm install @azure/cosmos + +Choose an image from [Microsoft Artifact Registry](https://mcr.microsoft.com/) and substitute `IMAGE`. For example, `mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emulator:vnext-preview`. + +### Execute a query -[Create and read items:](../../packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts) inside_block:createAndRead +[](../../packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts) inside_block:createAndRead -## Caveats -### Compatibility -This testcontainer uses the [linux-based](https://learn.microsoft.com/en-us/azure/cosmos-db/emulator-linux) version. 
In general, it: +### With HTTPS -- Provides better compatibility on a variety of systems -- Consumes significantly less resources -- Comes with much faster startup times - -However, not all features of a full CosmosDB are implemented yet - please refer to [this overview](https://learn.microsoft.com/en-us/azure/cosmos-db/emulator-linux#feature-support) for a detailed list. \ No newline at end of file + +[](../../packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts) inside_block:httpsCreateDB + diff --git a/docs/modules/couchbase.md b/docs/modules/couchbase.md index d826fda62..27f576cd5 100644 --- a/docs/modules/couchbase.md +++ b/docs/modules/couchbase.md @@ -1,7 +1,4 @@ -# Couchbase Module - -[Couchbase](https://www.couchbase.com/) is a distributed document database with a powerful search engine and in-built operational and analytical capabilities. It brings the power of NoSQL to the edge and provides fast, efficient bidirectional synchronization of data between the edge and the cloud. - +# Couchbase ## Install @@ -11,10 +8,16 @@ npm install @testcontainers/couchbase --save-dev ## Examples - -[upsertAndGet:](../../packages/modules/couchbase/src/couchbase-container.test.ts) inside_block:upsertAndGet - +These examples use the following libraries: + +- [couchbase](https://www.npmjs.com/package/couchbase) + + npm install couchbase + +Choose an image from the [container registry](https://hub.docker.com/r/couchbase/server) and substitute `IMAGE`. 
+ +### Execute a query -[Connect and execute query:](../../packages/modules/couchbase/src/couchbase-container.test.ts) inside_block:connectAndQuery +[](../../packages/modules/couchbase/src/couchbase-container.test.ts) inside_block:connectAndQuery diff --git a/docs/modules/elasticsearch.md b/docs/modules/elasticsearch.md index 1e9d6678c..6347bda6c 100644 --- a/docs/modules/elasticsearch.md +++ b/docs/modules/elasticsearch.md @@ -1,6 +1,4 @@ -# Elasticsearch Module - -[Elasticsearch](https://www.elastic.co/elasticsearch/) is a search engine based on the Lucene library. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents. +# Elasticsearch ## Install @@ -10,10 +8,28 @@ npm install @testcontainers/elasticsearch --save-dev ## Examples +These examples use the following libraries: + +- [@elastic/elasticsearch](https://www.npmjs.com/package/@elastic/elasticsearch) + + npm install @elastic/elasticsearch + +Choose an image from the [container registry](https://hub.docker.com/_/elasticsearch) and substitute `IMAGE`. 
+ +### Create an index + -[Create an index:](../../packages/modules/elasticsearch/src/elasticsearch-container.test.ts) inside_block:createIndex +[](../../packages/modules/elasticsearch/src/elasticsearch-container.test.ts) inside_block:createIndex +### Index a document + + +[](../../packages/modules/elasticsearch/src/elasticsearch-container.test.ts) inside_block:indexDocument + + +### With password + -[Index a document:](../../packages/modules/elasticsearch/src/elasticsearch-container.test.ts) inside_block:indexDocument +[](../../packages/modules/elasticsearch/src/elasticsearch-container.test.ts) inside_block:withPassword diff --git a/docs/modules/etcd.md b/docs/modules/etcd.md index 0088dba2e..f6debc66a 100644 --- a/docs/modules/etcd.md +++ b/docs/modules/etcd.md @@ -1,6 +1,4 @@ -# Etcd Module - -[Etcd](https://etcd.io/) is a strongly consistent, distributed key-value store that provides a reliable way to store data that needs to be accessed by a distributed system or cluster of machines. +# Etcd ## Install @@ -10,10 +8,23 @@ npm install @testcontainers/etcd --save-dev ## Examples +These examples use the following libraries: + +- [etcd3](https://www.npmjs.com/package/etcd3) + + npm install etcd3 + +Choose an image from the [container registry](https://quay.io/repository/coreos/etcd?tab=info) and substitute `IMAGE`. 
+ +### Read and write key-value pairs + -[Read and write key-value pairs:](../../packages/modules/etcd/src/etcd-container.test.ts) inside_block:readWrite +[](../../packages/modules/etcd/src/etcd-container.test.ts) inside_block:readWrite +### Subscribe to key changes + -[Subscribe to key changes:](../../packages/modules/etcd/src/etcd-container.test.ts) inside_block:subscribe +[](../../packages/modules/etcd/src/etcd-container.test.ts) inside_block:etcdSubscribe + \ No newline at end of file diff --git a/docs/modules/gcloud.md b/docs/modules/gcloud.md index 3195a51f1..2722ae466 100644 --- a/docs/modules/gcloud.md +++ b/docs/modules/gcloud.md @@ -1,6 +1,4 @@ -# GCloud Module - -Testcontainers module for the Google Cloud Platform's [Cloud SDK](https://cloud.google.com/sdk/). +# GCloud ## Install @@ -8,73 +6,112 @@ Testcontainers module for the Google Cloud Platform's [Cloud SDK](https://cloud. npm install @testcontainers/gcloud --save-dev ``` -The module supports multiple emulators. Use the following classes: +## Examples -Emulator | Class | Container Image --|-|- -Firestore (Native mode) | FirestoreEmulatorContainer | [gcr.io/google.com/cloudsdktool/google-cloud-cli:emulators](https://gcr.io/google.com/cloudsdktool/google-cloud-cli) -Firestore (Datastore mode) | DatastoreEmulatorContainer | [gcr.io/google.com/cloudsdktool/google-cloud-cli:emulators](https://gcr.io/google.com/cloudsdktool/google-cloud-cli) -Cloud PubSub | PubSubEmulatorContainer | [gcr.io/google.com/cloudsdktool/google-cloud-cli:emulators](https://gcr.io/google.com/cloudsdktool/google-cloud-cli) -Cloud Storage | CloudStorageEmulatorContainer | [fsouza/fake-gcs-server:1.52.2](https://hub.docker.com/r/fsouza/fake-gcs-server) -BigQuery | BigQueryEmulatorContainer | [ghcr.io/goccy/bigquery-emulator:0.6.6](https://ghcr.io/goccy/bigquery-emulator) -Cloud Spanner | SpannerEmulatorContainer | [gcr.io/cloud-spanner-emulator/emulator:1.5.37](https://gcr.io/cloud-spanner-emulator/emulator:1.5.37) +### Firestore 
-## Examples +These examples use the following libraries: -### Firestore (Native mode) +- [@google-cloud/firestore](https://www.npmjs.com/package/@google-cloud/firestore) - -[Starting a Firestore Emulator container with the default image](../../packages/modules/gcloud/src/firestore-emulator-container.test.ts) inside_block:firestore4 - + npm install @google-cloud/firestore + +Choose an image from the [container registry](https://gcr.io/google.com/cloudsdktool/google-cloud-cli) and substitute `IMAGE`. -[Starting a Firestore Emulator container with a custom emulator image](../../packages/modules/gcloud/src/firestore-emulator-container.test.ts) inside_block:firestore5 +[](../../packages/modules/gcloud/src/firestore-emulator-container.test.ts) inside_block:firestoreExample -### Firestore (Datastore mode) +--- + +### Datastore - -[Starting a Datastore Emulator container with the default image](../../packages/modules/gcloud/src/datastore-emulator-container.test.ts) inside_block:datastore4 - +These examples use the following libraries: + +- [@google-cloud/datastore](https://www.npmjs.com/package/@google-cloud/datastore) + npm install @google-cloud/datastore + +Choose an image from the [container registry](https://gcr.io/google.com/cloudsdktool/google-cloud-cli) and substitute `IMAGE`. + -[Starting a Datastore Emulator container with a custom emulator image](../../packages/modules/gcloud/src/datastore-emulator-container.test.ts) inside_block:datastore5 +[](../../packages/modules/gcloud/src/datastore-emulator-container.test.ts) inside_block:datastoreExample + +--- ### Cloud PubSub +These examples use the following libraries: + +- [@google-cloud/pubsub](https://www.npmjs.com/package/@google-cloud/pubsub) + + npm install @google-cloud/pubsub + +Choose an image from the [container registry](https://gcr.io/google.com/cloudsdktool/google-cloud-cli) and substitute `IMAGE`. 
+ -[Starting a Cloud PubSub Emulator container with the default image](../../packages/modules/gcloud/src/pubsub-emulator-container.test.ts) +[](../../packages/modules/gcloud/src/pubsub-emulator-container.test.ts) inside_block:pubsubExample +--- + ### Cloud Storage -The Cloud Storage container uses a fake Cloud Storage server by [Francisco Souza](https://github.com/fsouza). +These examples use the following libraries: - -[Starting a Cloud Storage Emulator container with the default image](../../packages/modules/gcloud/src/cloudstorage-emulator-container.test.ts) inside_block:cloud-storage - +- [@google-cloud/storage](https://www.npmjs.com/package/@google-cloud/storage) -### BigQuery + npm install @google-cloud/storage -The BigQuery emulator is by [Masaaki Goshima](https://github.com/goccy) and uses [go-zetasqlite](https://github.com/goccy/go-zetasqlite). +Choose an image from the [container registry](https://hub.docker.com/r/fsouza/fake-gcs-server) and substitute `IMAGE`. -[Starting a BigQuery Emulator container with the default image](../../packages/modules/gcloud/src/bigquery-emulator-container.test.ts) +[](../../packages/modules/gcloud/src/cloudstorage-emulator-container.test.ts) inside_block:cloudstorageExample +--- + ### Cloud Spanner -The Cloud Spanner emulator container wraps Google's official emulator image. +These examples use the following libraries: + +- [@google-cloud/spanner](https://www.npmjs.com/package/@google-cloud/spanner) + + npm install @google-cloud/spanner + +Choose an image from the [container registry](https://gcr.io/cloud-spanner-emulator/emulator:1.5.37) and substitute `IMAGE`. 
+#### Connect via client + -[Starting a Spanner Emulator container and exposing endpoints using explicitly configured client](../../packages/modules/gcloud/src/spanner-emulator-container.test.ts) inside_block:startupWithExplicitClient +[](../../packages/modules/gcloud/src/spanner-emulator-container.test.ts) inside_block:startupWithExplicitClient +#### Connect via environment + -[Starting a Spanner Emulator container and exposing endpoints using projectId and SPANNER_EMULATOR_HOST](../../packages/modules/gcloud/src/spanner-emulator-container.test.ts) inside_block:startupWithEnvironmentVariable +[](../../packages/modules/gcloud/src/spanner-emulator-container.test.ts) inside_block:startupWithEnvironmentVariable +#### Helper usage + + +[](../../packages/modules/gcloud/src/spanner-emulator-helper.test.ts) inside_block:createAndDelete + + +--- + +### BigQuery + +These examples use the following libraries: + +- [@google-cloud/bigquery](https://www.npmjs.com/package/@google-cloud/bigquery) + + npm install @google-cloud/bigquery + +Choose an image from the [container registry](https://ghcr.io/goccy/bigquery-emulator) and substitute `IMAGE`. + -[Creating and deleting instance and database via helper](../../packages/modules/gcloud/src/spanner-emulator-helper.test.ts) inside_block:createAndDelete +[](../../packages/modules/gcloud/src/bigquery-emulator-container.test.ts) inside_block:bigqueryExample diff --git a/docs/modules/hivemq.md b/docs/modules/hivemq.md index 62ce6503e..f77a32db1 100644 --- a/docs/modules/hivemq.md +++ b/docs/modules/hivemq.md @@ -1,7 +1,4 @@ -# HiveMQ MQTT Module - -This module allows automatic start up of [HiveMQ's](https://www.hivemq.com/) docker container within -test suites, to enable programmatic testing of JavaScript based MQTT client applications. 
+# HiveMQ ## Install @@ -9,22 +6,18 @@ test suites, to enable programmatic testing of JavaScript based MQTT client appl npm install @testcontainers/hivemq --save-dev ``` -## Resources - -- [Community forum](https://community.hivemq.com/) -- [HiveMQ website](https://www.hivemq.com/) -- [MQTT Essentials](https://www.hivemq.com/mqtt-essentials/) -- [MQTT 5 Essentials](https://www.hivemq.com/mqtt-5/) +## Examples -Please make sure to check out the hivemq-docs for the [Community Edition](https://github.com/hivemq/hivemq-community-edition/wiki/). +These examples use the following libraries: -!!! Info -We are working to support the HiveMQ Enterprise Edition as outlined in the [Java Test Containers Module](https://java.testcontainers.org/modules/hivemq/). +- [mqtt](https://www.npmjs.com/package/mqtt) -## Examples + npm install mqtt - +Choose an image from the [container registry](https://hub.docker.com/r/hivemq/hivemq-ce) and substitute `IMAGE`. -[Connect with a mqtt.js client to HiveMQ](../../packages/modules/hivemq/src/hivemq-container.test.ts) inside_block:connect +### Produce/consume a message + +[](../../packages/modules/hivemq/src/hivemq-container.test.ts) inside_block:hivemqConnect diff --git a/docs/modules/k3s.md b/docs/modules/k3s.md index 0ed673142..1eeadc8fd 100644 --- a/docs/modules/k3s.md +++ b/docs/modules/k3s.md @@ -1,6 +1,7 @@ -# K3s Module +# K3s -[K3s](https://k3s.io/) is a highly available, certified Kubernetes distribution designed for production workloads in unattended, resource-constrained, remote locations or inside IoT appliances. +!!! warning + This container runs privileged, as it spawns its own containers. For this reason, this container will not work in certain rootless Docker, Docker-in-Docker, or other environments that disallow privileged containers. 
## Install @@ -10,17 +11,28 @@ npm install @testcontainers/k3s --save-dev ## Examples +These examples use the following libraries: + +- [@kubernetes/client-node](https://www.npmjs.com/package/@kubernetes/client-node) + + npm install @kubernetes/client-node + +Choose an image from the [container registry](https://hub.docker.com/r/rancher/k3s) and substitute `IMAGE`. + +### List nodes + -[Starting a K3s server:](../../packages/modules/k3s/src/k3s-container.test.ts) inside_block:starting_k3s +[](../../packages/modules/k3s/src/k3s-container.test.ts) inside_block:k3sListNodes + +### Start a pod -[Connecting to the server using the Kubernetes JavaScript client:](../../packages/modules/k3s/src/k3s-container.test.ts) inside_block:connecting_with_client +[](../../packages/modules/k3s/src/k3s-container.test.ts) inside_block:k3sStartPod -## Known limitations +### Aliased kubeconfig -!!! warning - * K3sContainer runs as a privileged container and needs to be able to spawn its own containers. For these reasons, - K3sContainer will not work in certain rootless Docker, Docker-in-Docker, or other environments where privileged - containers are disallowed. + +[](../../packages/modules/k3s/src/k3s-container.test.ts) inside_block:k3sAliasedKubeConfig + diff --git a/docs/modules/kafka.md b/docs/modules/kafka.md index bdadf1f1b..63d417d21 100644 --- a/docs/modules/kafka.md +++ b/docs/modules/kafka.md @@ -1,6 +1,4 @@ -# Kafka Module - -[Kafka](https://kafka.apache.org/) is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications. 
+# Kafka ## Install @@ -8,34 +6,66 @@ npm install @testcontainers/kafka --save-dev ``` -## Kafka 8.x +## Examples + +### Kafka 8.x + +These examples use the following libraries: + +- [kafkajs](https://www.npmjs.com/package/kafkajs) -### Examples + npm install kafkajs + +Choose an image from the [container registry](https://hub.docker.com/r/confluentinc/cp-kafka) and substitute `IMAGE`. + +#### Produce/consume a message -[Connect to Kafka:](../../packages/modules/kafka/src/kafka-container-latest.test.ts) inside_block:connectKafkaLatest +[Code](../../packages/modules/kafka/src/kafka-container-latest.test.ts) inside_block:kafkaLatestConnect +[`assertMessageProducedAndConsumed`](../../packages/modules/kafka/src/test-helper.ts) inside_block:kafkaTestHelper +#### With SSL + -[Connect to Kafka using SSL:](../../packages/modules/kafka/src/kafka-container-latest.test.ts) inside_block:ssl +[Code](../../packages/modules/kafka/src/kafka-container-latest.test.ts) inside_block:kafkaLatestSsl +[`assertMessageProducedAndConsumed`](../../packages/modules/kafka/src/test-helper.ts) inside_block:kafkaTestHelper -## Kafka 7.x +--- + +### Kafka 7.x -### Examples +These examples use the following libraries: + +- [kafkajs](https://www.npmjs.com/package/kafkajs) + + npm install kafkajs + +Choose an image from the [container registry](https://hub.docker.com/r/confluentinc/cp-kafka) and substitute `IMAGE`. 
+ +#### Produce/consume a message -[Connect to Kafka using in-built ZooKeeper:](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:connectBuiltInZK +[Code](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:connectBuiltInZK +[`assertMessageProducedAndConsumed`](../../packages/modules/kafka/src/test-helper.ts) inside_block:kafkaTestHelper +#### With SSL + -[Connect to Kafka using your own ZooKeeper:](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:connectProvidedZK +[Code](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:kafkaSsl +[`assertMessageProducedAndConsumed`](../../packages/modules/kafka/src/test-helper.ts) inside_block:kafkaTestHelper +#### With provided ZooKeeper + -[Connect to Kafka using SSL:](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:ssl +[](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:connectProvidedZK +#### With Kraft + -[Connect to Kafka using Kraft:](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:connectKraft +[](../../packages/modules/kafka/src/kafka-container-7.test.ts) inside_block:connectKraft diff --git a/docs/modules/kurrentdb.md b/docs/modules/kurrentdb.md index d016770ef..09c7a1106 100644 --- a/docs/modules/kurrentdb.md +++ b/docs/modules/kurrentdb.md @@ -1,6 +1,4 @@ -# KurrentDB Module - -[KurrentDB](https://kurrent.io) is an event sourcing database that stores data in streams of immutable events. +# KurrentDB ## Install @@ -10,10 +8,22 @@ npm install @testcontainers/kurrentdb --save-dev ## Examples +These examples use the following libraries: + +- [@kurrent/kurrentdb-client](https://www.npmjs.com/package/@kurrent/kurrentdb-client) + + npm install @kurrent/kurrentdb-client + +Choose an image from the [container registry](https://hub.docker.com/r/kurrentplatform/kurrentdb) and substitute `IMAGE`. 
+ +### Execute a query + -[Start container:](../../packages/modules/kurrentdb/src/kurrentdb-container.test.ts) inside_block:startContainer +[](../../packages/modules/kurrentdb/src/kurrentdb-container.test.ts) inside_block:startContainer +### Subscribe to a standard projection + -[Subscribe to standard projection:](../../packages/modules/kurrentdb/src/kurrentdb-container.test.ts) inside_block:usingStandardProjections +[](../../packages/modules/kurrentdb/src/kurrentdb-container.test.ts) inside_block:usingStandardProjections diff --git a/docs/modules/localstack.md b/docs/modules/localstack.md index 09aa6ffb4..634f24960 100644 --- a/docs/modules/localstack.md +++ b/docs/modules/localstack.md @@ -1,6 +1,4 @@ -# Localstack Module - -[Localstack](https://www.localstack.cloud/): Develop and test your AWS applications locally to reduce development time and increase product velocity +# Localstack ## Install @@ -10,6 +8,17 @@ npm install @testcontainers/localstack --save-dev ## Examples +These examples use the following libraries: + +- [@aws-sdk/client-s3](https://www.npmjs.com/package/@aws-sdk/client-s3) + + npm install @aws-sdk/client-s3 + +Choose an image from the [container registry](https://hub.docker.com/r/localstack/localstack) and substitute `IMAGE`. + +### Create a S3 bucket + -[Create a S3 bucket:](../../packages/modules/localstack/src/localstack-container.test.ts) inside_block:createS3Bucket +[](../../packages/modules/localstack/src/localstack-container.test.ts) inside_block:localstackCreateS3Bucket + \ No newline at end of file diff --git a/docs/modules/mariadb.md b/docs/modules/mariadb.md index aefc08af1..68d94e693 100644 --- a/docs/modules/mariadb.md +++ b/docs/modules/mariadb.md @@ -1,8 +1,4 @@ -# MariaDB Module - -[MariaDB](https://mariadb.org/) is one of the most popular open source relational databases. It’s made by the original developers of MySQL and guaranteed to stay open source. 
It is part of most cloud offerings and the default in most Linux distributions. - - +# MariaDB ## Install @@ -12,18 +8,34 @@ npm install @testcontainers/mariadb --save-dev ## Examples +These examples use the following libraries: + +- [mariadb](https://www.npmjs.com/package/mariadb) + + npm install mariadb + +Choose an image from the [container registry](https://hub.docker.com/_/mariadb) and substitute `IMAGE`. + +### Execute a query + -[Connect and execute query:](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:connect +[](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:mariaDbConnect +### Connect via URI + -[Connect and execute query using URI:](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:uriConnect +[](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:mariaDbUriConnect +### With user + -[Set username:](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:setUsername +[](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:mariaDbSetUsername +### With database + -[Insert & fetch data:](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:insertAndFetchData +[](../../packages/modules/mariadb/src/mariadb-container.test.ts) inside_block:mariaDbSetDatabase diff --git a/docs/modules/minio.md b/docs/modules/minio.md index 69baab5a9..069dfcd47 100644 --- a/docs/modules/minio.md +++ b/docs/modules/minio.md @@ -1,8 +1,4 @@ -# MinIO Module - -[MinIO](https://min.io/) is a high performance object storage solution. 
It is API compatible with the Amazon S3 cloud storage service and can handle unstructured data such as photos, videos, log files, backups, and container images - - +# MinIO ## Install @@ -12,10 +8,22 @@ npm install @testcontainers/minio --save-dev ## Examples +These examples use the following libraries: + +- [minio](https://www.npmjs.com/package/minio) + + npm install minio + +Choose an image from the [container registry](https://hub.docker.com/r/minio/minio) and substitute `IMAGE`. + +### Upload a file + -[Connect with default credentials:](../../packages/modules/minio/src/minio-container.test.ts) inside_block:connectWithDefaultCredentials +[](../../packages/modules/minio/src/minio-container.test.ts) inside_block:connectWithDefaultCredentials +### With credentials + -[Connect with custom credentials:](../../packages/modules/minio/src/minio-container.test.ts) inside_block:connectWithCustomCredentials +[](../../packages/modules/minio/src/minio-container.test.ts) inside_block:connectWithCustomCredentials diff --git a/docs/modules/mockserver.md b/docs/modules/mockserver.md index 477c070ad..db6df9505 100644 --- a/docs/modules/mockserver.md +++ b/docs/modules/mockserver.md @@ -1,6 +1,4 @@ -# Mockserver Module - -[MockServer](https://www.mock-server.com/#what-is-mockserver) allows you to mock any server or service via HTTP or HTTPS, such as a REST or RPC service. +# Mockserver ## Install @@ -10,12 +8,30 @@ npm install @testcontainers/mockserver --save-dev ## Examples +These examples use the following libraries: + +- [mockserver-client](https://www.npmjs.com/package/mockserver-client) + + npm install mockserver-client + +- [superagent](https://www.npmjs.com/package/superagent) + + npm install superagent + npm install @types/superagent --save-dev + +Choose an image from the [container registry](https://hub.docker.com/r/mockserver/mockserver) and substitute `IMAGE`. 
+ +### Mock HTTP request + -[Start container:](../../packages/modules/mockserver/src/mockserver-container.test.ts) inside_block:startContainer +[](../../packages/modules/mockserver/src/mockserver-container.test.ts) inside_block:httpMockServer -MockServer includes built-in TLS support. To obtain an HTTPS URL, use the `getSecureUrl` method. Keep in mind that MockServer uses a self-signed certificate. +### Mock HTTPS request + +!!! note + MockServer uses a self-signed certificate for HTTPS connections. -[Using TLS:](../../packages/modules/mockserver/src/mockserver-container.test.ts) inside_block:httpsRequests - \ No newline at end of file +[](../../packages/modules/mockserver/src/mockserver-container.test.ts) inside_block:mockServerHttps + diff --git a/docs/modules/mongodb.md b/docs/modules/mongodb.md index 586173048..96571b56b 100644 --- a/docs/modules/mongodb.md +++ b/docs/modules/mongodb.md @@ -1,6 +1,4 @@ -# MongoDB Module - -[MongoDB](https://www.mongodb.com/) is an open source NoSQL database management program. NoSQL is used as an alternative to traditional relational databases. NoSQL databases are quite useful for working with large sets of distributed data. MongoDB is a tool that can manage document-oriented information, store or retrieve information. +# MongoDB ## Install @@ -10,10 +8,22 @@ npm install @testcontainers/mongodb --save-dev ## Examples +These examples use the following libraries: + +- [mongoose](https://www.npmjs.com/package/mongoose) + + npm install mongoose + +Choose an image from the [container registry](https://hub.docker.com/_/mongo) and substitute `IMAGE`. 
+ +### Execute a query + -[Connect:](../../packages/modules/mongodb/src/mongodb-container.test.ts) inside_block:connectMongo +[](../../packages/modules/mongodb/src/mongodb-container.test.ts) inside_block:connectMongo +### With credentials + -[Connect with credentials:](../../packages/modules/mongodb/src/mongodb-container.test.ts) inside_block:connectWithCredentials +[](../../packages/modules/mongodb/src/mongodb-container.test.ts) inside_block:connectWithCredentials diff --git a/docs/modules/mssqlserver.md b/docs/modules/mssqlserver.md index 19cad9992..883ff451c 100644 --- a/docs/modules/mssqlserver.md +++ b/docs/modules/mssqlserver.md @@ -1,6 +1,4 @@ -# MS SQL Server Module - -[Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server) is a relational database management system developed by Microsoft. It provides a platform for efficiently storing, managing, and retrieving structured data. MSSQL offers features for data storage, retrieval, manipulation, and analysis, making it a key component in various applications ranging from small-scale projects to enterprise-level systems. +# MSSQL Server ## Install @@ -10,27 +8,40 @@ npm install @testcontainers/mssqlserver --save-dev ## Examples +These examples use the following libraries: + +- [mssql](https://www.npmjs.com/package/mssql) + + npm install mssql + npm install @types/mssql --save-dev + +Choose an image from the [container registry](https://mcr.microsoft.com/en-us/artifact/mar/mssql/server) and substitute `IMAGE`. + !!! warning "EULA Acceptance" -Due to licencing restrictions you are required to accept an EULA for this container image. To indicate that you accept the MS SQL Server image EULA, call the `acceptLicense()` method. + Due to licensing restrictions you are required to accept an EULA for this container image. To indicate that you accept the MS SQL Server image EULA, call the `acceptLicense()` method. 
Please see the [`microsoft-mssql-server` image documentation](https://hub.docker.com/_/microsoft-mssql-server#environment-variables) for a link to the EULA document. - -[Connect and execute query:](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:connect - +### Execute a query -[Connect and execute query using URI:](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:uriConnect +[](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:mssqlConnect +### Connect via URI + -[Connect and execute query using a valid custom password:](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:validPassword +[](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:mssqlUriConnect +### With password + -[Throw error with an invalid password:](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:invalidPassword +[](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:mssqlValidPassword +### With different edition + -[Use a different edition:](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:expressEdition +[](../../packages/modules/mssqlserver/src/mssqlserver-container.test.ts) inside_block:mssqlExpressEdition diff --git a/docs/modules/mysql.md b/docs/modules/mysql.md index 2a224c80c..bcedb9d69 100644 --- a/docs/modules/mysql.md +++ b/docs/modules/mysql.md @@ -1,6 +1,4 @@ -# MySQL Module - -[MySQL](https://www.mysql.com/) is the world's most popular open source database. With its proven performance, reliability and ease-of-use, MySQL has become the leading database choice for web-based applications, covering the entire range from personal projects and websites, via e-commerce and information services, all the way to high profile web properties including Facebook, Twitter, YouTube, Yahoo! and many more. 
+# MySQL ## Install @@ -10,18 +8,28 @@ npm install @testcontainers/mysql --save-dev ## Examples - -[Connect and execute query:](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:connect - +These examples use the following libraries: + +- [mysql2](https://www.npmjs.com/package/mysql2) + + npm install mysql2 + +Choose an image from the [container registry](https://hub.docker.com/_/mysql) and substitute `IMAGE`. + +### Execute a query -[Connect and execute query using URI:](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:uriConnect +[](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:mysqlConnect +### Execute a query inside the container + -[Set username:](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:setUsername +[](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:mysqlExecuteQuery +### Connect via URI + -[Execute a query inside the container:](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:executeQuery +[](../../packages/modules/mysql/src/mysql-container.test.ts) inside_block:mysqlUriConnect diff --git a/docs/modules/nats.md b/docs/modules/nats.md index b79f13299..dd0328484 100644 --- a/docs/modules/nats.md +++ b/docs/modules/nats.md @@ -1,6 +1,4 @@ -# Nats Module - -[NATS](https://nats.io/) is a simple, secure and high performance open source messaging system for cloud native applications, IoT messaging, and microservices architectures. 
+# Nats ## Install @@ -10,18 +8,32 @@ npm install @testcontainers/nats --save-dev ## Examples - -[Connect:](../../packages/modules/nats/src/nats-container.test.ts) inside_block:connect - +These examples use the following libraries: + +- [@nats-io/transport-node](https://www.npmjs.com/package/@nats-io/transport-node) + + npm install @nats-io/transport-node + +- [@nats-io/jetstream](https://www.npmjs.com/package/@nats-io/jetstream) + + npm install @nats-io/jetstream + +Choose an image from the [container registry](https://hub.docker.com/_/nats) and substitute `IMAGE`. + +### Produce/consume a message -[Publish and subscribe:](../../packages/modules/nats/src/nats-container.test.ts) inside_block:pubsub +[](../../packages/modules/nats/src/nats-container.test.ts) inside_block:natsPubsub +### With credentials + -[Set credentials:](../../packages/modules/nats/src/nats-container.test.ts) inside_block:credentials +[](../../packages/modules/nats/src/nats-container.test.ts) inside_block:natsCredentials +### With JetStream + -[Enable JetStream:](../../packages/modules/nats/src/nats-container.test.ts) inside_block:jetstream - \ No newline at end of file +[](../../packages/modules/nats/src/nats-container.test.ts) inside_block:natsJetstream + diff --git a/docs/modules/neo4j.md b/docs/modules/neo4j.md index a1776745c..4d08f4bb8 100644 --- a/docs/modules/neo4j.md +++ b/docs/modules/neo4j.md @@ -1,6 +1,4 @@ -# Neo4j Module - -[Neo4j](https://neo4j.com/) is a highly scalable, robust native graph database. +# Neo4j ## Install @@ -10,18 +8,34 @@ npm install @testcontainers/neo4j --save-dev ## Examples +These examples use the following libraries: + +- [neo4j-driver](https://www.npmjs.com/package/neo4j-driver) + + npm install neo4j-driver + +Choose an image from the [container registry](https://hub.docker.com/_/neo4j) and substitute `IMAGE`. 
+ +### Create a node + -[Connect and create a node:](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:createNode +[](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:createNode +### With credentials + -[Set password:](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:setPassword +[](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:setPassword +### With APOC + -[Configure APOC:](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:apoc +[](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:apoc +### With plugins + -[Configure other supported plugins:](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:pluginsList +[](../../packages/modules/neo4j/src/neo4j-container.test.ts) inside_block:pluginsList diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md index 0921b0a75..1dd054ea0 100644 --- a/docs/modules/ollama.md +++ b/docs/modules/ollama.md @@ -1,37 +1,17 @@ # Ollama -Testcontainers module for [Ollama](https://hub.docker.com/r/ollama/ollama) . +## Install -## Ollama usage examples - -You can start an Ollama container instance from any NodeJS application by using: - - -[Ollama container](../../packages/modules/ollama/src/ollama-container.test.ts) inside_block:container - - -### Pulling the model - - -[Pull model](../../packages/modules/ollama/src/ollama-container.test.ts) inside_block:pullModel - - -### Create a new Image +```bash +npm install @testcontainers/ollama --save-dev +``` -In order to create a new image that contains the model, you can use the following code: +## Examples - -[Commit Image](../../packages/modules/ollama/src/ollama-container.test.ts) inside_block:commitToImage - +Choose an image from the [container registry](https://hub.docker.com/r/ollama/ollama) and substitute `IMAGE`. 
-And use the new image: +### Pull and commit an image -[Use new Image](../../packages/modules/ollama/src/ollama-container.test.ts) inside_block:substitute +[](../../packages/modules/ollama/src/ollama-container.test.ts) inside_block:ollamaPullModel - -## Adding this module to your project - -```bash -npm install @testcontainers/ollama --save-dev -``` \ No newline at end of file diff --git a/docs/modules/opensearch.md b/docs/modules/opensearch.md index 57dd64ac3..ec76de3ca 100644 --- a/docs/modules/opensearch.md +++ b/docs/modules/opensearch.md @@ -1,6 +1,4 @@ -# OpenSearch Module - -[OpenSearch](https://opensearch.org/) is a community-driven, open source search and analytics suite derived from Elasticsearch. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents. +# OpenSearch ## Install @@ -10,14 +8,28 @@ npm install @testcontainers/opensearch --save-dev ## Examples +These examples use the following libraries: + +- [@opensearch-project/opensearch](https://www.npmjs.com/package/@opensearch-project/opensearch) + + npm install @opensearch-project/opensearch + +Choose an image from the [container registry](https://hub.docker.com/r/opensearchproject/opensearch) and substitute `IMAGE`. 
+ +### Create an index + -[Create an index:](../../packages/modules/opensearch/src/opensearch-container.test.ts) inside_block:createIndex +[](../../packages/modules/opensearch/src/opensearch-container.test.ts) inside_block:opensearchCreateIndex +### Index a document + -[Index a document:](../../packages/modules/opensearch/src/opensearch-container.test.ts) inside_block:indexDocument +[](../../packages/modules/opensearch/src/opensearch-container.test.ts) inside_block:opensearchIndexDocument +### With password + -[Set a custom password:](../../packages/modules/opensearch/src/opensearch-container.test.ts) inside_block:customPassword +[](../../packages/modules/opensearch/src/opensearch-container.test.ts) inside_block:opensearchCustomPassword diff --git a/docs/modules/postgresql.md b/docs/modules/postgresql.md index 4bcb903fc..2419fa665 100644 --- a/docs/modules/postgresql.md +++ b/docs/modules/postgresql.md @@ -1,6 +1,4 @@ -# PostgreSQL Module - -[PostgreSQL](https://www.postgresql.org/) is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance. +# PostgreSQL ## Install @@ -10,33 +8,46 @@ npm install @testcontainers/postgresql --save-dev ## Examples +These examples use the following libraries: + +- [pg](https://www.npmjs.com/package/pg) + + npm install pg + npm install @types/pg --save-dev + +Choose an image from the [container registry](https://hub.docker.com/_/postgres) and substitute `IMAGE`. 
+ +### Execute a query + -[Connect and execute query:](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:connect +[](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:pgConnect +### Connect via URI + -[Connect and execute query using URI:](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:uriConnect +[](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:pgUriConnect +### With database + -[Set database:](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:setDatabase +[](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:pgSetDatabase +### With username + -[Set username:](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:setUsername +[](../../packages/modules/postgresql/src/postgresql-container.test.ts) inside_block:pgSetUsername -### Using Snapshots +### Snapshots -This example shows the usage of the postgres module's Snapshot feature to give each test a clean database without having -to recreate the database container on every test or run heavy scripts to clean your database. This makes the individual -tests very modular, since they always run on a brand-new database. - -!!!tip - You should never pass the `"postgres"` system database as the container database name if you want to use snapshots. +!!! warning + You should never pass the `"postgres"` system database as the container database name if you want to use snapshots. The Snapshot logic requires dropping the connected database and using the system database to run commands, which will not work if the database for the container is set to `"postgres"`. 
-[Test with a reusable Postgres container](../../packages/modules/postgresql/src/postgresql-container-snapshot.test.ts) inside_block:createAndRestoreFromSnapshot - \ No newline at end of file +[](../../packages/modules/postgresql/src/postgresql-container-snapshot.test.ts) inside_block:createAndRestoreFromSnapshot + diff --git a/docs/modules/qdrant.md b/docs/modules/qdrant.md index 4e40e47dc..e14cee510 100644 --- a/docs/modules/qdrant.md +++ b/docs/modules/qdrant.md @@ -1,6 +1,4 @@ -# Qdrant Module - -[Qdrant](https://qdrant.tech/) is an open-source, high-performance vector search engine/database. It provides a production-ready service with a convenient API to store, search, and manage points (i.e. vectors) with an additional payload. +# Qdrant ## Install @@ -10,15 +8,29 @@ npm install @testcontainers/qdrant --save-dev ## Examples +These examples use the following libraries: + +- [@qdrant/js-client-rest](https://www.npmjs.com/package/@qdrant/js-client-rest) + + npm install @qdrant/js-client-rest + +Choose an image from the [container registry](https://hub.docker.com/r/qdrant/qdrant) and substitute `IMAGE`. 
+ +### Fetch collections + -[Connect to Qdrant:](../../packages/modules/qdrant/src/qdrant-container.test.ts) +[](../../packages/modules/qdrant/src/qdrant-container.test.ts) inside_block:connectQdrantSimple +### With API key + -[Connect to Qdrant with an API key:](../../packages/modules/qdrant/src/qdrant-container.test.ts) inside_block:connectQdrantWithApiKey +[](../../packages/modules/qdrant/src/qdrant-container.test.ts) inside_block:connectQdrantWithApiKey +### With config file + -[Customize Qdrant instance with a config file:](../../packages/modules/qdrant/src/qdrant-container.test.ts) inside_block:connectQdrantWithConfig +[](../../packages/modules/qdrant/src/qdrant-container.test.ts) inside_block:connectQdrantWithConfig diff --git a/docs/modules/rabbitmq.md b/docs/modules/rabbitmq.md index df2b531d1..b84e618dd 100644 --- a/docs/modules/rabbitmq.md +++ b/docs/modules/rabbitmq.md @@ -1,6 +1,4 @@ -# RabbitMQ Module - -[RabbitMQ](https://www.rabbitmq.com/) is a reliable and mature messaging and streaming broker, which is easy to deploy on cloud environments, on-premises, and on your local machine. It is currently used by millions worldwide. +# RabbitMQ ## Install @@ -10,14 +8,23 @@ npm install @testcontainers/rabbitmq --save-dev ## Examples - -[Connect:](../../packages/modules/rabbitmq/src/rabbitmq-container.test.ts) inside_block:start - +These examples use the following libraries: + +- [amqplib](https://www.npmjs.com/package/amqplib) + + npm install amqplib + npm install @types/amqplib --save-dev + +Choose an image from the [container registry](https://hub.docker.com/_/rabbitmq) and substitute `IMAGE`. 
+ +### Produce/consume a message -[Set credentials:](../../packages/modules/rabbitmq/src/rabbitmq-container.test.ts) inside_block:credentials +[](../../packages/modules/rabbitmq/src/rabbitmq-container.test.ts) inside_block:pubsub +### With credentials + -[Publish and subscribe:](../../packages/modules/rabbitmq/src/rabbitmq-container.test.ts) inside_block:pubsub +[](../../packages/modules/rabbitmq/src/rabbitmq-container.test.ts) inside_block:credentials diff --git a/docs/modules/redis.md b/docs/modules/redis.md index b6468fe5b..bc3205ad7 100644 --- a/docs/modules/redis.md +++ b/docs/modules/redis.md @@ -1,6 +1,4 @@ -# Redis Module - -[Redis](https://redis.io/) The open source, in-memory data store used by millions of developers as a database, cache, streaming engine, and message broker. +# Redis ## Install @@ -10,38 +8,46 @@ npm install @testcontainers/redis --save-dev ## Examples - +These examples use the following libraries: -[Start container:](../../packages/modules/redis/src/redis-container.test.ts) inside_block:startContainer +- [redis](https://www.npmjs.com/package/redis) - + npm install redis - +Choose an image from the [container registry](https://hub.docker.com/_/redis) and substitute `IMAGE`. 
-[Connect redis client to container:](../../packages/modules/redis/src/redis-container.test.ts) inside_block:simpleConnect - - +### Set/get a value - -[Start container with password authentication:](../../packages/modules/redis/src/redis-container.test.ts) inside_block:startWithCredentials - +[](../../packages/modules/redis/src/redis-container.test.ts) inside_block:redisStartContainer - - -[Define volume for persistent/predefined data:](../../packages/modules/redis/src/redis-container.test.ts) inside_block:persistentData +### With password + +[](../../packages/modules/redis/src/redis-container.test.ts) inside_block:redisStartWithCredentials +### With persistent data + +[](../../packages/modules/redis/src/redis-container.test.ts) inside_block:persistentData + -[Start container with redis/redis-stack-server image:](../../packages/modules/redis/src/redis-container.test.ts) inside_block:startWithRedisStack +### With predefined data + +[](../../packages/modules/redis/src/redis-container.test.ts) inside_block:withPredefinedData +### Redis stack + +[](../../packages/modules/redis/src/redis-container.test.ts) inside_block:startWithRedisStack + -[Execute a command inside the container:](../../packages/modules/redis/src/redis-container.test.ts) inside_block:executeCommand +### Execute a command inside the container + +[](../../packages/modules/redis/src/redis-container.test.ts) inside_block:executeCommand diff --git a/docs/modules/redpanda.md b/docs/modules/redpanda.md index ae452a264..ae0af16a4 100644 --- a/docs/modules/redpanda.md +++ b/docs/modules/redpanda.md @@ -1,32 +1,42 @@ # Redpanda -Testcontainers can be used to automatically instantiate and manage [Redpanda](https://redpanda.com/) containers. -More precisely Testcontainers uses the official Docker images for [Redpanda](https://hub.docker.com/r/redpandadata/redpanda) - -!!! note - This module uses features provided in `docker.redpanda.com/redpandadata/redpanda`. 
- ## Install - ```bash npm install @testcontainers/redpanda --save-dev ``` -## Example +## Examples + +These examples use the following libraries: + +- [kafkajs](https://www.npmjs.com/package/kafkajs) + + npm install kafkajs + +Choose an image from the [container registry](https://hub.docker.com/r/redpandadata/redpanda) and substitute `IMAGE`. + +### Produce/consume a message -[Connect:](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToKafka +[Code](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToKafka +[`assertMessageProducedAndConsumed`](../../packages/modules/redpanda/src/test-helper.ts) inside_block:redpandaTestHelper +### Connect to schema registry + -[Schema registry:](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToSchemaRegistry +[](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToSchemaRegistry +### Connect to admin + -[Admin APIs:](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToAdmin +[](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToAdmin +### Connect to REST proxy + -[Rest Proxy:](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToRestProxy +[](../../packages/modules/redpanda/src/redpanda-container.test.ts) inside_block:connectToRestProxy diff --git a/docs/modules/scylladb.md b/docs/modules/scylladb.md index 8aa928edd..2a859cf2d 100644 --- a/docs/modules/scylladb.md +++ b/docs/modules/scylladb.md @@ -1,8 +1,4 @@ -# ScyllaDB Module - -[ScyllaDB](https://www.scylladb.com/) is a distributed NoSQL wide-column database for data-intensive apps that require high performance and low latency. It was designed to be compatible with Apache Cassandra while achieving significantly higher throughputs and lower latencies. 
- - +# ScyllaDB ## Install @@ -12,10 +8,16 @@ npm install @testcontainers/scylladb --save-dev ## Examples - -[Connect:](../../packages/modules/scylladb/src/scylladb-container.test.ts) inside_block:connectWithDefaultCredentials - +These examples use the following libraries: + +- [cassandra-driver](https://www.npmjs.com/package/cassandra-driver) + + npm install cassandra-driver + +Choose an image from the [container registry](https://hub.docker.com/r/scylladb/scylla) and substitute `IMAGE`. + +### Execute a query -[Insert & fetch data:](../../packages/modules/scylladb/src/scylladb-container.test.ts) inside_block:createAndFetchData +[](../../packages/modules/scylladb/src/scylladb-container.test.ts) inside_block:connectWithDefaultCredentials diff --git a/docs/modules/selenium.md b/docs/modules/selenium.md index dce069e45..cb04c1ee7 100644 --- a/docs/modules/selenium.md +++ b/docs/modules/selenium.md @@ -1,8 +1,4 @@ -# Selenium Module - -[Selenium](https://www.selenium.dev/) If you want to create robust, browser-based regression automation suites and tests, scale and -distribute scripts across many environments, then you want to use Selenium WebDriver, a -collection of language specific bindings to drive a browser - the way it is meant to be driven. 
+# Selenium ## Install @@ -12,69 +8,28 @@ npm install @testcontainers/selenium --save-dev ## Examples -Spin up a Chrome web browser and navigate to a URL: - -```javascript -const { SeleniumContainer } = require("@testcontainers/selenium"); - -const container = await new SeleniumContainer("selenium/standalone-chrome:112.0") - .start(); - -const driver = await new Builder() - .forBrowser(Browser.CHROME) - .usingServer(container.getServerUrl()) - .build(); - -await driver.get("https://testcontainers.com"); -await driver.quit(); -``` - -You can use any Selenium supported web browser by providing the appropriate image and driver configuration, for example: - -```javascript -const container = await new SeleniumContainer("selenium/standalone-edge:112.0") - .start(); +These examples use the following libraries: -const driver = await new Builder() - .forBrowser(Browser.EDGE) - ... - .build(); -``` - -A video recording of the browser session can be enabled and saved to disk once the container has been stopped: - -```javascript -const container = await new SeleniumContainer("selenium/standalone-chrome:112.0") - .withRecording() - .start(); -... - -const stoppedContainer = await container.stop(); -await stoppedContainer.saveRecording("/tmp/videos/recording.mp4"); -``` - -## Troubleshooting +- [selenium-webdriver](https://www.npmjs.com/package/selenium-webdriver) -### ARM architecture + npm install selenium-webdriver + npm install @types/selenium-webdriver --save-dev -Selenium images are not available for ARM architectures. 
Luckily, there are equivalent, ARM compatible images available via [Seleniarm](https://hub.docker.com/u/seleniarm): +Choose an image from the container registry and substitute `IMAGE`: -``` -seleniarm/standalone-chromium:112.0 -seleniarm/standalone-firefox:112.0 -``` +- [AMD Standalone Chrome](https://hub.docker.com/r/selenium/standalone-chrome) +- [AMD Standalone Firefox](https://hub.docker.com/r/selenium/standalone-firefox) +- [ARM Standalone Chromium](https://hub.docker.com/r/seleniarm/standalone-chromium) +- [ARM Standalone Firefox](https://hub.docker.com/r/seleniarm/standalone-firefox) -```javascript -const { SeleniumContainer } = require("@testcontainers/selenium"); +### Navigate to a page -const container = await new SeleniumContainer("seleniarm/standalone-chromium:112.0") - .start(); + +[](../../packages/modules/selenium/src/selenium-container.test.ts) inside_block:seleniumExample + -const driver = await new Builder() - .forBrowser(Browser.CHROME) - .usingServer(container.getServerUrl()) - .build(); +### Record a video -await driver.get("https://testcontainers.com"); -await driver.quit(); -``` \ No newline at end of file + +[](../../packages/modules/selenium/src/selenium-container.test.ts) inside_block:seleniumVideoExample + diff --git a/docs/modules/toxiproxy.md b/docs/modules/toxiproxy.md index c0658fa94..5259632f7 100644 --- a/docs/modules/toxiproxy.md +++ b/docs/modules/toxiproxy.md @@ -1,68 +1,35 @@ -# Toxiproxy Module - -Testcontainers module for Shopify's [Toxiproxy](https://github.com/Shopify/toxiproxy). -This TCP proxy can be used to simulate network failure conditions. 
- -You can simulate network failures: - -* between NodeJS code and containers, ideal for testing resilience features of client code -* between containers, for testing resilience and emergent behaviour of multi-container systems -* if desired, between NodeJS code/containers and external resources (non-Dockerized!), for scenarios where not all dependencies can be/have been dockerized - -Testcontainers Toxiproxy support allows resilience features to be easily verified as part of isolated dev/CI testing. This allows earlier testing of resilience features, and broader sets of failure conditions to be covered. +# Toxiproxy ## Install + ```bash npm install @testcontainers/toxiproxy --save-dev ``` - -## Usage example - -A Toxiproxy container can be placed in between test code and a container, or in between containers. -In either scenario, it is necessary to create a `ToxiProxyContainer` instance on the same Docker network. -Next, it is necessary to instruct Toxiproxy to start proxying connections. -Each `ToxiProxyContainer` can proxy to many target containers if necessary. +## Examples -A proxy is created by calling `createProxy` on the `ToxiProxyContainer` instance. +These examples use the following libraries: -The client connecting to the proxied endpoint then needs to use the exposed port from the returned proxy. +- [toxiproxy-node-client](https://www.npmjs.com/package/toxiproxy-node-client) -All of this is done as follows: - -[Creating, starting and using the container:](../../packages/modules/toxiproxy/src/toxiproxy-container.test.ts) inside_block:create_proxy - + npm install toxiproxy-node-client -!!! note - Currently, `ToxiProxyContainer` will reserve 31 ports, starting at 8666. After this, trying to create a new proxy instance will throw an error. +Choose an image from the [container registry](https://github.com/Shopify/toxiproxy/pkgs/container/toxiproxy) and substitute `IMAGE`. 
- -Having done all of this, it is possible to trigger failure conditions ('Toxics') through the `proxy.instance.addToxic()` object: - -`TPClient` is the internal `toxiproxy-node-client` re-exported in this package. - -* `bandwidth` - Limit a connection to a maximum number of kilobytes per second. -* `latency` - Add a delay to all data going through the proxy. The delay is equal to `latency +/- jitter`. -* `slicer` - Slices TCP data up into small bits, optionally adding a delay between each sliced "packet". -* `slow_close` - Delay the TCP socket from closing until `delay` milliseconds has elapsed. -* `timeout` - Stops all data from getting through, and closes the connection after `timeout`. If `timeout` is `0`, the connection won't close, and data will be delayed until the toxic is removed. -* `limit_data` - Closes connection when transmitted data exceeded limit. -* `reset_peer` - Simulate TCP RESET (Connection reset by peer) on the connections - -Please see the [Toxiproxy documentation](https://github.com/Shopify/toxiproxy#toxics) and the [toxiproxy-node-client](https://github.com/ihsw/toxiproxy-node-client) for full details on the available Toxics. - -As one example, we can introduce latency and random jitter to proxied connections as follows: +### Create a proxy -[Adding latency to a connection](../../packages/modules/toxiproxy/src/toxiproxy-container.test.ts) inside_block:adding_toxic +[](../../packages/modules/toxiproxy/src/toxiproxy-container.test.ts) inside_block:create_proxy -There is also a helper method to enable / disable specific proxy instances (for more fine-grained control instead of using the `reset_peer` toxic). This can also be done by calling the `proxy.instance.update` method, however it is more complicated as you'll need to supply the upstream again and the internal listening port. 
+### Add a toxic -[Enable and disable the proxy:](../../packages/modules/toxiproxy/src/toxiproxy-container.test.ts) inside_block:enabled_disabled +[](../../packages/modules/toxiproxy/src/toxiproxy-container.test.ts) inside_block:adding_toxic -## Acknowledgements +### Enable/disable the proxy -This module was inspired by the Java implementation, and under the hood uses the [toxiproxy-node-client](https://github.com/ihsw/toxiproxy-node-client). + +[](../../packages/modules/toxiproxy/src/toxiproxy-container.test.ts) inside_block:enabled_disabled + diff --git a/docs/modules/valkey.md b/docs/modules/valkey.md index 5c49c80ee..90d4c3b94 100644 --- a/docs/modules/valkey.md +++ b/docs/modules/valkey.md @@ -1,6 +1,4 @@ -# Valkey Module - -[Valkey](https://valkey.io/) is a distributed, in-memory, key-value store. +# Valkey ## Install @@ -10,32 +8,40 @@ npm install @testcontainers/valkey --save-dev ## Examples - +These examples use the following libraries: -[Start container:](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:startContainer +- [redis](https://www.npmjs.com/package/redis) - + npm install redis - +Choose an image from the [container registry](https://hub.docker.com/r/valkey/valkey) and substitute `IMAGE`. 
-[Connect valkey client to container:](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:simpleConnect - - +### Set/get a value - -[Start container with password authentication:](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:startWithCredentials - +[](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:valkeyStartContainer +### With password + +[](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:valkeyWithCredentials + -[Define volume for persistent/predefined data:](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:persistentData +### With persistent data + +[](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:valkeyWithPersistentData +### With predefined data + +[](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:valkeyWithPredefinedData + -[Execute a command inside the container:](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:executeCommand +### Execute a command inside the container + +[](../../packages/modules/valkey/src/valkey-container.test.ts) inside_block:valkeyExecuteCommand diff --git a/docs/modules/vault.md b/docs/modules/vault.md index f2b5e412e..ead6d123d 100644 --- a/docs/modules/vault.md +++ b/docs/modules/vault.md @@ -1,6 +1,4 @@ -# Vault Module - -[Vault](https://www.vaultproject.io/) by HashiCorp is a tool for securely accessing secrets such as API keys, passwords, or certificates. This module allows you to run and initialize a Vault container for integration tests. 
+# Vault ## Install @@ -10,20 +8,22 @@ npm install @testcontainers/vault --save-dev ## Examples - -[Start and perform read/write with node-vault:](../../packages/modules/vault/src/vault-container.test.ts) inside_block:readWrite - +These examples use the following libraries: - -[Run Vault CLI init commands at startup:](../../packages/modules/vault/src/vault-container.test.ts) inside_block:initCommands - +- [node-vault](https://www.npmjs.com/package/node-vault) -## Why use Vault in integration tests? + npm install node-vault -With the growing adoption of Vault in modern infrastructure, testing components that depend on Vault for secret resolution or encryption can be complex. This module allows: +Choose an image from the [container registry](https://hub.docker.com/r/hashicorp/vault) and substitute `IMAGE`. -- Starting a local Vault instance during test runs -- Seeding secrets or enabling engines with Vault CLI -- Validating app behavior with secured data access +### Write/read a value -Use this module to test Vault-backed workflows without the need for pre-provisioned Vault infrastructure. + +[](../../packages/modules/vault/src/vault-container.test.ts) inside_block:readWrite + + +### Run CLI init commands at startup + + +[](../../packages/modules/vault/src/vault-container.test.ts) inside_block:initCommands + diff --git a/docs/modules/weaviate.md b/docs/modules/weaviate.md index b713e89a4..b12540b57 100644 --- a/docs/modules/weaviate.md +++ b/docs/modules/weaviate.md @@ -1,7 +1,4 @@ -# Weaviate Module - -[Weaviate](https://weaviate.io) is an open source, AI-native vector database that helps -developers create intuitive and reliable AI-powered applications. 
+# Weaviate ## Install @@ -11,12 +8,22 @@ npm install @testcontainers/weaviate --save-dev ## Examples +These examples use the following libraries: + +- [weaviate-ts-client](https://www.npmjs.com/package/weaviate-ts-client) + + npm install weaviate-ts-client + +Choose an image from the [container registry](https://hub.docker.com/r/semitechnologies/weaviate) and substitute `IMAGE`. + +### Connect + -[Connect to Weaviate:](../../packages/modules/weaviate/src/weaviate-container.test.ts) -inside_block:connectWeaviateWithClient +[](../../packages/modules/weaviate/src/weaviate-container.test.ts) inside_block:connectWeaviateWithClient +### With modules + -[Connect to Weaviate with modules defined:](../../packages/modules/weaviate/src/weaviate-container.test.ts) -inside_block:connectWeaviateWithModules +[](../../packages/modules/weaviate/src/weaviate-container.test.ts) inside_block:connectWeaviateWithModules diff --git a/docs/quickstart/global-setup.md b/docs/quickstart/global-setup.md index 8babe9556..50152381d 100644 --- a/docs/quickstart/global-setup.md +++ b/docs/quickstart/global-setup.md @@ -1,25 +1,31 @@ # Global setup -If you have a lot of tests that require the same container, you might not want to spin up one per test. +If you have many tests that require the same container, you may not want to spin up one per test. -In this case a common pattern is to set the container up globally, and reuse it in your tests. Here's an example using Vitest: +!!! info + There is a misconception that containers are heavyweight. -```ts -// setup.js + Sure, if your container has a slow startup time (e.g., a database, which on startup runs large migration scripts), it may be better to just start and manage one instance. But keep in mind that this limits your tests to run sequentially, and you may need to manage the state of the container between tests. + + In many cases it is far easier to start a new container for each test and run them in parallel. 
Of course, this depends on your specific use case.
+
+Many popular test frameworks like Jest and Vitest support global setup and teardown scripts.
+
+---
+
+Here's an example which sets up a single Redis container globally, so it can be reused across tests. In this case we're using Vitest:
 
-import { createClient, RedisClientType } from "redis";
-import { GenericContainer, StartedTestContainer } from "testcontainers";
+```ts title="setup.js"
+import { createClient } from "redis";
+import { RedisContainer } from "@testcontainers/redis";
 
 export async function setup() {
-  globalThis.redisContainer = await new GenericContainer("redis")
-    .withExposedPorts(6379)
-    .start();
+  const container = await new RedisContainer("redis:8").start();
+  const client = createClient({ url: container.getConnectionUrl() });
+  await client.connect();
 
-  globalThis.redisClient = createClient({
-    url: `redis://${redisContainer.getHost()}:${redisContainer.getMappedPort(6379)}`
-  });
-
-  await globalThis.redisClient.connect();
+  globalThis.redisContainer = container;
+  globalThis.redisClient = client;
 }
 
 export async function teardown() {
@@ -28,9 +34,7 @@ export async function teardown() {
 }
 ```
 
-```ts
-// vite.config.js
-
+```ts title="vite.config.js"
 import { defineConfig } from "vite";
 
 export default defineConfig({
@@ -40,13 +44,9 @@ export default defineConfig({
 });
 ```
 
-And to reference the container/client in your tests:
+And to use the container/client in your tests:
 
 ```ts
-it("should set and retrieve a value from Redis", async () => {
-  await globalThis.redisClient.set("key", "test-value");
-  const result = await globalThis.redisClient.get("key");
-
-  expect(result).toBe("test-value");
-});
+await globalThis.redisClient.set("key", "test-value");
+const result = await globalThis.redisClient.get("key");
 ```
diff --git a/docs/quickstart/install.md b/docs/quickstart/install.md
index 9989edc6e..bb08b5e55 100644
--- a/docs/quickstart/install.md
+++ b/docs/quickstart/install.md
@@ -1,7 +1,5 @@
 # 
Install -Install the Testcontainers dependency. - ## NPM ```bash diff --git a/docs/quickstart/logging.md b/docs/quickstart/logging.md index d1951a144..3322971ed 100644 --- a/docs/quickstart/logging.md +++ b/docs/quickstart/logging.md @@ -1,135 +1,24 @@ # Logging -It would be nice to see what Testcontainers is doing while the test is running. You can enable all logs by setting the `DEBUG` environment variable. For example: +Testcontainers writes logs using the [debug](https://www.npmjs.com/package/debug) library. This allows you to enable or disable logs at runtime, and to filter logs by namespace. -```bash -DEBUG=testcontainers* npm test -``` +The following namespaces are available: -If we run the test again, we'll see a lot of debug output: +- `testcontainers*`: Show all logs +- `testcontainers`: Show core logs +- `testcontainers:containers`: Show logs from containers +- `testcontainers:compose`: Show logs from Docker Compose +- `testcontainers:build`: Show build logs +- `testcontainers:pull`: Show image pull logs +- `testcontainers:exec`: Show container execution logs -``` -[DEBUG] Checking container runtime strategy "UnixSocketStrategy"... -[TRACE] Fetching Docker info... -[TRACE] Fetching remote container runtime socket path... -[TRACE] Resolving host... -[TRACE] Fetching Compose info... -[TRACE] Looking up host IPs... -[TRACE] Initialising clients... 
-[TRACE] Container runtime info: -{ - "node": { - "version": "v22.14.0", - "architecture": "x64", - "platform": "linux" - }, - "containerRuntime": { - "host": "localhost", - "hostIps": [ - { - "address": "127.0.0.1", - "family": 4 - } - ], - "remoteSocketPath": "/var/run/docker.sock", - "indexServerAddress": "https://index.docker.io/v1/", - "serverVersion": "28.0.1", - "operatingSystem": "Docker Desktop", - "operatingSystemType": "linux", - "architecture": "x86_64", - "cpus": 32, - "memory": 33524871168, - "runtimes": [ - "io.containerd.runc.v2", - "nvidia", - "runc" - ], - "labels": [ - "com.docker.desktop.address=unix:///var/run/docker-cli.sock" - ] - }, - "compose": { - "version": "2.33.1-desktop.1", - "compatability": "v2" - } -} -[DEBUG] Container runtime strategy "UnixSocketStrategy" works -[DEBUG] Checking if image exists "redis:latest"... -[DEBUG] Checked if image exists "redis:latest" -[DEBUG] Pulling image "redis:latest"... -[DEBUG] Executing Docker credential provider "docker-credential-desktop.exe" -[DEBUG] Auth config found for registry "https://index.docker.io/v1/": CredsStore -[redis:latest] {"status":"Pulling from library/redis","id":"latest"} -[redis:latest] {"status":"Pulling fs layer","progressDetail":{},"id":"6e909acdb790"} -... -[redis:latest] {"status":"Status: Downloaded newer image for redis:latest"} -[DEBUG] Pulled image "redis:latest" -[DEBUG] Acquiring lock file "/tmp/testcontainers-node.lock"... -[DEBUG] Acquired lock file "/tmp/testcontainers-node.lock" -[DEBUG] Listing containers... -[DEBUG] Listed containers -[DEBUG] Creating new Reaper for session "4c81d4efc176" with socket path "/var/run/docker.sock"... -[DEBUG] Checking if image exists "testcontainers/ryuk:0.11.0"... -[DEBUG] Checked if image exists "testcontainers/ryuk:0.11.0" -[DEBUG] Image "testcontainers/ryuk:0.11.0" already exists -[DEBUG] Creating container for image "testcontainers/ryuk:0.11.0"... 
-[DEBUG] [11a9d12ea231] Created container for image "testcontainers/ryuk:0.11.0" -[INFO] [11a9d12ea231] Starting container for image "testcontainers/ryuk:0.11.0"... -[DEBUG] [11a9d12ea231] Starting container... -[DEBUG] [11a9d12ea231] Started container -[INFO] [11a9d12ea231] Started container for image "testcontainers/ryuk:0.11.0" -[DEBUG] [11a9d12ea231] Fetching container logs... -[DEBUG] [11a9d12ea231] Demuxing stream... -[DEBUG] [11a9d12ea231] Demuxed stream -[DEBUG] [11a9d12ea231] Fetched container logs -[DEBUG] [11a9d12ea231] Waiting for container to be ready... -[DEBUG] [11a9d12ea231] Waiting for log message "/.*Started.*/"... -[DEBUG] [11a9d12ea231] Fetching container logs... -[11a9d12ea231] time=2025-03-24T12:10:17.130Z level=INFO msg=starting connection_timeout=1m0s reconnection_timeout=10s request_timeout=10s shutdown_timeout=10m0s remove_retries=10 retry_offset=-1s changes_retry_interval=1s port=8080 verbose=false -[11a9d12ea231] time=2025-03-24T12:10:17.130Z level=INFO msg=Started address=[::]:8080 -[11a9d12ea231] time=2025-03-24T12:10:17.130Z level=INFO msg="client processing started" -[DEBUG] [11a9d12ea231] Demuxing stream... -[DEBUG] [11a9d12ea231] Demuxed stream -[DEBUG] [11a9d12ea231] Fetched container logs -[DEBUG] [11a9d12ea231] Log wait strategy complete -[INFO] [11a9d12ea231] Container is ready -[DEBUG] [11a9d12ea231] Connecting to Reaper (attempt 1) on "localhost:32774"... -[DEBUG] [11a9d12ea231] Connected to Reaper -[DEBUG] Releasing lock file "/tmp/testcontainers-node.lock"... -[DEBUG] Released lock file "/tmp/testcontainers-node.lock" -[DEBUG] Creating container for image "redis:latest"... 
-[11a9d12ea231] time=2025-03-24T12:10:17.145Z level=INFO msg="client connected" address=172.17.0.1:40446 clients=1 -[11a9d12ea231] time=2025-03-24T12:10:17.145Z level=INFO msg="adding filter" type=label values="[org.testcontainers.session-id=4c81d4efc176]" -[936d82e9964e] Created container for image "redis:latest" -[936d82e9964e] Starting container for image "redis:latest"... -[936d82e9964e] Starting container... -[936d82e9964e] Started container -[936d82e9964e] Started container for image "redis:latest" -[936d82e9964e] Fetching container logs... -[936d82e9964e] Demuxing stream... -[936d82e9964e] Demuxed stream -[936d82e9964e] Fetched container logs -[936d82e9964e] Waiting for container to be ready... -[936d82e9964e] Waiting for host port 32775... -[936d82e9964e] Waiting for internal port 6379... -[936d82e9964e] 1:C 24 Mar 2025 12:10:17.419 * oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo -[936d82e9964e] 1:C 24 Mar 2025 12:10:17.419 * Redis version=7.4.2, bits=64, commit=00000000, modified=0, pid=1, just started -[936d82e9964e] 1:C 24 Mar 2025 12:10:17.419 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf -[936d82e9964e] 1:M 24 Mar 2025 12:10:17.419 * monotonic clock: POSIX clock_gettime -[936d82e9964e] 1:M 24 Mar 2025 12:10:17.419 * Running mode=standalone, port=6379. -[936d82e9964e] 1:M 24 Mar 2025 12:10:17.420 * Server initialized -[936d82e9964e] 1:M 24 Mar 2025 12:10:17.420 * Ready to accept connections tcp -[DEBUG] [936d82e9964e] Host port 32775 ready -[DEBUG] [936d82e9964e] Host port wait strategy complete -[DEBUG] [936d82e9964e] Internal port 6379 ready -[INFO] [936d82e9964e] Container is ready -[INFO] [936d82e9964e] Stopping container... -[DEBUG] [936d82e9964e] Stopping container... -[936d82e9964e] 1:signal-handler (1742818217) Received SIGTERM scheduling shutdown... -[DEBUG] [936d82e9964e] Stopped container -[DEBUG] [936d82e9964e] Removing container... 
-[DEBUG] [936d82e9964e] Removed container
-[INFO] [936d82e9964e] Stopped container
-```
+!!! note
+    You can enable multiple loggers: `DEBUG=testcontainers,testcontainers:exec`.
 
-These logs are useful for debugging when a container isn't working as expected. You can see there are logs from the Testcontainers library, as well as logs emitted from all Testcontainers-managed containers.
+---
+
+You could for example run your tests with all Testcontainers logs enabled like this:
+
+```bash
+DEBUG=testcontainers* npm test
+```
diff --git a/docs/quickstart/usage.md b/docs/quickstart/usage.md
index 0610b8955..80607c639 100644
--- a/docs/quickstart/usage.md
+++ b/docs/quickstart/usage.md
@@ -1,16 +1,21 @@
 # Usage
 
-As an example, let's spin up and test a Redis container.
+**As an example, let's spin up and test a Redis container.**
 
-First, let's install the dependencies:
+---
+
+First, install the dependencies:
 
 ```bash
+npm install testcontainers --save-dev
 npm install redis
 ```
 
-Using your favorite testing library, let's now create a test:
+---
+
+Next, we'll write a test that starts a Redis container, connects to it, and performs an operation:
 
-```ts
+```ts hl_lines="2 5 9-11 14 22"
 import { createClient, RedisClientType } from "redis";
 import { GenericContainer, StartedTestContainer } from "testcontainers";
 
@@ -19,7 +24,7 @@ describe("Redis", () => {
   let redisClient: RedisClientType;
 
   beforeAll(async () => {
-    container = await new GenericContainer("redis")
+    container = await new GenericContainer("redis:8")
       .withExposedPorts(6379)
       .start();
 
@@ -44,4 +49,47 @@ describe("Redis", () => {
 
 Run the test, and after a few seconds, it passes!
 
-Why did it take a few seconds? Because your container runtime likely had to pull the image first. If you run the test again, it'll run faster.
+!!! note
+    Why did it take a few seconds?
+
+    Because your container runtime first had to pull the image. If you run the test again, it'll run faster.
+
+---
+
+The complexity of configuring a container varies.
+
+For Redis, it's pretty simple, we just expose a port. But for example, to define a `GenericContainer` for PostgreSQL, you'd need to configure multiple ports, environment variables for credentials, custom wait strategies, and more. For this reason there exists a catalogue of [pre-defined modules](https://testcontainers.com/modules/), which abstract away this complexity.
+
+If a module exists for the container you want to use, it's highly recommended to use it.
+
+For example, using the [Redis module](../modules/redis.md), the example above can be simplified:
+
+```bash
+npm install @testcontainers/redis --save-dev
+```
+
+```ts hl_lines="2 5 9-10"
+import { createClient, RedisClientType } from "redis";
+import { RedisContainer, StartedRedisContainer } from "@testcontainers/redis";
+
+describe("Redis", () => {
+  let container: StartedRedisContainer;
+  let redisClient: RedisClientType;
+
+  beforeAll(async () => {
+    container = await new RedisContainer("redis:8").start();
+    redisClient = createClient({ url: container.getConnectionUrl() });
+    await redisClient.connect();
+  });
+
+  afterAll(async () => {
+    await redisClient.disconnect();
+    await container.stop();
+  });
+
+  it("works", async () => {
+    await redisClient.set("key", "val");
+    expect(await redisClient.get("key")).toBe("val");
+  });
+});
+```
diff --git a/docs/supported-container-runtimes.md b/docs/supported-container-runtimes.md
index 7305dd28b..d87afea35 100644
--- a/docs/supported-container-runtimes.md
+++ b/docs/supported-container-runtimes.md
@@ -8,38 +8,38 @@ Works out of the box.
### Usage -#### MacOS: +#### MacOS ```bash {% raw %} -export DOCKER_HOST=unix://$(podman machine inspect --format '{{.ConnectionInfo.PodmanSocket.Path}}') +export DOCKER_HOST=unix://$( + podman machine inspect --format '{{.ConnectionInfo.PodmanSocket.Path}}' +) export TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE=/var/run/docker.sock {% endraw %} ``` -#### Linux: +#### Linux -1. Ensure the Podman socket is exposed: +Ensure the Podman socket is exposed, choose between rootless or rootful: - Rootless: - - ```bash - systemctl --user status podman.socket - ``` - - Rootful: - - ```bash - sudo systemctl enable --now podman.socket - ``` +```bash title="Rootless" +systemctl --user status podman.socket +``` + +```bash title="Rootful" +sudo systemctl enable --now podman.socket +``` -2. Export the `DOCKER_HOST`: +Export the `DOCKER_HOST`: - ```bash - {% raw %} - export DOCKER_HOST="unix://$(podman info --format '{{.Host.RemoteSocket.Path}}')" - {% endraw %} - ``` +```bash +{% raw %} +export DOCKER_HOST=unix://$( + podman info --format '{{.Host.RemoteSocket.Path}}' +) +{% endraw %} +``` ### Known issues @@ -51,7 +51,7 @@ When running rootless, the resource reaper will not work, disable it: export TESTCONTAINERS_RYUK_DISABLED=true ``` -When running rootful, the resource reaper can be made to work by telling it to run privileged: +When running rootful, the resource reaper can be made to work by running it privileged: ```bash export TESTCONTAINERS_RYUK_PRIVILEGED=true @@ -87,11 +87,14 @@ The way Colima works is it periodically checks for exposed ports, and then forwa You can use a composite wait strategy to additionally wait for a port to be bound, on top of an existing wait strategy. 
For example: -```javascript +```js const { GenericContainer, Wait } = require("testcontainers"); const container = await new GenericContainer("redis") - .withWaitStrategy(Wait.forAll([Wait.forListeningPorts(), Wait.forLogMessage("Ready to accept connections")])) + .withWaitStrategy(Wait.forAll([ + Wait.forListeningPorts(), + Wait.forLogMessage("Ready to accept connections") + ])) .start(); ``` diff --git a/mkdocs.yml b/mkdocs.yml index f83c3a2b0..cbd1158b9 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -14,6 +14,8 @@ theme: code: Roboto Mono logo: "site/logo.svg" favicon: "site/favicon.ico" + features: + - content.code.copy extra_css: - "site/css/extra.css" @@ -28,8 +30,10 @@ markdown_extensions: - admonition - pymdownx.details - pymdownx.superfences + - pymdownx.highlight: + linenums: true - pymdownx.tabbed: - alternate_style: false + alternate_style: true - toc: permalink: true @@ -54,9 +58,9 @@ nav: - Cassandra: modules/cassandra.md - ChromaDB: modules/chromadb.md - ClickHouse: modules/clickhouse.md + - CockroachDB: modules/cockroachdb.md - CosmosDB: modules/cosmosdb.md - Couchbase: modules/couchbase.md - - CockroachDB: modules/cockroachdb.md - Elasticsearch: modules/elasticsearch.md - Etcd: modules/etcd.md - GCloud: modules/gcloud.md @@ -66,8 +70,8 @@ nav: - KurrentDB: modules/kurrentdb.md - Localstack: modules/localstack.md - MariaDB: modules/mariadb.md - - Mockserver: modules/mockserver.md - MinIO: modules/minio.md + - Mockserver: modules/mockserver.md - MongoDB: modules/mongodb.md - MSSQLServer: modules/mssqlserver.md - MySQL: modules/mysql.md diff --git a/packages/modules/arangodb/src/arangodb-container.test.ts b/packages/modules/arangodb/src/arangodb-container.test.ts index 437c3e37c..6fb7721bf 100755 --- a/packages/modules/arangodb/src/arangodb-container.test.ts +++ b/packages/modules/arangodb/src/arangodb-container.test.ts @@ -4,12 +4,12 @@ import { ArangoDBContainer } from "./arangodb-container"; const IMAGE = getImage(__dirname); -describe("ArangoDB", { 
timeout: 180_000 }, () => { - // connect { +describe("ArangoDBContainer", { timeout: 180_000 }, () => { it("should connect and return a query result", async () => { + // example { await using container = await new ArangoDBContainer(IMAGE).start(); - const db = new Database({ url: container.getHttpUrl() }); + const db = new Database({ url: container.getHttpUrl() }); db.database("_system"); db.useBasicAuth(container.getUsername(), container.getPassword()); @@ -19,7 +19,8 @@ describe("ArangoDB", { timeout: 180_000 }, () => { bindVars: { value }, }); const returnValue = await result.next(); + expect(returnValue).toBe(value); + // } }); - // } }); diff --git a/packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts b/packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts index 2ee18215d..93ca446af 100644 --- a/packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts +++ b/packages/modules/azurecosmosdb/src/azure-cosmosdb-emulator-container.test.ts @@ -37,16 +37,18 @@ describe("AzureCosmosDbEmulatorContainer", { timeout: 180_000 }, async () => { }); // } - // httpsCreateDB { it("should be able to create a database using https", async () => { + // httpsCreateDB { await using container = await new AzureCosmosDbEmulatorContainer(IMAGE).withProtocol("https").start(); + const cosmosClient = new CosmosClient({ endpoint: container.getEndpoint(), key: container.getKey(), agent: new https.Agent({ - rejectUnauthorized: false, //allows insecure TLS; import * as https from "node:https"; + rejectUnauthorized: false, }), }); + // } const dbName = "testdb"; const createResponse = await cosmosClient.databases.createIfNotExists({ @@ -57,23 +59,22 @@ describe("AzureCosmosDbEmulatorContainer", { timeout: 180_000 }, async () => { const db = await cosmosClient.database(dbName).read(); expect(db.database.id).toBe(dbName); }); - // } - // createAndRead { it("should be able to create a container and store and retrieve items", async 
() => { + // createAndRead { + const dbName = "testdb"; + const containerName = "testcontainer"; + await using container = await new AzureCosmosDbEmulatorContainer(IMAGE).withProtocol("http").start(); + const cosmosClient = new CosmosClient({ endpoint: container.getEndpoint(), key: container.getKey(), }); - const dbName = "testdb"; - await cosmosClient.databases.createIfNotExists({ - id: dbName, - }); - const dbClient = cosmosClient.database(dbName); + await cosmosClient.databases.createIfNotExists({ id: dbName }); - const containerName = "testcontainer"; + const dbClient = cosmosClient.database(dbName); await dbClient.containers.createIfNotExists({ id: containerName, partitionKey: { @@ -83,12 +84,10 @@ describe("AzureCosmosDbEmulatorContainer", { timeout: 180_000 }, async () => { }); const containerClient = dbClient.container(containerName); - const createResponse = await containerClient.items.create({ - foo: "bar", - }); + const createResponse = await containerClient.items.create({ foo: "bar" }); const readItem = await containerClient.item(createResponse.item.id, "bar").read(); expect(readItem.resource.foo).toEqual("bar"); + // } }); - // } }); diff --git a/packages/modules/azurite/src/azurite-container.test.ts b/packages/modules/azurite/src/azurite-container.test.ts index e81b7669e..4e53e3f89 100644 --- a/packages/modules/azurite/src/azurite-container.test.ts +++ b/packages/modules/azurite/src/azurite-container.test.ts @@ -6,66 +6,49 @@ import { AzuriteContainer } from "./azurite-container"; const IMAGE = getImage(__dirname); -describe("Azurite", { timeout: 240_000 }, () => { - // uploadAndDownloadBlob { +describe("AzuriteContainer", { timeout: 240_000 }, () => { it("should upload and download blob with default credentials", async () => { + // uploadAndDownloadBlob { await using container = await new AzuriteContainer(IMAGE).start(); const connectionString = container.getConnectionString(); - expect(connectionString).toBeTruthy(); - const serviceClient = 
BlobServiceClient.fromConnectionString(connectionString); const containerClient = serviceClient.getContainerClient("test"); await containerClient.createIfNotExists(); + const blobName = "hello.txt"; const content = "Hello world!"; await containerClient.uploadBlockBlob(blobName, content, Buffer.byteLength(content)); const blobClient = containerClient.getBlockBlobClient(blobName); - const downloadResponse = await blobClient.download(0, undefined); - - const readable = downloadResponse.readableStreamBody as NodeJS.ReadableStream; - expect(readable).toBeTruthy(); - - readable.setEncoding("utf8"); - let data = ""; - for await (const chunk of readable) { - data += chunk; - } - - expect(data).toBe(content); + const downloadBuffer = await blobClient.downloadToBuffer(); + expect(downloadBuffer.toString()).toBe(content); + // } }); - // } - // sendAndReceiveQueue { it("should add to queue with default credentials", async () => { + // sendAndReceiveQueue { await using container = await new AzuriteContainer(IMAGE).start(); const connectionString = container.getConnectionString(); - expect(connectionString).toBeTruthy(); - const serviceClient = QueueServiceClient.fromConnectionString(connectionString); const queueName = "test-queue"; await serviceClient.createQueue(queueName); + const messageText = "Hello world!"; const queueClient = serviceClient.getQueueClient(queueName); - - const message = "Hello world!"; - await queueClient.sendMessage(message); + await queueClient.sendMessage(messageText); const messages = await queueClient.receiveMessages(); - expect(messages.receivedMessageItems).toHaveLength(1); - expect(messages.receivedMessageItems[0].messageText).toBe(message); + expect(messages.receivedMessageItems).toMatchObject([{ messageText }]); + // } }); - // } - // createAndInsertOnTable { it("should add to table with default credentials", async () => { + // createAndInsertOnTable { await using container = await new AzuriteContainer(IMAGE).start(); const connectionString = 
container.getConnectionString(); - expect(connectionString).toBeTruthy(); - const tableName = "person"; const tableClient = TableClient.fromConnectionString(connectionString, tableName, { allowInsecureConnection: true, @@ -79,16 +62,14 @@ describe("Azurite", { timeout: 240_000 }, () => { }; await tableClient.createEntity(entity); - const e1 = await tableClient.listEntities().next(); - expect(e1.value).toBeTruthy(); - expect(e1.value.name).toBe(entity.name); + const nextEntity = await tableClient.listEntities().next(); + expect(nextEntity.value).toEqual(expect.objectContaining(entity)); + // } }); - // } - // customCredentials { it("should be able to specify accountName and accountKey", async () => { + // customCredentials { const accountName = "test-account"; - // Account key must be base64 encoded const accountKey = Buffer.from("test-key").toString("base64"); await using container = await new AzuriteContainer(IMAGE) @@ -98,6 +79,7 @@ describe("Azurite", { timeout: 240_000 }, () => { const credentials = new StorageSharedKeyCredential(accountName, accountKey); const serviceClient = new BlobServiceClient(container.getBlobEndpoint(), credentials); + // } const blobContainerName = "test"; const containerClient = serviceClient.getContainerClient(blobContainerName); @@ -107,13 +89,13 @@ describe("Azurite", { timeout: 240_000 }, () => { expect(blobContainer.value).toBeTruthy(); expect(blobContainer.value.name).toBe(blobContainerName); }); - // } - // customPorts { it("should be able to specify custom ports", async () => { + // customPorts { const blobPort = 13000; const queuePort = 14000; const tablePort = 15000; + await using container = await new AzuriteContainer(IMAGE) .withBlobPort({ container: 10001, host: blobPort }) .withQueuePort({ container: 10002, host: queuePort }) @@ -123,6 +105,7 @@ describe("Azurite", { timeout: 240_000 }, () => { expect(container.getBlobPort()).toBe(blobPort); expect(container.getQueuePort()).toBe(queuePort); 
expect(container.getTablePort()).toBe(tablePort); + // } const connectionString = container.getConnectionString(); expect(connectionString).toContain("13000"); @@ -133,17 +116,15 @@ describe("Azurite", { timeout: 240_000 }, () => { const containerClient = serviceClient.getContainerClient("test"); await containerClient.createIfNotExists(); }); - // } - // inMemoryPersistence { it("should be able to use in-memory persistence", async () => { + // inMemoryPersistence { await using container = await new AzuriteContainer(IMAGE).withInMemoryPersistence().start(); + const blobName = "hello.txt"; { const connectionString = container.getConnectionString(); - expect(connectionString).toBeTruthy(); - const serviceClient = BlobServiceClient.fromConnectionString(connectionString); const containerClient = serviceClient.getContainerClient("test"); await containerClient.createIfNotExists(); @@ -158,14 +139,12 @@ describe("Azurite", { timeout: 240_000 }, () => { { const connectionString = container.getConnectionString(); - expect(connectionString).toBeTruthy(); - const serviceClient = BlobServiceClient.fromConnectionString(connectionString); const containerClient = serviceClient.getContainerClient("test"); const blobClient = containerClient.getBlockBlobClient(blobName); const blobExistsAfterRestart = await blobClient.exists(); expect(blobExistsAfterRestart).toBeFalsy(); } + // } }); - // } }); diff --git a/packages/modules/cassandra/src/cassandra-container.test.ts b/packages/modules/cassandra/src/cassandra-container.test.ts index 019406ecb..c0bf4ad26 100644 --- a/packages/modules/cassandra/src/cassandra-container.test.ts +++ b/packages/modules/cassandra/src/cassandra-container.test.ts @@ -5,9 +5,9 @@ import { CassandraContainer } from "./cassandra-container"; const IMAGE = getImage(__dirname); -describe.sequential("Cassandra", { timeout: 240_000 }, () => { - // connectWithDefaultCredentials { +describe.sequential("CassandraContainer", { timeout: 240_000 }, () => { it("should connect 
and execute a query with default credentials", async () => { + // connectWithDefaultCredentials { await using container = await new CassandraContainer(IMAGE).start(); const client = new Client({ @@ -15,18 +15,17 @@ describe.sequential("Cassandra", { timeout: 240_000 }, () => { localDataCenter: container.getDatacenter(), keyspace: "system", }); - await client.connect(); const result = await client.execute("SELECT release_version FROM system.local"); expect(result.rows[0].release_version).toBe(ImageName.fromString(IMAGE).tag); await client.shutdown(); + // } }); - // } - // connectWithCustomCredentials { it("should connect with custom username and password", async () => { + // connectWithCustomCredentials { const username = "testUser"; const password = "testPassword"; @@ -38,6 +37,7 @@ describe.sequential("Cassandra", { timeout: 240_000 }, () => { credentials: { username, password }, keyspace: "system", }); + // } await client.connect(); @@ -46,12 +46,12 @@ describe.sequential("Cassandra", { timeout: 240_000 }, () => { await client.shutdown(); }); - // } - // customDataCenterAndRack { it("should set datacenter and rack", async () => { + // customDataCenterAndRack { const customDataCenter = "customDC"; const customRack = "customRack"; + await using container = await new CassandraContainer(IMAGE) .withDatacenter(customDataCenter) .withRack(customRack) @@ -61,15 +61,15 @@ describe.sequential("Cassandra", { timeout: 240_000 }, () => { contactPoints: [container.getContactPoint()], localDataCenter: container.getDatacenter(), }); - await client.connect(); + const result = await client.execute("SELECT data_center, rack FROM system.local"); expect(result.rows[0].data_center).toBe(customDataCenter); expect(result.rows[0].rack).toBe(customRack); await client.shutdown(); + // } }); - // } // createAndFetchData { it("should create keyspace, a table, insert data, and retrieve it", async () => { diff --git a/packages/modules/chromadb/src/chromadb-container.test.ts 
b/packages/modules/chromadb/src/chromadb-container.test.ts index 8f2d74f90..bb38fa54c 100755 --- a/packages/modules/chromadb/src/chromadb-container.test.ts +++ b/packages/modules/chromadb/src/chromadb-container.test.ts @@ -4,64 +4,53 @@ import os from "node:os"; import path from "node:path"; import { GenericContainer } from "testcontainers"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { ChromaDBContainer, StartedChromaDBContainer } from "./chromadb-container"; +import { ChromaDBContainer } from "./chromadb-container"; const IMAGE = getImage(__dirname); -describe("ChromaDB", { timeout: 360_000 }, () => { - // startContainer { +describe("ChromaDBContainer", { timeout: 360_000 }, () => { it("should connect", async () => { await using container = await new ChromaDBContainer(IMAGE).start(); - const client = await connectTo(container); + const client = new ChromaClient({ path: container.getHttpUrl() }); expect(await client.heartbeat()).toBeDefined(); - // Do something with the client }); - // } - // simpleConnect { - async function connectTo(container: StartedChromaDBContainer) { - const client = new ChromaClient({ - path: container.getHttpUrl(), - }); - const hb = await client.heartbeat(); - expect(hb).toBeDefined(); - return client; - } - // } - - // createCollection { it("should create collection and get data", async () => { + // chromaCreateCollection { await using container = await new ChromaDBContainer(IMAGE).start(); - const client = await connectTo(container); + + const client = new ChromaClient({ path: container.getHttpUrl() }); const collection = await client.createCollection({ name: "test", metadata: { "hnsw:space": "cosine" } }); expect(collection.name).toBe("test"); - expect(collection.metadata).toBeDefined(); - expect(collection.metadata?.["hnsw:space"]).toBe("cosine"); + await collection.add({ ids: ["1"], embeddings: [[1, 2, 3]], documents: ["my doc"], metadatas: [{ key: "value" }] }); const getResults = await 
collection.get({ ids: ["1"] }); expect(getResults.ids[0]).toBe("1"); expect(getResults.documents[0]).toStrictEqual("my doc"); expect(getResults.metadatas).toBeDefined(); expect(getResults.metadatas?.[0]?.key).toStrictEqual("value"); + // } }); - // } - // queryCollectionWithEmbeddingFunction { it("should create collection and query", async () => { + // queryCollectionWithEmbeddingFunction { await using container = await new ChromaDBContainer(IMAGE).start(); - const ollama = await new GenericContainer("ollama/ollama").withExposedPorts(11434).start(); + + await using ollama = await new GenericContainer("ollama/ollama").withExposedPorts(11434).start(); await ollama.exec(["ollama", "pull", "nomic-embed-text"]); - const client = await connectTo(container); + const client = new ChromaClient({ path: container.getHttpUrl() }); const embedder = new OllamaEmbeddingFunction({ url: `http://${ollama.getHost()}:${ollama.getMappedPort(11434)}/api/embeddings`, model: "nomic-embed-text", }); + const collection = await client.createCollection({ name: "test", metadata: { "hnsw:space": "cosine" }, embeddingFunction: embedder, }); expect(collection.name).toBe("test"); + await collection.add({ ids: ["1", "2"], documents: [ @@ -73,38 +62,38 @@ describe("ChromaDB", { timeout: 360_000 }, () => { expect(results).toBeDefined(); expect(results.ids[0]).toEqual(["1"]); expect(results.ids[0][0]).toBe("1"); + // } }); - // persistentData { it("should reconnect with volume and persistence data", async () => { - const sourcePath = fs.mkdtempSync(path.join(os.tmpdir(), "chroma-temp")); - await using container = await new ChromaDBContainer(IMAGE) - .withBindMounts([{ source: sourcePath, target: "/data" }]) - .start(); - const client = await connectTo(container); - const collection = await client.createCollection({ name: "test", metadata: { "hnsw:space": "cosine" } }); - expect(collection.name).toBe("test"); - expect(collection.metadata).toBeDefined(); - 
expect(collection.metadata?.["hnsw:space"]).toBe("cosine"); - await collection.add({ ids: ["1"], embeddings: [[1, 2, 3]], documents: ["my doc"] }); - const getResults = await collection.get({ ids: ["1"] }); - expect(getResults.ids[0]).toBe("1"); - expect(getResults.documents[0]).toStrictEqual("my doc"); - expect(fs.existsSync(`${sourcePath}/chroma.sqlite3`)).toBe(true); try { - fs.rmSync(sourcePath, { force: true, recursive: true }); - } catch (e) { - // Ignore clean up, when have no access on fs. - console.log(e); + // persistentData { + const sourcePath = fs.mkdtempSync(path.join(os.tmpdir(), "chroma-temp")); + await using container = await new ChromaDBContainer(IMAGE) + .withBindMounts([{ source: sourcePath, target: "/data" }]) + .start(); + + const client = new ChromaClient({ path: container.getHttpUrl() }); + const collection = await client.createCollection({ name: "test", metadata: { "hnsw:space": "cosine" } }); + expect(collection.name).toBe("test"); + + await collection.add({ ids: ["1"], embeddings: [[1, 2, 3]], documents: ["my doc"] }); + const getResults = await collection.get({ ids: ["1"] }); + expect(getResults.ids[0]).toBe("1"); + expect(getResults.documents[0]).toStrictEqual("my doc"); + expect(fs.existsSync(`${sourcePath}/chroma.sqlite3`)).toBe(true); + // } + } finally { + fs.rmSync(path.join(os.tmpdir(), "chroma-temp"), { force: true, recursive: true }); } }); - // } - // auth { it("should use auth", async () => { + // chromaAuth { const tenant = "test-tenant"; const key = "test-key"; const database = "test-db"; + await using container = await new ChromaDBContainer(IMAGE) .withEnvironment({ CHROMA_SERVER_AUTHN_CREDENTIALS: key, @@ -139,6 +128,6 @@ describe("ChromaDB", { timeout: 360_000 }, () => { const collection = await dbClient.createCollection({ name: "test-collection" }); expect(collection.name).toBe("test-collection"); + // } }); - // } }); diff --git a/packages/modules/clickhouse/src/clickhouse-container.test.ts 
b/packages/modules/clickhouse/src/clickhouse-container.test.ts index f1dfdaea1..8ee8f2e82 100644 --- a/packages/modules/clickhouse/src/clickhouse-container.test.ts +++ b/packages/modules/clickhouse/src/clickhouse-container.test.ts @@ -9,9 +9,10 @@ interface ClickHouseQueryResponse { } describe("ClickHouseContainer", { timeout: 180_000 }, () => { - // connectWithOptions { it("should connect using the client options object", async () => { + // connectWithOptions { await using container = await new ClickHouseContainer(IMAGE).start(); + const client = createClient(container.getClientOptions()); const result = await client.query({ @@ -19,33 +20,35 @@ describe("ClickHouseContainer", { timeout: 180_000 }, () => { format: "JSON", }); const data = (await result.json()) as ClickHouseQueryResponse<{ value: number }>; + expect(data?.data?.[0]?.value).toBe(1); await client.close(); + // } }); - // } - // connectWithUrl { it("should connect using the URL", async () => { + // connectWithUrl { await using container = await new ClickHouseContainer(IMAGE).start(); + const client = createClient({ url: container.getConnectionUrl(), }); + // } const result = await client.query({ query: "SELECT 1 AS value", format: "JSON", }); - const data = (await result.json()) as ClickHouseQueryResponse<{ value: number }>; + expect(data?.data?.[0]?.value).toBe(1); await client.close(); }); - // } - // connectWithUsernameAndPassword { it("should connect using the username and password", async () => { + // connectWithUsernameAndPassword { await using container = await new ClickHouseContainer(IMAGE) .withUsername("customUsername") .withPassword("customPassword") @@ -56,23 +59,24 @@ describe("ClickHouseContainer", { timeout: 180_000 }, () => { username: container.getUsername(), password: container.getPassword(), }); + // } const result = await client.query({ query: "SELECT 1 AS value", format: "JSON", }); - const data = (await result.json()) as ClickHouseQueryResponse<{ value: number }>; + 
expect(data?.data?.[0]?.value).toBe(1); await client.close(); }); - // } - // setDatabase { it("should set database", async () => { - const customDatabase = "customDatabase"; - await using container = await new ClickHouseContainer(IMAGE).withDatabase(customDatabase).start(); + // setDatabase { + const db = "customDatabase"; + await using container = await new ClickHouseContainer(IMAGE).withDatabase(db).start(); + // } const client = createClient(container.getClientOptions()); @@ -80,32 +84,12 @@ describe("ClickHouseContainer", { timeout: 180_000 }, () => { query: "SELECT currentDatabase() AS current_database", format: "JSON", }); - const data = (await result.json()) as ClickHouseQueryResponse<{ current_database: string }>; - expect(data?.data?.[0]?.current_database).toBe(customDatabase); - - await client.close(); - }); - // } - - // setUsername { - it("should set username", async () => { - const customUsername = "customUsername"; - await using container = await new ClickHouseContainer(IMAGE).withUsername(customUsername).start(); - - const client = createClient(container.getClientOptions()); - - const result = await client.query({ - query: "SELECT currentUser() AS current_user", - format: "JSON", - }); - const data = (await result.json()) as ClickHouseQueryResponse<{ current_user: string }>; - expect(data?.data?.[0]?.current_user).toBe(customUsername); + expect(data?.data?.[0]?.current_database).toBe(db); await client.close(); }); - // } it("should work with restarted container", async () => { await using container = await new ClickHouseContainer(IMAGE).start(); diff --git a/packages/modules/cockroachdb/src/cockroachdb-container.test.ts b/packages/modules/cockroachdb/src/cockroachdb-container.test.ts index 92561fe67..9c338e159 100644 --- a/packages/modules/cockroachdb/src/cockroachdb-container.test.ts +++ b/packages/modules/cockroachdb/src/cockroachdb-container.test.ts @@ -5,8 +5,8 @@ import { CockroachDbContainer } from "./cockroachdb-container"; const IMAGE = 
getImage(__dirname); describe("CockroachDbContainer", { timeout: 180_000 }, () => { - // connect { it("should connect and return a query result", async () => { + // cockroachConnect { await using container = await new CockroachDbContainer(IMAGE).start(); const client = new Client({ @@ -23,16 +23,18 @@ describe("CockroachDbContainer", { timeout: 180_000 }, () => { expect(result.rows[0]).toEqual({ "?column?": "1" }); await client.end(); + // } }); - // } - // uriConnect { it("should work with database URI", async () => { + // uriConnect { await using container = await new CockroachDbContainer(IMAGE).start(); const client = new Client({ connectionString: container.getConnectionUri(), }); + // } + await client.connect(); const result = await client.query("SELECT 1"); @@ -40,11 +42,11 @@ describe("CockroachDbContainer", { timeout: 180_000 }, () => { await client.end(); }); - // } - // setDatabase { it("should set database", async () => { + // setDatabase { await using container = await new CockroachDbContainer(IMAGE).withDatabase("custom_database").start(); + // } const client = new Client({ host: container.getHost(), @@ -52,6 +54,7 @@ describe("CockroachDbContainer", { timeout: 180_000 }, () => { database: container.getDatabase(), user: container.getUsername(), }); + await client.connect(); const result = await client.query("SELECT current_database()"); @@ -59,11 +62,11 @@ describe("CockroachDbContainer", { timeout: 180_000 }, () => { await client.end(); }); - // } - // setUsername { it("should set username", async () => { + // setUsername { await using container = await new CockroachDbContainer(IMAGE).withUsername("custom_username").start(); + // } const client = new Client({ host: container.getHost(), @@ -71,6 +74,7 @@ describe("CockroachDbContainer", { timeout: 180_000 }, () => { database: container.getDatabase(), user: container.getUsername(), }); + await client.connect(); const result = await client.query("SELECT current_user"); @@ -78,7 +82,6 @@ 
describe("CockroachDbContainer", { timeout: 180_000 }, () => { await client.end(); }); - // } it("should work with restarted container", async () => { await using container = await new CockroachDbContainer(IMAGE).start(); diff --git a/packages/modules/couchbase/Dockerfile b/packages/modules/couchbase/Dockerfile index 5a77dd6d1..57b3850ba 100644 --- a/packages/modules/couchbase/Dockerfile +++ b/packages/modules/couchbase/Dockerfile @@ -1 +1,2 @@ -FROM couchbase/server:7.6.6 +FROM couchbase/server:enterprise-7.0.3 +FROM couchbase/server:community-7.0.2 diff --git a/packages/modules/couchbase/src/couchbase-container.test.ts b/packages/modules/couchbase/src/couchbase-container.test.ts index cf25d1f09..7bb72e630 100644 --- a/packages/modules/couchbase/src/couchbase-container.test.ts +++ b/packages/modules/couchbase/src/couchbase-container.test.ts @@ -1,22 +1,13 @@ import couchbase, { Bucket, Cluster } from "couchbase"; +import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { BucketDefinition } from "./bucket-definition"; import { CouchbaseContainer } from "./couchbase-container"; import { CouchbaseService } from "./couchbase-service"; -describe("CouchbaseContainer", { timeout: 180_000 }, () => { - // upsertAndGet { - const upsertAndGet = async ( - bucket: Bucket, - key: string, - value: Record - ): Promise => { - const coll = bucket.defaultCollection(); - await coll.upsert(key, value); - - return coll.get(key); - }; - // } +const ENTERPRISE_IMAGE = getImage(__dirname, 0); +const COMMUNITY_IMAGE = getImage(__dirname, 1); +describe("CouchbaseContainer", { timeout: 180_000 }, () => { const flushBucketAndCheckExists = async ( cluster: Cluster, bucket: Bucket, @@ -28,74 +19,33 @@ describe("CouchbaseContainer", { timeout: 180_000 }, () => { return coll.exists(key); }; - describe("Enterprise Image", () => { - const COUCHBASE_IMAGE_ENTERPRISE = "couchbase/server:enterprise-7.0.3"; - + it.each([ENTERPRISE_IMAGE, COMMUNITY_IMAGE])("should connect and 
query using %s image", async (image) => { // connectAndQuery { - it("should connect and query using enterprise image", async () => { - const bucketDefinition = new BucketDefinition("mybucket"); - const container = new CouchbaseContainer(COUCHBASE_IMAGE_ENTERPRISE).withBucket(bucketDefinition); - - await using startedTestContainer = await container.start(); - - const cluster = await couchbase.Cluster.connect(startedTestContainer.getConnectionString(), { - username: startedTestContainer.getUsername(), - password: startedTestContainer.getPassword(), - }); + const bucketDefinition = new BucketDefinition("mybucket"); + await using container = await new CouchbaseContainer(image).withBucket(bucketDefinition).start(); - const bucket = cluster.bucket(bucketDefinition.getName()); - const result = await upsertAndGet(bucket, "testdoc", { foo: "bar" }); - - expect(result.content).toEqual({ foo: "bar" }); - await cluster.close(); + const cluster = await couchbase.Cluster.connect(container.getConnectionString(), { + username: container.getUsername(), + password: container.getPassword(), }); - // } - it("should flush bucket if flushEnabled and check any document exists", async () => { - const bucketDefinition = new BucketDefinition("mybucket").withFlushEnabled(true); - const container = new CouchbaseContainer(COUCHBASE_IMAGE_ENTERPRISE).withBucket(bucketDefinition); - - await using startedTestContainer = await container.start(); - const cluster = await couchbase.Cluster.connect(startedTestContainer.getConnectionString(), { - username: startedTestContainer.getUsername(), - password: startedTestContainer.getPassword(), - }); - - const bucket = cluster.bucket(bucketDefinition.getName()); - const coll = bucket.defaultCollection(); + const bucket = cluster.bucket(bucketDefinition.getName()); - await coll.upsert("testdoc", { foo: "bar" }); + const coll = bucket.defaultCollection(); + await coll.upsert("testdoc", { foo: "bar" }); + const result = await coll.get("testdoc"); - const 
existResult = await flushBucketAndCheckExists(cluster, bucket, "testdoc"); + expect(result.content).toEqual({ foo: "bar" }); - expect(existResult.exists).toBe(false); - await cluster.close(); - }); + await cluster.close(); + // } }); - describe("Community Image", () => { - const COUCHBASE_IMAGE_COMMUNITY = "couchbase/server:community-7.0.2"; - - it("should connect and query using community image", async () => { - const bucketDefinition = new BucketDefinition("mybucket"); - const container = new CouchbaseContainer(COUCHBASE_IMAGE_COMMUNITY).withBucket(bucketDefinition); - - await using startedTestContainer = await container.start(); - const cluster = await couchbase.Cluster.connect(startedTestContainer.getConnectionString(), { - username: startedTestContainer.getUsername(), - password: startedTestContainer.getPassword(), - }); - - const bucket = cluster.bucket(bucketDefinition.getName()); - const result = await upsertAndGet(bucket, "testdoc", { foo: "bar" }); - - expect(result.content).toEqual({ foo: "bar" }); - await cluster.close(); - }); - - it("should flush bucket if flushEnabled and check any document exists", async () => { + it.each([ENTERPRISE_IMAGE, COMMUNITY_IMAGE])( + "should flush bucket if flushEnabled and check any document exists with %s image", + async (image) => { const bucketDefinition = new BucketDefinition("mybucket").withFlushEnabled(true); - const container = new CouchbaseContainer(COUCHBASE_IMAGE_COMMUNITY).withBucket(bucketDefinition); + const container = new CouchbaseContainer(image).withBucket(bucketDefinition); await using startedTestContainer = await container.start(); const cluster = await couchbase.Cluster.connect(startedTestContainer.getConnectionString(), { @@ -112,28 +62,28 @@ describe("CouchbaseContainer", { timeout: 180_000 }, () => { expect(existResult.exists).toBe(false); await cluster.close(); - }); - - it("should throw error if analytics service enabled with community version", async () => { - const container = new 
CouchbaseContainer(COUCHBASE_IMAGE_COMMUNITY).withEnabledServices( - CouchbaseService.KV, - CouchbaseService.ANALYTICS - ); - - await expect(() => container.start()).rejects.toThrowError( - "The Analytics Service is only supported with the Enterprise version" - ); - }); + } + ); + + it("should throw error if analytics service enabled with community version", async () => { + const container = new CouchbaseContainer(COMMUNITY_IMAGE).withEnabledServices( + CouchbaseService.KV, + CouchbaseService.ANALYTICS + ); + + await expect(() => container.start()).rejects.toThrowError( + "The Analytics Service is only supported with the Enterprise version" + ); + }); - it("should throw error if eventing service enabled with community version", async () => { - const container = new CouchbaseContainer(COUCHBASE_IMAGE_COMMUNITY).withEnabledServices( - CouchbaseService.KV, - CouchbaseService.EVENTING - ); + it("should throw error if eventing service enabled with community version", async () => { + const container = new CouchbaseContainer(COMMUNITY_IMAGE).withEnabledServices( + CouchbaseService.KV, + CouchbaseService.EVENTING + ); - await expect(() => container.start()).rejects.toThrowError( - "The Eventing Service is only supported with the Enterprise version" - ); - }); + await expect(() => container.start()).rejects.toThrowError( + "The Eventing Service is only supported with the Enterprise version" + ); }); }); diff --git a/packages/modules/elasticsearch/src/elasticsearch-container.test.ts b/packages/modules/elasticsearch/src/elasticsearch-container.test.ts index 5803b61b9..886365a0e 100644 --- a/packages/modules/elasticsearch/src/elasticsearch-container.test.ts +++ b/packages/modules/elasticsearch/src/elasticsearch-container.test.ts @@ -6,9 +6,10 @@ const IMAGE = getImage(__dirname); const images = ["elasticsearch:7.17.28", "elasticsearch:8.18.1", IMAGE]; describe("ElasticsearchContainer", { timeout: 180_000 }, () => { - // createIndex { it.each(images)("should create an index 
with %s", async (image) => { + // createIndex { await using container = await new ElasticsearchContainer(image).start(); + const client = new Client({ node: container.getHttpUrl(), auth: { username: container.getUsername(), password: container.getPassword() }, @@ -17,12 +18,13 @@ describe("ElasticsearchContainer", { timeout: 180_000 }, () => { await client.indices.create({ index: "people" }); expect(await client.indices.exists({ index: "people" })).toBe(true); + // } }); - // } - // indexDocument { it("should index a document", async () => { + // indexDocument { await using container = await new ElasticsearchContainer(IMAGE).start(); + const client = new Client({ node: container.getHttpUrl(), auth: { username: container.getUsername(), password: container.getPassword() }, @@ -39,25 +41,27 @@ describe("ElasticsearchContainer", { timeout: 180_000 }, () => { }); expect((await client.get({ index: "people", id: document.id }))._source).toStrictEqual(document); + // } }); - // } - it("should work with restarted container", async () => { - await using container = await new ElasticsearchContainer(IMAGE).start(); - await container.restart(); + it("should set custom password", async () => { + // withPassword { + await using container = await new ElasticsearchContainer(IMAGE).withPassword("testPassword").start(); const client = new Client({ node: container.getHttpUrl(), auth: { username: container.getUsername(), password: container.getPassword() }, }); + // } await client.indices.create({ index: "people" }); expect(await client.indices.exists({ index: "people" })).toBe(true); - }); // } + }); - it("should set custom password", async () => { - await using container = await new ElasticsearchContainer(IMAGE).withPassword("testPassword").start(); + it("should work with restarted container", async () => { + await using container = await new ElasticsearchContainer(IMAGE).start(); + await container.restart(); const client = new Client({ node: container.getHttpUrl(), @@ -67,5 +71,5 @@ 
describe("ElasticsearchContainer", { timeout: 180_000 }, () => { await client.indices.create({ index: "people" }); expect(await client.indices.exists({ index: "people" })).toBe(true); - }); + }); // } }); diff --git a/packages/modules/etcd/src/etcd-container.test.ts b/packages/modules/etcd/src/etcd-container.test.ts index a70e0d053..dd28a7ce5 100644 --- a/packages/modules/etcd/src/etcd-container.test.ts +++ b/packages/modules/etcd/src/etcd-container.test.ts @@ -1,13 +1,12 @@ import { Etcd3 } from "etcd3"; -import { setTimeout } from "node:timers/promises"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { EtcdContainer } from "./etcd-container"; const IMAGE = getImage(__dirname); -describe("etcd", () => { - // readWrite { +describe("EtcdContainer", { timeout: 180_000 }, () => { it("should connect and perform read/write operations", async () => { + // readWrite { await using container = await new EtcdContainer(IMAGE).start(); const client = new Etcd3({ @@ -20,13 +19,11 @@ describe("etcd", () => { const result = await client.get(key).string(); expect(result).toEqual(value); + // } }); - // } - // subscribe { it("should subscribe to key changes", async () => { - const subscriber = vi.fn(); - + // etcdSubscribe { await using container = await new EtcdContainer(IMAGE).start(); const client = new Etcd3({ @@ -36,13 +33,12 @@ describe("etcd", () => { const key = "foo"; const value = "bar"; const watcher = await client.watch().key(key).create(); + const subscriber = vi.fn(); watcher.on("put", subscriber); await client.put(key).value(value); - await setTimeout(1_000); - - expect(subscriber).toHaveBeenCalled(); + await vi.waitFor(() => expect(subscriber).toHaveBeenCalled(), 1_000); await watcher.cancel(); + // } }); - // } }); diff --git a/packages/modules/gcloud/src/bigquery-emulator-container.test.ts b/packages/modules/gcloud/src/bigquery-emulator-container.test.ts index 3eebfb7e0..642cfd40c 100644 --- 
a/packages/modules/gcloud/src/bigquery-emulator-container.test.ts +++ b/packages/modules/gcloud/src/bigquery-emulator-container.test.ts @@ -1,36 +1,33 @@ import { BigQuery, TableSchema } from "@google-cloud/bigquery"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { BigQueryEmulatorContainer, StartedBigQueryEmulatorContainer } from "./bigquery-emulator-container"; +import { BigQueryEmulatorContainer } from "./bigquery-emulator-container"; const IMAGE = getImage(__dirname, 2); describe("BigQueryEmulatorContainer", { timeout: 240_000 }, () => { it("should work using default version", async () => { - await using bigQueryEmulatorContainer = await new BigQueryEmulatorContainer(IMAGE).start(); + // bigqueryExample { + await using container = await new BigQueryEmulatorContainer(IMAGE).start(); - await checkBigQuery(bigQueryEmulatorContainer); - }); + const bigQuery = new BigQuery({ + projectId: container.getProjectId(), + apiEndpoint: container.getEmulatorEndpoint(), + }); - async function checkBigQuery(bigQueryEmulatorContainer: StartedBigQueryEmulatorContainer) { - expect(bigQueryEmulatorContainer).toBeDefined(); - const testDataset = "test-dataset"; - const testTable = "test-table"; - const testSchema: TableSchema = { fields: [{ name: "message", type: "STRING" }] }; - const config = { - projectId: bigQueryEmulatorContainer.getProjectId(), - apiEndpoint: bigQueryEmulatorContainer.getEmulatorEndpoint(), - }; - const bigQuery = new BigQuery(config); + const dataset = "test-dataset"; + const table = "test-table"; + const schema: TableSchema = { fields: [{ name: "message", type: "STRING" }] }; - await bigQuery.dataset(testDataset).create(); - await bigQuery.dataset(testDataset).table(testTable).create({ schema: testSchema }); + await bigQuery.dataset(dataset).create(); + await bigQuery.dataset(dataset).table(table).create({ schema: schema }); await bigQuery - .dataset(testDataset) - .table(testTable) + .dataset(dataset) + .table(table) 
.insert([{ message: "Hello, BigQuery!" }]); - const [rows] = await bigQuery.dataset(testDataset).table(testTable).getRows(); + const [rows] = await bigQuery.dataset(dataset).table(table).getRows(); expect(rows).toEqual([{ message: "Hello, BigQuery!" }]); - } + // } + }); }); diff --git a/packages/modules/gcloud/src/cloudstorage-emulator-container.test.ts b/packages/modules/gcloud/src/cloudstorage-emulator-container.test.ts index 44cbb7959..ddc79a397 100644 --- a/packages/modules/gcloud/src/cloudstorage-emulator-container.test.ts +++ b/packages/modules/gcloud/src/cloudstorage-emulator-container.test.ts @@ -2,7 +2,7 @@ import { Storage } from "@google-cloud/storage"; import { setupServer } from "msw/node"; import { ReadableStream } from "node:stream/web"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { CloudStorageEmulatorContainer, StartedCloudStorageEmulatorContainer } from "./cloudstorage-emulator-container"; +import { CloudStorageEmulatorContainer } from "./cloudstorage-emulator-container"; const IMAGE = getImage(__dirname, 1); @@ -47,30 +47,38 @@ describe.sequential("CloudStorageEmulatorContainer", { timeout: 240_000 }, () => server.close(); }); - // cloud-storage { it("should work using default version", async () => { - await using cloudstorageEmulatorContainer = await new CloudStorageEmulatorContainer(IMAGE).start(); + // cloudstorageExample { + await using container = await new CloudStorageEmulatorContainer(IMAGE).start(); + + const storage = new Storage({ + projectId: "test-project", + apiEndpoint: container.getExternalUrl(), + }); + + await storage.createBucket("test-bucket"); + const [buckets] = await storage.getBuckets(); - await checkCloudStorage(cloudstorageEmulatorContainer); + expect(buckets[0].name).toBe("test-bucket"); + // } }); - // } it("should use the provided external URL", async () => { - await using cloudstorageEmulatorContainer = await new CloudStorageEmulatorContainer(IMAGE) + await using container = 
await new CloudStorageEmulatorContainer(IMAGE) .withExternalURL("http://cdn.company.local") .start(); - expect(cloudstorageEmulatorContainer).toBeDefined(); - expect(cloudstorageEmulatorContainer.getExternalUrl()).toBe("http://cdn.company.local"); + expect(container).toBeDefined(); + expect(container.getExternalUrl()).toBe("http://cdn.company.local"); }); it("should be able update the external URL of running instance", async () => { - await using cloudstorageEmulatorContainer = await new CloudStorageEmulatorContainer(IMAGE) + await using container = await new CloudStorageEmulatorContainer(IMAGE) .withExternalURL("http://cdn.company.local") .start(); - expect(cloudstorageEmulatorContainer).toBeDefined(); - expect(cloudstorageEmulatorContainer.getExternalUrl()).toBe("http://cdn.company.local"); + expect(container).toBeDefined(); + expect(container.getExternalUrl()).toBe("http://cdn.company.local"); const executedRequests: Request[] = []; @@ -82,13 +90,13 @@ describe.sequential("CloudStorageEmulatorContainer", { timeout: 240_000 }, () => } }); - await cloudstorageEmulatorContainer.updateExternalUrl("http://files.company.local"); + await container.updateExternalUrl("http://files.company.local"); expect(executedRequests).toHaveLength(1); const [requestInfo] = executedRequests; - const expectedRequestUrl = cloudstorageEmulatorContainer.getEmulatorEndpoint() + "/_internal/config"; + const expectedRequestUrl = container.getEmulatorEndpoint() + "/_internal/config"; expect(requestInfo.url).toContain(expectedRequestUrl); expect(requestInfo.method).toBe("PUT"); @@ -97,7 +105,7 @@ describe.sequential("CloudStorageEmulatorContainer", { timeout: 240_000 }, () => const requestBodyAsJson = JSON.parse(requestBody); expect(requestBodyAsJson).toEqual(expect.objectContaining({ externalUrl: "http://files.company.local" })); - expect(cloudstorageEmulatorContainer.getExternalUrl()).toBe("http://files.company.local"); + expect(container.getExternalUrl()).toBe("http://files.company.local"); 
}); it("should use emulator endpoint as default external URL", async () => { @@ -127,23 +135,4 @@ describe.sequential("CloudStorageEmulatorContainer", { timeout: 240_000 }, () => expect(container.getExternalUrl()).toBe(undefined); expect((await fetch(`${container.getEmulatorEndpoint()}/_internal/healthcheck`)).status).toBe(200); }); - - async function checkCloudStorage(cloudstorageEmulatorContainer: StartedCloudStorageEmulatorContainer) { - expect(cloudstorageEmulatorContainer).toBeDefined(); - - const cloudStorageClient = new Storage({ - projectId: "test-project", - apiEndpoint: cloudstorageEmulatorContainer.getExternalUrl(), - }); - expect(cloudStorageClient).toBeDefined(); - - const createdBucket = await cloudStorageClient.createBucket("test-bucket"); - expect(createdBucket).toBeDefined(); - - const [buckets] = await cloudStorageClient.getBuckets(); - expect(buckets).toBeDefined(); - expect(buckets).toHaveLength(1); - const [firstBucket] = buckets; - expect(firstBucket.name).toBe("test-bucket"); - } }); diff --git a/packages/modules/gcloud/src/datastore-emulator-container.test.ts b/packages/modules/gcloud/src/datastore-emulator-container.test.ts index 074e9f7ad..b16ae743d 100644 --- a/packages/modules/gcloud/src/datastore-emulator-container.test.ts +++ b/packages/modules/gcloud/src/datastore-emulator-container.test.ts @@ -1,42 +1,28 @@ import { Datastore } from "@google-cloud/datastore"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { DatastoreEmulatorContainer, StartedDatastoreEmulatorContainer } from "./datastore-emulator-container"; +import { DatastoreEmulatorContainer } from "./datastore-emulator-container"; const IMAGE = getImage(__dirname); describe("DatastoreEmulatorContainer", { timeout: 240_000 }, () => { - // datastore4 { - it("should work using default version", async () => { - await using datastoreEmulatorContainer = await new DatastoreEmulatorContainer(IMAGE).start(); - - await 
checkDatastore(datastoreEmulatorContainer); - }); - // } - - // datastore5 { - it("should work using version 468.0.0", async () => { - await using datastoreEmulatorContainer = await new DatastoreEmulatorContainer( - "gcr.io/google.com/cloudsdktool/google-cloud-cli:468.0.0-emulators" - ).start(); - - await checkDatastore(datastoreEmulatorContainer); - }); - - // } - - async function checkDatastore(datastoreEmulatorContainer: StartedDatastoreEmulatorContainer) { - expect(datastoreEmulatorContainer).toBeDefined(); - const testProjectId = "test-project"; - const testKind = "test-kind"; - const testId = "123"; - const databaseConfig = { projectId: testProjectId, apiEndpoint: datastoreEmulatorContainer.getEmulatorEndpoint() }; - const datastore = new Datastore(databaseConfig); - - const key = datastore.key([testKind, testId]); - const data = { message: "Hello, Datastore!" }; - await datastore.save({ key, data }); - const [entity] = await datastore.get(key); - - expect(entity).toEqual({ message: "Hello, Datastore!", [Datastore.KEY]: key }); - } + it.each([IMAGE, "gcr.io/google.com/cloudsdktool/google-cloud-cli:468.0.0-emulators"])( + "should work with %s", + async (image) => { + // datastoreExample { + await using container = await new DatastoreEmulatorContainer(image).start(); + + const datastore = new Datastore({ + projectId: "test-project", + apiEndpoint: container.getEmulatorEndpoint(), + }); + + const key = datastore.key(["test-kind", "123"]); + const data = { message: "Hello, Datastore!" 
}; + await datastore.save({ key, data }); + const [entity] = await datastore.get(key); + + expect(entity).toEqual({ message: "Hello, Datastore!", [Datastore.KEY]: key }); + // } + } + ); }); diff --git a/packages/modules/gcloud/src/firestore-emulator-container.test.ts b/packages/modules/gcloud/src/firestore-emulator-container.test.ts index f4a05d322..4059f3818 100644 --- a/packages/modules/gcloud/src/firestore-emulator-container.test.ts +++ b/packages/modules/gcloud/src/firestore-emulator-container.test.ts @@ -1,46 +1,31 @@ import admin from "firebase-admin"; import { randomUuid } from "testcontainers"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { FirestoreEmulatorContainer, StartedFirestoreEmulatorContainer } from "./firestore-emulator-container"; +import { FirestoreEmulatorContainer } from "./firestore-emulator-container"; const IMAGE = getImage(__dirname); describe("FirestoreEmulatorContainer", { timeout: 240_000 }, () => { - // firestore4 { - it("should work using default version", async () => { - await using firestoreEmulatorContainer = await new FirestoreEmulatorContainer(IMAGE).start(); - - await checkFirestore(firestoreEmulatorContainer); - }); - // } - - // firestore5 { - it("should work using version 468.0.0", async () => { - await using firestoreEmulatorContainer = await new FirestoreEmulatorContainer( - "gcr.io/google.com/cloudsdktool/google-cloud-cli:468.0.0-emulators" - ).start(); - - await checkFirestore(firestoreEmulatorContainer); - }); - - // } - - async function checkFirestore(firestoreEmulatorContainer: StartedFirestoreEmulatorContainer) { - expect(firestoreEmulatorContainer).toBeDefined(); - const testProjectId = "test-project"; - const testAppName = `test-app-${randomUuid()}`; - const testCollection = "test-collection"; - const testDocument = "test-doc"; - const firebaseConfig = { projectId: testProjectId }; - const firestore = admin.initializeApp(firebaseConfig, testAppName).firestore(); - 
firestore.settings({ host: firestoreEmulatorContainer.getEmulatorEndpoint(), ssl: false }); - - const docRef = firestore.collection(testCollection).doc(testDocument); - await docRef.set({ message: "Hello, Firestore!" }); - - const snapshot = await docRef.get(); - - expect(snapshot.exists).toBeTruthy(); - expect(snapshot.data()).toEqual({ message: "Hello, Firestore!" }); - } + it.each([IMAGE, "gcr.io/google.com/cloudsdktool/google-cloud-cli:468.0.0-emulators"])( + "should work with %s", + async (image) => { + // firestoreExample { + await using container = await new FirestoreEmulatorContainer(image).start(); + + const firestore = admin.initializeApp({ projectId: "test-project" }, `test-app-${randomUuid()}`).firestore(); + firestore.settings({ + host: container.getEmulatorEndpoint(), + ssl: false, + }); + + const collection = "test-collection"; + const document = "test-doc"; + const docRef = firestore.collection(collection).doc(document); + await docRef.set({ message: "Hello, Firestore!" }); + const snapshot = await docRef.get(); + + expect(snapshot.data()).toEqual({ message: "Hello, Firestore!" 
}); + // } + } + ); }); diff --git a/packages/modules/gcloud/src/pubsub-emulator-container.test.ts b/packages/modules/gcloud/src/pubsub-emulator-container.test.ts index a30bc64a1..f5982f32e 100644 --- a/packages/modules/gcloud/src/pubsub-emulator-container.test.ts +++ b/packages/modules/gcloud/src/pubsub-emulator-container.test.ts @@ -1,28 +1,22 @@ import { PubSub } from "@google-cloud/pubsub"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { PubSubEmulatorContainer, StartedPubSubEmulatorContainer } from "./pubsub-emulator-container"; +import { PubSubEmulatorContainer } from "./pubsub-emulator-container"; const IMAGE = getImage(__dirname); describe("PubSubEmulatorContainer", { timeout: 240_000 }, () => { it("should work using default version", async () => { - await using pubsubEmulatorContainer = await new PubSubEmulatorContainer(IMAGE).start(); + // pubsubExample { + await using container = await new PubSubEmulatorContainer(IMAGE).start(); - await checkPubSub(pubsubEmulatorContainer); - }); - - async function checkPubSub(pubsubEmulatorContainer: StartedPubSubEmulatorContainer) { - expect(pubsubEmulatorContainer).toBeDefined(); - - const pubSubClient = new PubSub({ + const pubSub = new PubSub({ projectId: "test-project", - apiEndpoint: pubsubEmulatorContainer.getEmulatorEndpoint(), + apiEndpoint: container.getEmulatorEndpoint(), }); - expect(pubSubClient).toBeDefined(); - const [createdTopic] = await pubSubClient.createTopic("test-topic"); - expect(createdTopic).toBeDefined(); - // Note: topic name format is projects//topics/ + const [createdTopic] = await pubSub.createTopic("test-topic"); + expect(createdTopic.name).toContain("test-topic"); - } + // } + }); }); diff --git a/packages/modules/gcloud/src/spanner-emulator-container.test.ts b/packages/modules/gcloud/src/spanner-emulator-container.test.ts index 8b005959c..29559928d 100644 --- a/packages/modules/gcloud/src/spanner-emulator-container.test.ts +++ 
b/packages/modules/gcloud/src/spanner-emulator-container.test.ts @@ -6,52 +6,47 @@ import { SpannerEmulatorContainer } from "./spanner-emulator-container"; const IMAGE = getImage(__dirname, 3); describe("SpannerEmulatorContainer", { timeout: 240_000 }, () => { - // startupWithExplicitClient { it("should start, expose endpoints and accept real client connections using explicitly configured client", async () => { + // startupWithExplicitClient { await using container = await new SpannerEmulatorContainer(IMAGE).withProjectId("test-project").start(); - const client = new Spanner({ + const spanner = new Spanner({ projectId: container.getProjectId(), apiEndpoint: container.getHost(), port: container.getGrpcPort(), sslCreds: container.getSslCredentials(), }); - // list instance configs - const admin = client.getInstanceAdminClient(); + const admin = spanner.getInstanceAdminClient(); const [configs] = await admin.listInstanceConfigs({ parent: admin.projectPath(container.getProjectId()), }); - // emulator always includes "emulator-config" const expectedConfigName = admin.instanceConfigPath(container.getProjectId(), "emulator-config"); expect(configs.map((c) => c.name)).toContain(expectedConfigName); + // } }); - // } describe.sequential("Shared state", () => { afterEach(() => { process.env.SPANNER_EMULATOR_HOST = ""; }); - // startupWithEnvironmentVariable { it("should start, expose endpoints and accept real client connections using projectId and SPANNER_EMULATOR_HOST", async () => { await using container = await new SpannerEmulatorContainer(IMAGE).withProjectId("test-project").start(); - // configure the client to talk to our emulator + // startupWithEnvironmentVariable { process.env.SPANNER_EMULATOR_HOST = container.getEmulatorGrpcEndpoint(); - const client = new Spanner({ projectId: container.getProjectId() }); + const spanner = new Spanner({ projectId: container.getProjectId() }); + // } - // list instance configs - const admin = client.getInstanceAdminClient(); + const 
admin = spanner.getInstanceAdminClient(); const [configs] = await admin.listInstanceConfigs({ parent: admin.projectPath(container.getProjectId()), }); - // emulator always includes "emulator-config" const expectedConfigName = admin.instanceConfigPath(container.getProjectId(), "emulator-config"); expect(configs.map((c) => c.name)).toContain(expectedConfigName); }); - // } }); }); diff --git a/packages/modules/gcloud/src/spanner-emulator-helper.test.ts b/packages/modules/gcloud/src/spanner-emulator-helper.test.ts index b06b9fcc6..8594f1158 100644 --- a/packages/modules/gcloud/src/spanner-emulator-helper.test.ts +++ b/packages/modules/gcloud/src/spanner-emulator-helper.test.ts @@ -6,9 +6,10 @@ import { SpannerEmulatorHelper } from "./spanner-emulator-helper"; const IMAGE = getImage(__dirname, 3); describe("SpannerEmulatorHelper", { timeout: 240_000 }, () => { - // createAndDelete { it("should create and delete instance and database via helper", async () => { + // createAndDelete { await using container = await new SpannerEmulatorContainer(IMAGE).start(); + const helper = new SpannerEmulatorHelper(container); const instanceId = "test-instance"; const databaseId = "test-db"; @@ -37,6 +38,6 @@ describe("SpannerEmulatorHelper", { timeout: 240_000 }, () => { const [instanceExistsAfter] = await client.instance(instanceId).exists(); expect(instanceExistsAfter).toBe(false); + // } }); - // } }); diff --git a/packages/modules/hivemq/src/hivemq-container.test.ts b/packages/modules/hivemq/src/hivemq-container.test.ts index 247e72984..3d6530f71 100644 --- a/packages/modules/hivemq/src/hivemq-container.test.ts +++ b/packages/modules/hivemq/src/hivemq-container.test.ts @@ -2,32 +2,28 @@ import mqtt from "mqtt"; import { expect } from "vitest"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { HiveMQContainer } from "./hivemq-container"; + const IMAGE = getImage(__dirname); describe("HiveMQContainer", { timeout: 240_000 }, () => { - // connect { 
it("should connect to HiveMQ Community Edition via MQTT.js", async () => { + // hivemqConnect { await using container = await new HiveMQContainer(IMAGE).start(); - const testMqttClient = mqtt.connect(container.getConnectionString()); + const mqttClient = await mqtt.connectAsync(container.getConnectionString()); - const promise = new Promise((resolve) => { - testMqttClient.on("message", (topic, message) => { - expect(message.toString()).toEqual("Test Message"); - testMqttClient.end(); - resolve(); - }); + const firstMessagePromise = new Promise<{ topic: string; message: Buffer }>((resolve, reject) => { + mqttClient.once("message", (topic, message) => resolve({ topic, message })); + mqttClient.once("error", (err) => reject(err)); }); - testMqttClient.on("connect", () => { - testMqttClient.subscribe("test", (error) => { - if (!error) { - testMqttClient.publish("test", "Test Message"); - } - }); - }); + await mqttClient.subscribeAsync("test"); + await mqttClient.publishAsync("test", "Test Message"); + + const { message } = await firstMessagePromise; + expect(message.toString()).toEqual("Test Message"); - await expect(promise).resolves.toBeUndefined(); + mqttClient.end(); + // } }); - // } }); diff --git a/packages/modules/k3s/src/k3s-container.test.ts b/packages/modules/k3s/src/k3s-container.test.ts index 03bdc4591..9869a7c02 100644 --- a/packages/modules/k3s/src/k3s-container.test.ts +++ b/packages/modules/k3s/src/k3s-container.test.ts @@ -1,65 +1,33 @@ import * as k8s from "@kubernetes/client-node"; -import { setTimeout } from "node:timers/promises"; import { GenericContainer, Network, Wait } from "testcontainers"; +import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { K3sContainer } from "./k3s-container"; -describe("K3s", { timeout: 120_000 }, () => { - it("should construct", () => { - new K3sContainer("rancher/k3s:v1.31.2-k3s1"); - }); +const IMAGE = getImage(__dirname); +describe("K3sContainer", { timeout: 120_000 }, () => { // 
K3sContainer runs as a privileged container if (!process.env["CI_ROOTLESS"]) { it("should start and have listable node", async () => { - // starting_k3s { - await using container = await new K3sContainer("rancher/k3s:v1.31.2-k3s1").start(); - // } - - // connecting_with_client { - // obtain a kubeconfig file that allows us to connect to k3s - const kubeConfig = container.getKubeConfig(); - - const kc = new k8s.KubeConfig(); - kc.loadFromString(kubeConfig); + // k3sListNodes { + await using container = await new K3sContainer(IMAGE).start(); - const client = kc.makeApiClient(k8s.CoreV1Api); + const kubeConfig = new k8s.KubeConfig(); + kubeConfig.loadFromString(container.getKubeConfig()); - // interact with the running K3s server, e.g.: + const client = kubeConfig.makeApiClient(k8s.CoreV1Api); const nodeList = await client.listNode(); - // } expect(nodeList.items).toHaveLength(1); - }); - - it("should expose kubeconfig for a network alias", async () => { - await using network = await new Network().start(); - await using container = await new K3sContainer("rancher/k3s:v1.31.2-k3s1") - .withNetwork(network) - .withNetworkAliases("k3s") - .start(); - - // obtain a kubeconfig that allows us to connect on the custom network - const kubeConfig = container.getAliasedKubeConfig("k3s"); - - await using kubectlContainer = await new GenericContainer("rancher/kubectl:v1.31.2") - .withNetwork(network) - .withCopyContentToContainer([{ content: kubeConfig, target: "/home/kubectl/.kube/config" }]) - .withCommand(["get", "namespaces"]) - .withWaitStrategy(Wait.forOneShotStartup()) - .withStartupTimeout(30_000) - .start(); - - const chunks = []; - for await (const chunk of await kubectlContainer.logs()) { - chunks.push(chunk); - } - expect(chunks).toEqual(expect.arrayContaining([expect.stringContaining("kube-system")])); + // } }); it("should start a pod", async () => { - await using container = await new K3sContainer("rancher/k3s:v1.31.2-k3s1").start(); - const kc = new 
k8s.KubeConfig(); - kc.loadFromString(container.getKubeConfig()); + // k3sStartPod { + await using container = await new K3sContainer(IMAGE).start(); + + const kubeConfig = new k8s.KubeConfig(); + kubeConfig.loadFromString(container.getKubeConfig()); const pod = { metadata: { @@ -85,23 +53,37 @@ describe("K3s", { timeout: 120_000 }, () => { }, }; - const client = kc.makeApiClient(k8s.CoreV1Api); + const client = kubeConfig.makeApiClient(k8s.CoreV1Api); await client.createNamespacedPod({ namespace: "default", body: pod }); - // wait for pod to be ready - expect(await podIsReady(client, "default", "helloworld", 60_000)).toBe(true); + await vi.waitFor(async () => { + const { status } = await client.readNamespacedPodStatus({ namespace: "default", name: "helloworld" }); + + return ( + status?.phase === "Running" && + status?.conditions?.some((cond) => cond.type === "Ready" && cond.status === "True") + ); + }, 60_000); + // } }); - } -}); -async function podIsReady(client: k8s.CoreV1Api, namespace: string, name: string, timeout: number): Promise { - for (const startTime = Date.now(); Date.now() - startTime < timeout; ) { - const res = await client.readNamespacedPodStatus({ namespace, name }); - const ready = - res.status?.phase === "Running" && - !!res.status?.conditions?.some((cond) => cond.type === "Ready" && cond.status === "True"); - if (ready) return true; - await setTimeout(3_000); + it("should expose kubeconfig for a network alias", async () => { + // k3sAliasedKubeConfig { + await using network = await new Network().start(); + await using container = await new K3sContainer(IMAGE).withNetwork(network).withNetworkAliases("k3s").start(); + + const kubeConfig = container.getAliasedKubeConfig("k3s"); + + await using kubectlContainer = await new GenericContainer("rancher/kubectl:v1.31.2") + .withNetwork(network) + .withCopyContentToContainer([{ content: kubeConfig, target: "/home/kubectl/.kube/config" }]) + .withCommand(["get", "namespaces"]) + 
.withWaitStrategy(Wait.forOneShotStartup()) + .start(); + + const chunks = await (await kubectlContainer.logs()).toArray(); + expect(chunks).toEqual(expect.arrayContaining([expect.stringContaining("kube-system")])); + // } + }); } - return false; -} +}); diff --git a/packages/modules/kafka/src/kafka-container-7.test.ts b/packages/modules/kafka/src/kafka-container-7.test.ts index b2989139a..44d6da5a6 100644 --- a/packages/modules/kafka/src/kafka-container-7.test.ts +++ b/packages/modules/kafka/src/kafka-container-7.test.ts @@ -2,39 +2,39 @@ import fs from "fs"; import path from "path"; import { GenericContainer, Network } from "testcontainers"; import { KafkaContainer } from "./kafka-container"; -import { testPubSub } from "./test-helper"; +import { assertMessageProducedAndConsumed } from "./test-helper"; const IMAGE = "confluentinc/cp-kafka:7.9.1"; describe("KafkaContainer", { timeout: 240_000 }, () => { - // connectBuiltInZK { it("should connect using in-built zoo-keeper", async () => { - await using kafkaContainer = await new KafkaContainer(IMAGE).start(); - - await testPubSub(kafkaContainer); + // connectBuiltInZK { + await using container = await new KafkaContainer(IMAGE).start(); + await assertMessageProducedAndConsumed(container); + // } }); - // } it("should connect using in-built zoo-keeper and custom images", async () => { - await using kafkaContainer = await new KafkaContainer(IMAGE).start(); + await using container = await new KafkaContainer(IMAGE).start(); - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); }); it("should connect using in-built zoo-keeper and custom network", async () => { await using network = await new Network().start(); - await using kafkaContainer = await new KafkaContainer(IMAGE).withNetwork(network).start(); + await using container = await new KafkaContainer(IMAGE).withNetwork(network).start(); - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); }); - // 
connectProvidedZK { it("should connect using provided zoo-keeper and network", async () => { + // connectProvidedZK { await using network = await new Network().start(); const zooKeeperHost = "zookeeper"; const zooKeeperPort = 2181; + await using _ = await new GenericContainer("confluentinc/cp-zookeeper:5.5.4") .withNetwork(network) .withNetworkAliases(zooKeeperHost) @@ -42,59 +42,87 @@ describe("KafkaContainer", { timeout: 240_000 }, () => { .withExposedPorts(zooKeeperPort) .start(); - await using kafkaContainer = await new KafkaContainer(IMAGE) + await using container = await new KafkaContainer(IMAGE) .withNetwork(network) .withZooKeeper(zooKeeperHost, zooKeeperPort) .start(); + // } - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); }); - // } it("should be reusable", async () => { - await using originalKafkaContainer = await new KafkaContainer(IMAGE).withReuse().start(); - const newKafkaContainer = await new KafkaContainer(IMAGE).withReuse().start(); + await using container1 = await new KafkaContainer(IMAGE).withReuse().start(); + const container2 = await new KafkaContainer(IMAGE).withReuse().start(); - expect(newKafkaContainer.getId()).toBe(originalKafkaContainer.getId()); + expect(container2.getId()).toBe(container1.getId()); }); - describe.each([ - { - name: "and zookpeer enabled", - configure: () => ({}), - }, - { - name: "and kraft enabled", - configure: (kafkaContainer: KafkaContainer) => kafkaContainer.withKraft(), - }, - ])("when SASL SSL config listener provided $name", ({ configure }) => { + describe("when SASL SSL config listener provided with Kraft", () => { const certificatesDir = path.resolve(__dirname, "..", "test-certs"); - // ssl { - it(`should connect locally`, async () => { - const kafkaContainer = await new KafkaContainer("confluentinc/cp-kafka:7.5.0").withSaslSslListener({ - port: 9096, - sasl: { - mechanism: "SCRAM-SHA-512", - user: { - name: "app-user", - password: "userPassword", + it(`should connect 
locally with ZK`, async () => { + // kafkaSsl { + await using container = await new KafkaContainer("confluentinc/cp-kafka:7.5.0") + .withSaslSslListener({ + port: 9096, + sasl: { + mechanism: "SCRAM-SHA-512", + user: { + name: "app-user", + password: "userPassword", + }, }, + keystore: { + content: fs.readFileSync(path.resolve(certificatesDir, "kafka.server.keystore.pfx")), + passphrase: "serverKeystorePassword", + }, + truststore: { + content: fs.readFileSync(path.resolve(certificatesDir, "kafka.server.truststore.pfx")), + passphrase: "serverTruststorePassword", + }, + }) + .start(); + + await assertMessageProducedAndConsumed(container, { + brokers: [`${container.getHost()}:${container.getMappedPort(9096)}`], + sasl: { + username: "app-user", + password: "userPassword", + mechanism: "scram-sha-512", }, - keystore: { - content: fs.readFileSync(path.resolve(certificatesDir, "kafka.server.keystore.pfx")), - passphrase: "serverKeystorePassword", - }, - truststore: { - content: fs.readFileSync(path.resolve(certificatesDir, "kafka.server.truststore.pfx")), - passphrase: "serverTruststorePassword", + ssl: { + ca: [fs.readFileSync(path.resolve(certificatesDir, "kafka.client.truststore.pem"))], }, }); - configure(kafkaContainer); - await using startedKafkaContainer = await kafkaContainer.start(); + // } + }); + + it(`should connect locally with Kraft`, async () => { + await using container = await new KafkaContainer("confluentinc/cp-kafka:7.5.0") + .withKraft() + .withSaslSslListener({ + port: 9096, + sasl: { + mechanism: "SCRAM-SHA-512", + user: { + name: "app-user", + password: "userPassword", + }, + }, + keystore: { + content: fs.readFileSync(path.resolve(certificatesDir, "kafka.server.keystore.pfx")), + passphrase: "serverKeystorePassword", + }, + truststore: { + content: fs.readFileSync(path.resolve(certificatesDir, "kafka.server.truststore.pfx")), + passphrase: "serverTruststorePassword", + }, + }) + .start(); - await testPubSub(startedKafkaContainer, { - brokers: 
[`${startedKafkaContainer.getHost()}:${startedKafkaContainer.getMappedPort(9096)}`], + await assertMessageProducedAndConsumed(container, { + brokers: [`${container.getHost()}:${container.getMappedPort(9096)}`], sasl: { username: "app-user", password: "userPassword", @@ -105,7 +133,6 @@ describe("KafkaContainer", { timeout: 240_000 }, () => { }, }); }); - // } it(`should connect within Docker network`, async () => { await using network = await new Network().start(); @@ -171,13 +198,13 @@ describe("KafkaContainer", { timeout: 240_000 }, () => { }); }); - // connectKraft { it("should connect using kraft", async () => { - await using kafkaContainer = await new KafkaContainer(IMAGE).withKraft().start(); + // connectKraft { + await using container = await new KafkaContainer(IMAGE).withKraft().start(); + // } - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); }); - // } it("should throw an error when using kraft and and confluence platfom below 7.0.0", async () => { expect(() => new KafkaContainer("confluentinc/cp-kafka:6.2.14").withKraft()).toThrow( @@ -187,9 +214,9 @@ describe("KafkaContainer", { timeout: 240_000 }, () => { it("should connect using kraft and custom network", async () => { await using network = await new Network().start(); - await using kafkaContainer = await new KafkaContainer(IMAGE).withKraft().withNetwork(network).start(); + await using container = await new KafkaContainer(IMAGE).withKraft().withNetwork(network).start(); - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); }); it("should throw an error when using kraft wit sasl and confluence platfom below 7.5.0", async () => { diff --git a/packages/modules/kafka/src/kafka-container-latest.test.ts b/packages/modules/kafka/src/kafka-container-latest.test.ts index acb6d73aa..3197080ca 100644 --- a/packages/modules/kafka/src/kafka-container-latest.test.ts +++ b/packages/modules/kafka/src/kafka-container-latest.test.ts @@ -3,37 
+3,37 @@ import path from "path"; import { GenericContainer, Network } from "testcontainers"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { KafkaContainer, SaslSslListenerOptions } from "./kafka-container"; -import { testPubSub } from "./test-helper"; +import { assertMessageProducedAndConsumed } from "./test-helper"; const IMAGE = getImage(__dirname); describe("KafkaContainer", { timeout: 240_000 }, () => { const certificatesDir = path.resolve(__dirname, "..", "test-certs"); - // connectKafkaLatest { it("should connect", async () => { - await using kafkaContainer = await new KafkaContainer(IMAGE).start(); + // kafkaLatestConnect { + await using container = await new KafkaContainer(IMAGE).start(); - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); + // } }); - // } it("should connect with custom network", async () => { await using network = await new Network().start(); - await using kafkaContainer = await new KafkaContainer(IMAGE).withNetwork(network).start(); + await using container = await new KafkaContainer(IMAGE).withNetwork(network).start(); - await testPubSub(kafkaContainer); + await assertMessageProducedAndConsumed(container); }); it("should be reusable", async () => { - await using originalKafkaContainer = await new KafkaContainer(IMAGE).withReuse().start(); - const newKafkaContainer = await new KafkaContainer(IMAGE).withReuse().start(); + await using container1 = await new KafkaContainer(IMAGE).withReuse().start(); + const container2 = await new KafkaContainer(IMAGE).withReuse().start(); - expect(newKafkaContainer.getId()).toBe(originalKafkaContainer.getId()); + expect(container2.getId()).toBe(container1.getId()); }); - // ssl { it(`should connect with SASL`, async () => { + // kafkaLatestSsl { const saslConfig: SaslSslListenerOptions = { port: 9096, sasl: { @@ -53,11 +53,10 @@ describe("KafkaContainer", { timeout: 240_000 }, () => { }, }; - const kafkaContainer = new 
KafkaContainer("confluentinc/cp-kafka:7.5.0").withSaslSslListener(saslConfig); - await using startedKafkaContainer = await kafkaContainer.start(); + await using container = await new KafkaContainer(IMAGE).withSaslSslListener(saslConfig).start(); - await testPubSub(startedKafkaContainer, { - brokers: [`${startedKafkaContainer.getHost()}:${startedKafkaContainer.getMappedPort(9096)}`], + await assertMessageProducedAndConsumed(container, { + brokers: [`${container.getHost()}:${container.getMappedPort(9096)}`], sasl: { username: "app-user", password: "userPassword", @@ -67,8 +66,8 @@ describe("KafkaContainer", { timeout: 240_000 }, () => { ca: [fs.readFileSync(path.resolve(certificatesDir, "kafka.client.truststore.pem"))], }, }); + // } }); - // } it(`should connect with SASL in custom network`, async () => { await using network = await new Network().start(); diff --git a/packages/modules/kafka/src/test-helper.ts b/packages/modules/kafka/src/test-helper.ts index f3232d29e..22111a0c5 100644 --- a/packages/modules/kafka/src/test-helper.ts +++ b/packages/modules/kafka/src/test-helper.ts @@ -1,34 +1,30 @@ import { Kafka, KafkaConfig, logLevel } from "kafkajs"; -import { StartedTestContainer } from "testcontainers"; +import { StartedKafkaContainer } from "./kafka-container"; -export async function testPubSub(kafkaContainer: StartedTestContainer, additionalConfig: Partial = {}) { - const kafka = new Kafka({ - logLevel: logLevel.NOTHING, - brokers: [`${kafkaContainer.getHost()}:${kafkaContainer.getMappedPort(9093)}`], - ...additionalConfig, - }); +// kafkaTestHelper { +export async function assertMessageProducedAndConsumed( + container: StartedKafkaContainer, + additionalConfig: Partial = {} +) { + const brokers = [`${container.getHost()}:${container.getMappedPort(9093)}`]; + const kafka = new Kafka({ logLevel: logLevel.NOTHING, brokers: brokers, ...additionalConfig }); const producer = kafka.producer(); await producer.connect(); - const consumer = kafka.consumer({ groupId: 
"test-group" }); await consumer.connect(); - await producer.send({ - topic: "test-topic", - messages: [{ value: "test message" }], - }); - + await producer.send({ topic: "test-topic", messages: [{ value: "test message" }] }); await consumer.subscribe({ topic: "test-topic", fromBeginning: true }); - const consumedMessage = await new Promise((resolve) => { + const consumedMessage = await new Promise((resolve) => consumer.run({ eachMessage: async ({ message }) => resolve(message.value?.toString()), - }); - }); - + }) + ); expect(consumedMessage).toBe("test message"); await consumer.disconnect(); await producer.disconnect(); } +// } diff --git a/packages/modules/kurrentdb/src/kurrentdb-container.test.ts b/packages/modules/kurrentdb/src/kurrentdb-container.test.ts index 5c2ed9a13..005db3377 100644 --- a/packages/modules/kurrentdb/src/kurrentdb-container.test.ts +++ b/packages/modules/kurrentdb/src/kurrentdb-container.test.ts @@ -5,8 +5,8 @@ import { KurrentDbContainer } from "./kurrentdb-container"; const IMAGE = getImage(__dirname); describe.sequential("KurrentDbContainer", { timeout: 240_000 }, () => { - // startContainer { it("should execute write and read", async () => { + // startContainer { await using container = await new KurrentDbContainer(IMAGE).start(); const client = KurrentDBClient.connectionString(container.getConnectionString()); @@ -17,23 +17,17 @@ describe.sequential("KurrentDbContainer", { timeout: 240_000 }, () => { data: { email: "john@foo.local" }, type: "UserCreated", id: "28ab6bca-d9ae-418b-a1af-eb65dd653c38", - metadata: { - someMetadata: "bar", - }, + metadata: { someMetadata: "bar" }, }, ]); expect(await consumeSteamingRead(client.readStream("User-1"))).toEqual([ expect.objectContaining({ event: expect.objectContaining({ - data: { - email: "john@foo.local", - }, + data: { email: "john@foo.local" }, id: "28ab6bca-d9ae-418b-a1af-eb65dd653c38", isJson: true, - metadata: { - someMetadata: "bar", - }, + metadata: { someMetadata: "bar" }, revision: 
0, streamId: "User-1", type: "UserCreated", @@ -42,11 +36,19 @@ describe.sequential("KurrentDbContainer", { timeout: 240_000 }, () => { ]); await client.dispose(); + + async function consumeSteamingRead(read: AsyncIterableIterator): Promise { + const events = []; + for await (const event of read) { + events.push(event); + } + return events; + } + // } }); - // } - // usingStandardProjections { it("should use built-in projections", async () => { + // usingStandardProjections { await using container = await new KurrentDbContainer(IMAGE).start(); const client = KurrentDBClient.connectionString(container.getConnectionString()); @@ -86,22 +88,12 @@ describe.sequential("KurrentDbContainer", { timeout: 240_000 }, () => { ); await stream.unsubscribe(); await client.dispose(); + + async function getStreamFirstEvent(stream: StreamSubscription): Promise { + for await (const event of stream) { + return event; + } + } + // } }); - // } }); - -async function consumeSteamingRead(read: AsyncIterableIterator): Promise { - const events = []; - - for await (const event of read) { - events.push(event); - } - - return events; -} - -async function getStreamFirstEvent(stream: StreamSubscription): Promise { - for await (const event of stream) { - return event; - } -} diff --git a/packages/modules/localstack/src/localstack-container.test.ts b/packages/modules/localstack/src/localstack-container.test.ts index 21cdaa4b2..27bdc0107 100644 --- a/packages/modules/localstack/src/localstack-container.test.ts +++ b/packages/modules/localstack/src/localstack-container.test.ts @@ -17,8 +17,8 @@ const runAwsCliAgainstDockerNetworkContainer = async ( }; describe("LocalStackContainer", { timeout: 180_000 }, () => { - // createS3Bucket { it("should create a S3 bucket", async () => { + // localstackCreateS3Bucket { await using container = await new LocalstackContainer(IMAGE).start(); const client = new S3Client({ @@ -30,17 +30,14 @@ describe("LocalStackContainer", { timeout: 180_000 }, () => { 
accessKeyId: "test", }, }); - const input = { - Bucket: "testcontainers", - }; + + const input = { Bucket: "testcontainers" }; const command = new CreateBucketCommand(input); - const createBucketResponse = await client.send(command); - expect(createBucketResponse.$metadata.httpStatusCode).toEqual(200); - const headBucketResponse = await client.send(new HeadBucketCommand(input)); - expect(headBucketResponse.$metadata.httpStatusCode).toEqual(200); + expect((await client.send(command)).$metadata.httpStatusCode).toEqual(200); + expect((await client.send(new HeadBucketCommand(input))).$metadata.httpStatusCode).toEqual(200); + // } }); - // } it("should use custom network", async () => { await using network = await new Network().start(); diff --git a/packages/modules/mariadb/src/mariadb-container.test.ts b/packages/modules/mariadb/src/mariadb-container.test.ts index 49b5124d6..f7dd9fb06 100644 --- a/packages/modules/mariadb/src/mariadb-container.test.ts +++ b/packages/modules/mariadb/src/mariadb-container.test.ts @@ -4,9 +4,9 @@ import { MariaDbContainer } from "./mariadb-container"; const IMAGE = getImage(__dirname); -describe("MariaDb", { timeout: 240_000 }, () => { - // connect { +describe("MariaDbContainer", { timeout: 240_000 }, () => { it("should connect and execute query", async () => { + // mariaDbConnect { await using container = await new MariaDbContainer(IMAGE).start(); const client = await mariadb.createConnection({ @@ -21,11 +21,11 @@ describe("MariaDb", { timeout: 240_000 }, () => { expect(rows).toEqual([{ res: 1 }]); await client.end(); + // } }); - // } - // uriConnect { it("should work with database URI", async () => { + // mariaDbUriConnect { const username = "testUser"; const password = "testPassword"; const database = "testDB"; @@ -36,6 +36,7 @@ describe("MariaDb", { timeout: 240_000 }, () => { .withUserPassword(password) .withDatabase(database) .start(); + expect(container.getConnectionUri()).toEqual( 
`mariadb://${username}:${password}@${container.getHost()}:${container.getPort()}/${database}` ); @@ -45,14 +46,15 @@ describe("MariaDb", { timeout: 240_000 }, () => { .withRootPassword(password) .withDatabase(database) .start(); + expect(rootContainer.getConnectionUri(true)).toEqual( `mariadb://root:${password}@${rootContainer.getHost()}:${rootContainer.getPort()}/${database}` ); + // } }); - // } - // setDatabase { it("should set database", async () => { + // mariaDbSetDatabase { await using container = await new MariaDbContainer(IMAGE).withDatabase("customDatabase").start(); const client = await mariadb.createConnection({ @@ -67,11 +69,11 @@ describe("MariaDb", { timeout: 240_000 }, () => { expect(rows).toEqual([{ res: "customDatabase" }]); await client.end(); + // } }); - // } - // setUsername { it("should set username", async () => { + // mariaDbSetUsername { await using container = await new MariaDbContainer(IMAGE).withUsername("customUsername").start(); const client = await mariadb.createConnection({ @@ -86,43 +88,8 @@ describe("MariaDb", { timeout: 240_000 }, () => { expect(rows).toEqual([{ res: "customUsername@%" }]); await client.end(); + // } }); - // } - - // insertAndFetchData { - it("should create a table, insert a row, and fetch that row", async () => { - await using container = await new MariaDbContainer(IMAGE).start(); - - const client = await mariadb.createConnection({ - host: container.getHost(), - port: container.getPort(), - database: container.getDatabase(), - user: container.getUsername(), - password: container.getUserPassword(), - }); - - // Create table - await client.query(` - CREATE TABLE users ( - id INT AUTO_INCREMENT PRIMARY KEY, - name VARCHAR(255) NOT NULL, - email VARCHAR(255) NOT NULL UNIQUE - ); - `); - - // Insert a row - const name = "John Doe"; - const email = "john.doe@example.com"; - const insertResult = await client.query("INSERT INTO users (name, email) VALUES (?, ?)", [name, email]); - 
expect(insertResult.affectedRows).toBe(1); - - // Fetch the row - const [user] = await client.query("SELECT id, name, email FROM users WHERE email = ?", [email]); - expect(user).toEqual({ id: expect.any(Number), name, email }); - - await client.end(); - }); - // } it("should work with restarted container", async () => { await using container = await new MariaDbContainer(IMAGE).start(); diff --git a/packages/modules/minio/src/minio-container.test.ts b/packages/modules/minio/src/minio-container.test.ts index 15650ac7a..767ac40f9 100644 --- a/packages/modules/minio/src/minio-container.test.ts +++ b/packages/modules/minio/src/minio-container.test.ts @@ -4,12 +4,12 @@ import { MinioContainer } from "./minio-container"; const IMAGE = getImage(__dirname); -describe("MinIO", { timeout: 240_000 }, () => { - // connectWithDefaultCredentials { +describe("MinioContainer", { timeout: 240_000 }, () => { it("should connect and upload a file", async () => { + // connectWithDefaultCredentials { await using container = await new MinioContainer(IMAGE).start(); - const minioClient = new minio.Client({ + const client = new minio.Client({ endPoint: container.getHost(), port: container.getPort(), useSSL: false, @@ -17,30 +17,27 @@ describe("MinIO", { timeout: 240_000 }, () => { secretKey: "minioadmin", }); - // Upload dummy test file. 
const testFile = `${__dirname}/dummy-file.txt`; + await client.makeBucket("test-bucket"); + await client.fPutObject("test-bucket", "minio-test-file.txt", testFile); - await minioClient.makeBucket("test-bucket"); - await minioClient.fPutObject("test-bucket", "minio-test-file.txt", testFile); - - // Verify upload - const objectExists = await minioClient + const objectExists = await client .statObject("test-bucket", "minio-test-file.txt") .then(() => true) .catch(() => false); expect(objectExists).toBeTruthy(); + // } }); - // } - // connectWithCustomCredentials { it("should work with custom credentials", async () => { + // connectWithCustomCredentials { await using container = await new MinioContainer(IMAGE) .withUsername("AzureDiamond") .withPassword("hunter2!") .start(); - const minioClient = new minio.Client({ + const client = new minio.Client({ endPoint: container.getHost(), port: container.getPort(), useSSL: false, @@ -48,13 +45,10 @@ describe("MinIO", { timeout: 240_000 }, () => { secretKey: "hunter2!", }); - // Create a bucket. - await minioClient.makeBucket("test-bucket"); - - // Verify bucket. - const bucketExits = await minioClient.bucketExists("test-bucket"); + await client.makeBucket("test-bucket"); + const bucketExits = await client.bucketExists("test-bucket"); expect(bucketExits).toBeTruthy(); + // } }); - // } }); diff --git a/packages/modules/mockserver/package.json b/packages/modules/mockserver/package.json index 5b908ecdd..119f39cbb 100644 --- a/packages/modules/mockserver/package.json +++ b/packages/modules/mockserver/package.json @@ -3,11 +3,6 @@ "version": "11.4.0", "description": "Mockserver module for Testcontainers", "main": "build/index.js", - "devDependencies": { - "@types/superagent": "^8.1.9", - "mockserver-client": "^5.15.0", - "superagent": "^10.2.2" - }, "scripts": { "prepack": "shx cp ../../../README.md . 
&& shx cp ../../../LICENSE .", "build": "tsc --project tsconfig.build.json" @@ -35,5 +30,10 @@ }, "dependencies": { "testcontainers": "^11.4.0" + }, + "devDependencies": { + "@types/superagent": "^8.1.9", + "mockserver-client": "^5.15.0", + "superagent": "^10.2.2" } } diff --git a/packages/modules/mockserver/src/mockserver-container.test.ts b/packages/modules/mockserver/src/mockserver-container.test.ts index 8d91d6b85..a144639ec 100644 --- a/packages/modules/mockserver/src/mockserver-container.test.ts +++ b/packages/modules/mockserver/src/mockserver-container.test.ts @@ -6,12 +6,11 @@ import { MockserverContainer } from "./mockserver-container"; const IMAGE = getImage(__dirname); describe("MockserverContainer", { timeout: 240_000 }, () => { - // startContainer { it("should start and accept mocks", async () => { + // httpMockServer { await using container = await new MockserverContainer(IMAGE).start(); - const client = mockServerClient(container.getHost(), container.getMockserverPort()); - const url = container.getUrl(); + const client = mockServerClient(container.getHost(), container.getMockserverPort()); await client.mockAnyResponse({ httpRequest: { method: "GET", @@ -25,12 +24,12 @@ describe("MockserverContainer", { timeout: 240_000 }, () => { }, }); - const response = await superagent.get(`${url}/foo`); + const response = await superagent.get(`${container.getUrl()}/foo`); expect(response.statusCode).toBe(200); expect(response.text).toBe("bar"); + // } }); - // } it("should return an https url", async () => { await using container = await new MockserverContainer(IMAGE).start(); @@ -38,11 +37,11 @@ describe("MockserverContainer", { timeout: 240_000 }, () => { expect(secureUrl.startsWith("https://")).to.equal(true, `${secureUrl} does not start with https://`); }); - // httpsRequests { it("should respond to https requests", async () => { + // mockServerHttps { await using container = await new MockserverContainer(IMAGE).start(); - const client = 
mockServerClient(container.getHost(), container.getMockserverPort()); + const client = mockServerClient(container.getHost(), container.getMockserverPort()); await client.mockAnyResponse({ httpRequest: { method: "GET", @@ -61,6 +60,6 @@ describe("MockserverContainer", { timeout: 240_000 }, () => { expect(response.statusCode).toBe(200); expect(response.text).toBe("bar"); + // } }); - // } }); diff --git a/packages/modules/mongodb/src/mongodb-container.test.ts b/packages/modules/mongodb/src/mongodb-container.test.ts index a74750460..e6c014f40 100644 --- a/packages/modules/mongodb/src/mongodb-container.test.ts +++ b/packages/modules/mongodb/src/mongodb-container.test.ts @@ -7,16 +7,15 @@ const IMAGE = getImage(__dirname); describe("MongoDBContainer", { timeout: 240_000 }, () => { it.each([IMAGE, "mongo:6.0.25", "mongo:4.4.29"])("should work with %s", async (image) => { // connectMongo { - await using mongodbContainer = await new MongoDBContainer(image).start(); + await using container = await new MongoDBContainer(image).start(); - const db = mongoose.createConnection(mongodbContainer.getConnectionString(), { directConnection: true }); - const fooCollection = db.collection("foo"); - const obj = { value: 1 }; + const db = mongoose.createConnection(container.getConnectionString(), { directConnection: true }); - const session = await db.startSession(); - await session.withTransaction(async () => await fooCollection.insertOne(obj)); + const obj = { value: 1 }; + const collection = db.collection("test"); + await collection.insertOne(obj); - const result = await fooCollection.findOne({ value: 1 }); + const result = await collection.findOne({ value: 1 }); expect(result).toEqual(obj); await db.close(); @@ -25,14 +24,15 @@ describe("MongoDBContainer", { timeout: 240_000 }, () => { it("should connect with credentials", async () => { // connectWithCredentials { - await using mongodbContainer = await new MongoDBContainer(IMAGE) - .withUsername("mongo_user") - 
.withPassword("mongo_password") + await using container = await new MongoDBContainer(IMAGE) + .withUsername("customUsername") + .withPassword("customPassword") .start(); + // } - const db = mongoose.createConnection(mongodbContainer.getConnectionString(), { directConnection: true }); + const db = mongoose.createConnection(container.getConnectionString(), { directConnection: true }); - const result = await db.collection("testcontainers").insertOne({ title: "testcontainers" }); + const result = await db.collection("test").insertOne({ title: "test" }); const resultId = result.insertedId.toString(); expect(resultId).toBeTruthy(); @@ -40,6 +40,5 @@ describe("MongoDBContainer", { timeout: 240_000 }, () => { expect(rsStatus?.set).toBe("rs0"); await db.close(); - // } }); }); diff --git a/packages/modules/mssqlserver/src/mssqlserver-container.test.ts b/packages/modules/mssqlserver/src/mssqlserver-container.test.ts index 9623fc42d..053255450 100644 --- a/packages/modules/mssqlserver/src/mssqlserver-container.test.ts +++ b/packages/modules/mssqlserver/src/mssqlserver-container.test.ts @@ -4,9 +4,9 @@ import { MSSQLServerContainer } from "./mssqlserver-container"; const IMAGE = getImage(__dirname); -describe("MSSqlServerContainer", { timeout: 180_000 }, () => { - // connect { +describe("MSSQLServerContainer", { timeout: 180_000 }, () => { it("should connect and return a query result", async () => { + // mssqlConnect { await using container = await new MSSQLServerContainer(IMAGE).acceptLicense().start(); const sqlConfig: config = { @@ -15,27 +15,20 @@ describe("MSSqlServerContainer", { timeout: 180_000 }, () => { database: container.getDatabase(), server: container.getHost(), port: container.getPort(), - pool: { - max: 1, - min: 0, - idleTimeoutMillis: 30000, - }, - options: { - trustServerCertificate: true, - }, + pool: { max: 1, min: 0, idleTimeoutMillis: 30000 }, + options: { trustServerCertificate: true }, }; - const connection = await sql.connect(sqlConfig); const { 
recordset } = await connection.query`SELECT 1;`; expect(recordset).toStrictEqual([{ "": 1 }]); await connection.close(); + // } }); - // } - // uriConnect { it("should connect and return a query result with database URI", async () => { + // mssqlUriConnect { await using container = await new MSSQLServerContainer(IMAGE).acceptLicense().start(); const connectionString = container.getConnectionUri(); @@ -45,12 +38,13 @@ describe("MSSqlServerContainer", { timeout: 180_000 }, () => { expect(recordset).toStrictEqual([{ "": 1 }]); await connection.close(); + // } }); - // } - // validPassword { it("should connect and return a query result with valid custom password", async () => { + // mssqlValidPassword { await using container = await new MSSQLServerContainer(IMAGE).acceptLicense().withPassword("I!@M#$eCur3").start(); + // } const connectionString = container.getConnectionUri(); const connection = await sql.connect(connectionString); @@ -60,24 +54,22 @@ describe("MSSqlServerContainer", { timeout: 180_000 }, () => { await connection.close(); }); - // } - // invalidPassword { it("should throw error with invalid password", async () => { const container = new MSSQLServerContainer(IMAGE).acceptLicense().withPassword("password"); await expect(container.start()).rejects.toThrow( Error('Log stream ended and message "/.*Recovery is complete.*/" was not received') ); }); - // } - // expressEdition { it("should start db with express edition", async () => { + // mssqlExpressEdition { await using container = await new MSSQLServerContainer(IMAGE) - .withWaitForMessage(/.*Attribute synchronization manager initialized*/) .acceptLicense() .withEnvironment({ MSSQL_PID: "Express" }) + .withWaitForMessage(/.*Attribute synchronization manager initialized*/) .start(); + // } const { output, exitCode } = await container.exec([ "/opt/mssql-tools/bin/sqlcmd", @@ -94,5 +86,4 @@ describe("MSSqlServerContainer", { timeout: 180_000 }, () => { expect(exitCode).toBe(0); 
expect(output).toContain("Express Edition"); }); - // } }); diff --git a/packages/modules/mysql/src/mysql-container.test.ts b/packages/modules/mysql/src/mysql-container.test.ts index a28226b67..3b0a4ebea 100644 --- a/packages/modules/mysql/src/mysql-container.test.ts +++ b/packages/modules/mysql/src/mysql-container.test.ts @@ -5,8 +5,8 @@ import { MySqlContainer } from "./mysql-container"; const IMAGE = getImage(__dirname); describe("MySqlContainer", { timeout: 240_000 }, () => { - // connect { it("should connect and execute query", async () => { + // mysqlConnect { await using container = await new MySqlContainer(IMAGE).start(); const client = await createConnection({ @@ -21,38 +21,38 @@ describe("MySqlContainer", { timeout: 240_000 }, () => { expect(rows).toEqual([{ res: 1 }]); await client.end(); + // } }); - // } - // uriConnect { it("should work with database URI", async () => { + // mysqlUriConnect { const username = "testUser"; const password = "testPassword"; const database = "testDB"; - // Test non-root user await using container = await new MySqlContainer(IMAGE) .withUsername(username) .withUserPassword(password) .withDatabase(database) .start(); + expect(container.getConnectionUri()).toEqual( `mysql://${username}:${password}@${container.getHost()}:${container.getPort()}/${database}` ); - // Test root user await using rootContainer = await new MySqlContainer(IMAGE) .withRootPassword(password) .withDatabase(database) .start(); + expect(rootContainer.getConnectionUri(true)).toEqual( `mysql://root:${password}@${rootContainer.getHost()}:${rootContainer.getPort()}/${database}` ); + // } }); - // } - // setDatabase { it("should set database", async () => { + // mysqlSetDatabase { await using container = await new MySqlContainer(IMAGE).withDatabase("customDatabase").start(); const client = await createConnection({ @@ -67,10 +67,10 @@ describe("MySqlContainer", { timeout: 240_000 }, () => { expect(rows).toEqual([{ res: "customDatabase" }]); await client.end(); + 
// } }); - // } - // setUsername { + // mysqlSetUsername { it("should set username", async () => { await using container = await new MySqlContainer(IMAGE).withUsername("customUsername").start(); @@ -89,12 +89,13 @@ describe("MySqlContainer", { timeout: 240_000 }, () => { }); // } - // executeQuery { it("should execute a query and return the result", async () => { + // mysqlExecuteQuery { await using container = await new MySqlContainer(IMAGE).start(); - const queryResult = await container.executeQuery("SELECT 1 as res"); - expect(queryResult).toEqual(expect.stringContaining("res\n1\n")); + const result = await container.executeQuery("SELECT 1 as res"); + expect(result).toEqual(expect.stringContaining("res\n1\n")); + // } }); it("should execute a query as root user", async () => { @@ -108,7 +109,6 @@ describe("MySqlContainer", { timeout: 240_000 }, () => { const rootQueryResult = await container.executeQuery("SELECT CURRENT_USER() as user", [], true); expect(rootQueryResult).toEqual(expect.stringContaining("user\nroot")); }); - // } it("should work with restarted container", async () => { await using container = await new MySqlContainer(IMAGE).start(); diff --git a/packages/modules/nats/src/nats-container.test.ts b/packages/modules/nats/src/nats-container.test.ts index 9a5427d30..5714572e1 100644 --- a/packages/modules/nats/src/nats-container.test.ts +++ b/packages/modules/nats/src/nats-container.test.ts @@ -6,44 +6,18 @@ import { NatsContainer } from "./nats-container"; const IMAGE = getImage(__dirname); describe("NatsContainer", { timeout: 180_000 }, () => { - // connect { - it("should start, connect and close", async () => { - await using container = await new NatsContainer(IMAGE).start(); - - // establish connection - const nc = await connect(container.getConnectionOptions()); - // close the connection - await nc.close(); - // check if the close was OK - const err = await nc.closed(); - expect(err).toBe(undefined); - }); - // } - - it("should start, connect and 
close using scratch image", async () => { - await using container = await new NatsContainer("nats:2.11").start(); - - // establish connection - const nc = await connect(container.getConnectionOptions()); - // close the connection - await nc.close(); - // check if the close was OK - const err = await nc.closed(); - expect(err).toBe(undefined); - }); - - // pubsub { it("should subscribe and receive one published message", async () => { + // natsPubsub { const SUBJECT = "HELLO"; const PAYLOAD = "WORLD"; + const TE = new TextEncoder(); + const TD = new TextDecoder(); await using container = await new NatsContainer(IMAGE).start(); const nc = await connect(container.getConnectionOptions()); - const TE = new TextEncoder(); - const TD = new TextDecoder(); - //---------------- const sub = nc.subscribe(SUBJECT); + (async () => { for await (const m of sub) { const actual = TD.decode(m.data); @@ -51,44 +25,34 @@ describe("NatsContainer", { timeout: 180_000 }, () => { } })().then(); - //---------------- nc.publish(SUBJECT, TE.encode(PAYLOAD)); - //---------------- await nc.drain(); await nc.close(); - const err = await nc.closed(); - expect(err).toBe(undefined); + // } }); - // } - // credentials { it("should start with alternative username and password ", async () => { - // set username and password like this - await using container = await new NatsContainer(IMAGE).withPass("1234").withUsername("George").start(); + // natsCredentials { + await using container = await new NatsContainer(IMAGE).withUsername("George").withPass("1234").start(); + // } const nc = await connect(container.getConnectionOptions()); - // close the connection + await nc.close(); - // check if the close was OK const err = await nc.closed(); expect(err).toBe(undefined); }); - // } - // jetstream { it("should start with JetStream ", async () => { - // enable JetStream + // natsJetstream { await using container = await new NatsContainer(IMAGE).withJetStream().start(); + // } const nc = await 
connect(container.getConnectionOptions()); - - // ensure JetStream is enabled, otherwise this will throw an error await jetstreamManager(nc); - // close the connection await nc.close(); - // check if the close was OK const err = await nc.closed(); expect(err).toBe(undefined); }); diff --git a/packages/modules/neo4j/src/neo4j-container.test.ts b/packages/modules/neo4j/src/neo4j-container.test.ts index 32f31cffa..049f75a6f 100755 --- a/packages/modules/neo4j/src/neo4j-container.test.ts +++ b/packages/modules/neo4j/src/neo4j-container.test.ts @@ -5,15 +5,16 @@ import { Neo4jContainer, Neo4jPlugin } from "./neo4j-container"; const IMAGE = getImage(__dirname); describe("Neo4jContainer", { timeout: 180_000 }, () => { - // createNode { it("should create a person node", async () => { + // createNode { await using container = await new Neo4jContainer(IMAGE).start(); + const driver = neo4j.driver( container.getBoltUri(), neo4j.auth.basic(container.getUsername(), container.getPassword()) ); - const session = driver.session(); + const personName = "Chris"; const result = await session.run("CREATE (a:Person {name: $name}) RETURN a", { name: personName }); const singleRecord = result.records[0]; @@ -22,8 +23,8 @@ describe("Neo4jContainer", { timeout: 180_000 }, () => { await session.close(); await driver.close(); + // } }); - // } // v5DefaultPassword { it("should connect to neo4j:v5 with default password", async () => { @@ -45,13 +46,15 @@ describe("Neo4jContainer", { timeout: 180_000 }, () => { }); // } - // setPassword { it("should connect with custom password", async () => { + // setPassword { await using container = await new Neo4jContainer(IMAGE).withPassword("xyz1234@!").start(); + const driver = neo4j.driver( container.getBoltUri(), neo4j.auth.basic(container.getUsername(), container.getPassword()) ); + // } const session = driver.session(); const personName = "Chris"; @@ -63,32 +66,33 @@ describe("Neo4jContainer", { timeout: 180_000 }, () => { await session.close(); 
await driver.close(); }); - // } - // apoc { it("should have APOC plugin installed", async () => { + // apoc { await using container = await new Neo4jContainer(IMAGE).withApoc().withStartupTimeout(120_000).start(); + const driver = neo4j.driver( container.getBoltUri(), neo4j.auth.basic(container.getUsername(), container.getPassword()) ); - const session = driver.session(); + const result = await session.run("CALL apoc.help('text')"); const singleRecord = result.records[0]; expect(singleRecord.length).toBeGreaterThan(0); + // } await session.close(); await driver.close(); }); - // } - // pluginsList { it("should work with plugin list", async () => { + // pluginsList { await using container = await new Neo4jContainer("neo4j:5.26.5") .withPlugins([Neo4jPlugin.APOC_EXTENDED, Neo4jPlugin.GRAPH_DATA_SCIENCE]) .withStartupTimeout(120_000) .start(); + const driver = neo4j.driver( container.getBoltUri(), neo4j.auth.basic(container.getUsername(), container.getPassword()) @@ -109,6 +113,6 @@ describe("Neo4jContainer", { timeout: 180_000 }, () => { await session.close(); await driver.close(); + // } }); - // } }); diff --git a/packages/modules/ollama/src/ollama-container.test.ts b/packages/modules/ollama/src/ollama-container.test.ts index 55de98d2c..b06b71ed7 100644 --- a/packages/modules/ollama/src/ollama-container.test.ts +++ b/packages/modules/ollama/src/ollama-container.test.ts @@ -1,4 +1,4 @@ -import { ImageName } from "testcontainers"; +import { ImageName, randomUuid } from "testcontainers"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { OllamaContainer } from "./ollama-container"; @@ -16,27 +16,23 @@ describe("OllamaContainer", { timeout: 180_000 }, () => { }); it.skip("download model and commit to image", async () => { + // ollamaPullModel { await using container = await new OllamaContainer(IMAGE).start(); - // pullModel { - const execResult = await container.exec(["ollama", "pull", "all-minilm"]); - // } - 
console.log(execResult.output); + await container.exec(["ollama", "pull", "all-minilm"]); + const response = await fetch(`${container.getEndpoint()}/api/tags`); expect(response.status).toEqual(200); const body = (await response.json()) as { models: { name: string }[] }; expect(body.models[0].name).toContain("all-minilm"); - const newImageName: string = "tc-ollama-allminilm-" + (Math.random() + 1).toString(36).substring(4).toLowerCase(); - // commitToImage { + const newImageName = "tc-ollama-allminilm-" + randomUuid().substring(4); await container.commitToImage(newImageName); - // } - // substitute { await using newContainer = await new OllamaContainer(newImageName).start(); + const newResponse = await fetch(`${newContainer.getEndpoint()}/api/tags`); + expect(newResponse.status).toEqual(200); + const newBody = (await newResponse.json()) as { models: { name: string }[] }; + expect(newBody.models[0].name).toContain("all-minilm"); // } - const response2 = await fetch(`${newContainer.getEndpoint()}/api/tags`); - expect(response2.status).toEqual(200); - const body2 = (await response2.json()) as { models: { name: string }[] }; - expect(body2.models[0].name).toContain("all-minilm"); }); }); diff --git a/packages/modules/opensearch/src/opensearch-container.test.ts b/packages/modules/opensearch/src/opensearch-container.test.ts index bff69a99d..676d5f33f 100644 --- a/packages/modules/opensearch/src/opensearch-container.test.ts +++ b/packages/modules/opensearch/src/opensearch-container.test.ts @@ -6,9 +6,10 @@ const IMAGE = getImage(__dirname); const images = ["opensearchproject/opensearch:2.19.2", IMAGE]; describe("OpenSearchContainer", { timeout: 180_000 }, () => { - // createIndex { it.each(images)("should create an index with %s", async (image) => { + // opensearchCreateIndex { await using container = await new OpenSearchContainer(image).start(); + const client = new Client({ node: container.getHttpUrl(), auth: { @@ -16,20 +17,21 @@ describe("OpenSearchContainer", { 
timeout: 180_000 }, () => { password: container.getPassword(), }, ssl: { - // trust the self-signed cert rejectUnauthorized: false, }, }); await client.indices.create({ index: "people" }); - const existsResponse = await client.indices.exists({ index: "people" }); - expect(existsResponse.body).toBe(true); + + const { body } = await client.indices.exists({ index: "people" }); + expect(body).toBe(true); + // } }); - // } - // indexDocument { it("should index a document", async () => { + // opensearchIndexDocument { await using container = await new OpenSearchContainer(IMAGE).start(); + const client = new Client({ node: container.getHttpUrl(), auth: { @@ -49,10 +51,10 @@ describe("OpenSearchContainer", { timeout: 180_000 }, () => { body: document, }); - const getResponse = await client.get({ index: "people", id: document.id }); - expect(getResponse.body._source).toStrictEqual(document); + const { body } = await client.get({ index: "people", id: document.id }); + expect(body._source).toEqual(document); + // } }); - // } it("should work with restarted container", async () => { await using container = await new OpenSearchContainer(IMAGE).start(); @@ -70,6 +72,7 @@ describe("OpenSearchContainer", { timeout: 180_000 }, () => { }); await client.indices.create({ index: "people" }); + const existsResponse = await client.indices.exists({ index: "people" }); expect(existsResponse.body).toBe(true); }); @@ -78,9 +81,10 @@ describe("OpenSearchContainer", { timeout: 180_000 }, () => { expect(() => new OpenSearchContainer(IMAGE).withPassword("weakpwd")).toThrowError(/Password "weakpwd" is too weak/); }); - // customPassword { it("should set custom password", async () => { + // opensearchCustomPassword { await using container = await new OpenSearchContainer(IMAGE).withPassword("Str0ng!Passw0rd2025").start(); + // } const client = new Client({ node: container.getHttpUrl(), @@ -94,8 +98,8 @@ describe("OpenSearchContainer", { timeout: 180_000 }, () => { }); await client.indices.create({ 
index: "people" }); - const existsResponse = await client.indices.exists({ index: "people" }); - expect(existsResponse.body).toBe(true); + + const { body } = await client.indices.exists({ index: "people" }); + expect(body).toBe(true); }); - // } }); diff --git a/packages/modules/postgresql/src/postgresql-container-snapshot.test.ts b/packages/modules/postgresql/src/postgresql-container-snapshot.test.ts index 135631340..e85838374 100644 --- a/packages/modules/postgresql/src/postgresql-container-snapshot.test.ts +++ b/packages/modules/postgresql/src/postgresql-container-snapshot.test.ts @@ -5,8 +5,8 @@ import { PostgreSqlContainer } from "./postgresql-container"; const IMAGE = getImage(__dirname); describe("PostgreSqlContainer snapshot and restore", { timeout: 180_000 }, () => { - // createAndRestoreFromSnapshot { it("should create and restore from snapshot", async () => { + // createAndRestoreFromSnapshot { await using container = await new PostgreSqlContainer(IMAGE).start(); // Connect to the database @@ -58,8 +58,8 @@ describe("PostgreSqlContainer snapshot and restore", { timeout: 180_000 }, () => expect(result.rows[0].name).toEqual("initial data"); await client.end(); + // } }); - // } it("should use custom snapshot name", async () => { await using container = await new PostgreSqlContainer(IMAGE).start(); diff --git a/packages/modules/postgresql/src/postgresql-container.test.ts b/packages/modules/postgresql/src/postgresql-container.test.ts index 303d3cd7e..04a43db7c 100644 --- a/packages/modules/postgresql/src/postgresql-container.test.ts +++ b/packages/modules/postgresql/src/postgresql-container.test.ts @@ -5,8 +5,8 @@ import { PostgreSqlContainer } from "./postgresql-container"; const IMAGE = getImage(__dirname); describe("PostgreSqlContainer", { timeout: 180_000 }, () => { - // connect { it("should connect and return a query result", async () => { + // pgConnect { await using container = await new PostgreSqlContainer(IMAGE).start(); const client = new Client({ 
@@ -22,16 +22,17 @@ describe("PostgreSqlContainer", { timeout: 180_000 }, () => { expect(result.rows[0]).toEqual({ "?column?": 1 }); await client.end(); + // } }); - // } - // uriConnect { it("should work with database URI", async () => { + // pgUriConnect { await using container = await new PostgreSqlContainer(IMAGE).start(); const client = new Client({ connectionString: container.getConnectionUri(), }); + // } await client.connect(); const result = await client.query("SELECT 1"); @@ -39,11 +40,11 @@ describe("PostgreSqlContainer", { timeout: 180_000 }, () => { await client.end(); }); - // } - // setDatabase { it("should set database", async () => { + // pgSetDatabase { await using container = await new PostgreSqlContainer(IMAGE).withDatabase("customDatabase").start(); + // } const client = new Client({ host: container.getHost(), @@ -59,11 +60,11 @@ describe("PostgreSqlContainer", { timeout: 180_000 }, () => { await client.end(); }); - // } - // setUsername { it("should set username", async () => { + // pgSetUsername { await using container = await new PostgreSqlContainer(IMAGE).withUsername("customUsername").start(); + // } const client = new Client({ host: container.getHost(), @@ -79,7 +80,6 @@ describe("PostgreSqlContainer", { timeout: 180_000 }, () => { await client.end(); }); - // } it("should work with restarted container", async () => { await using container = await new PostgreSqlContainer(IMAGE).start(); diff --git a/packages/modules/qdrant/src/qdrant-container.test.ts b/packages/modules/qdrant/src/qdrant-container.test.ts index 323d8429a..f7c189bb3 100644 --- a/packages/modules/qdrant/src/qdrant-container.test.ts +++ b/packages/modules/qdrant/src/qdrant-container.test.ts @@ -7,27 +7,27 @@ import { QdrantContainer } from "./qdrant-container"; const IMAGE = getImage(__dirname); describe("QdrantContainer", { timeout: 100_000 }, () => { - // connectQdrantSimple { it("should connect to the client", async () => { + // connectQdrantSimple { await using container 
= await new QdrantContainer(IMAGE).start(); const client = new QdrantClient({ url: `http://${container.getRestHostAddress()}` }); expect((await client.getCollections()).collections.length).toBe(0); + // } }); - // } - // connectQdrantWithApiKey { it("should work with valid API keys", async () => { + // connectQdrantWithApiKey { const apiKey = crypto.randomUUID(); await using container = await new QdrantContainer(IMAGE).withApiKey(apiKey).start(); const client = new QdrantClient({ url: `http://${container.getRestHostAddress()}`, apiKey }); + // } expect((await client.getCollections()).collections.length).toBe(0); }); - // } it("should fail for invalid API keys", async () => { const apiKey = crypto.randomUUID(); @@ -42,17 +42,17 @@ describe("QdrantContainer", { timeout: 100_000 }, () => { await expect(client.getCollections()).rejects.toThrow("Unauthorized"); }); - // connectQdrantWithConfig { it("should work with config files - valid API key", async () => { + // connectQdrantWithConfig { await using container = await new QdrantContainer(IMAGE) .withConfigFile(path.resolve(__dirname, "test_config.yaml")) .start(); const client = new QdrantClient({ url: `http://${container.getRestHostAddress()}`, apiKey: "SOME_TEST_KEY" }); + // } expect((await client.getCollections()).collections.length).toBe(0); }); - // } it("should work with config files - invalid API key", async () => { await using container = await new QdrantContainer(IMAGE) diff --git a/packages/modules/rabbitmq/src/rabbitmq-container.test.ts b/packages/modules/rabbitmq/src/rabbitmq-container.test.ts index 6a2c649c5..49f8562de 100644 --- a/packages/modules/rabbitmq/src/rabbitmq-container.test.ts +++ b/packages/modules/rabbitmq/src/rabbitmq-container.test.ts @@ -7,19 +7,19 @@ const IMAGE = getImage(__dirname); describe("RabbitMQContainer", { timeout: 240_000 }, () => { // start { it("should start, connect and close", async () => { - await using rabbitMQContainer = await new RabbitMQContainer(IMAGE).start(); + 
await using container = await new RabbitMQContainer(IMAGE).start(); - const connection = await amqp.connect(rabbitMQContainer.getAmqpUrl()); + const connection = await amqp.connect(container.getAmqpUrl()); await connection.close(); }); // } - // credentials { it("different username and password", async () => { + // credentials { const USER = "user"; const PASSWORD = "password"; - await using rabbitMQContainer = await new RabbitMQContainer(IMAGE) + await using container = await new RabbitMQContainer(IMAGE) .withEnvironment({ RABBITMQ_DEFAULT_USER: USER, RABBITMQ_DEFAULT_PASS: PASSWORD, @@ -29,21 +29,21 @@ describe("RabbitMQContainer", { timeout: 240_000 }, () => { const connection = await amqp.connect({ username: USER, password: PASSWORD, - port: rabbitMQContainer.getMappedPort(5672), + port: container.getMappedPort(5672), }); + // } await connection.close(); }); - // } - // pubsub { it("test publish and subscribe", async () => { + // pubsub { const QUEUE = "test"; const PAYLOAD = "Hello World"; - await using rabbitMQContainer = await new RabbitMQContainer(IMAGE).start(); - const connection = await amqp.connect(rabbitMQContainer.getAmqpUrl()); + await using container = await new RabbitMQContainer(IMAGE).start(); + const connection = await amqp.connect(container.getAmqpUrl()); const channel = await connection.createChannel(); await channel.assertQueue(QUEUE); @@ -58,6 +58,6 @@ describe("RabbitMQContainer", { timeout: 240_000 }, () => { await channel.close(); await connection.close(); + // } }, 20_000); - // } }); diff --git a/packages/modules/redis/src/redis-container.test.ts b/packages/modules/redis/src/redis-container.test.ts index 99e803d63..42c33f491 100644 --- a/packages/modules/redis/src/redis-container.test.ts +++ b/packages/modules/redis/src/redis-container.test.ts @@ -1,30 +1,31 @@ import fs from "fs"; -import os from "os"; import path from "path"; import { createClient } from "redis"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; 
-import { RedisContainer, StartedRedisContainer } from "./redis-container"; +import { RedisContainer } from "./redis-container"; const IMAGE = getImage(__dirname); describe("RedisContainer", { timeout: 240_000 }, () => { - // startContainer { it("should connect and execute set-get", async () => { + // redisStartContainer { await using container = await new RedisContainer(IMAGE).start(); - const client = await connectTo(container); + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); expect(await client.get("key")).toBe("val"); client.destroy(); + // } }); - // } it("should connect with password and execute set-get", async () => { await using container = await new RedisContainer(IMAGE).withPassword("test").start(); - const client = await connectTo(container); + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); expect(await client.get("key")).toBe("val"); @@ -32,35 +33,38 @@ describe("RedisContainer", { timeout: 240_000 }, () => { client.destroy(); }); - // persistentData { it("should reconnect with volume and persistence data", async () => { - const sourcePath = fs.mkdtempSync(path.join(os.tmpdir(), "redis-")); + // persistentData { + const sourcePath = fs.mkdtempSync("redis-"); + await using container = await new RedisContainer(IMAGE).withPassword("test").withPersistence(sourcePath).start(); - let client = await connectTo(container); + let client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); client.destroy(); + await container.restart(); - client = await connectTo(container); + client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); + expect(await client.get("key")).toBe("val"); client.destroy(); - try { - fs.rmSync(sourcePath, { force: true, recursive: true }); - } catch (e) { - //Ignore clean up, when have no access 
on fs. - console.log(e); - } + fs.rmSync(sourcePath, { force: true, recursive: true }); + // } }); - // } - // initial data import { it("should load initial data and can read it", async () => { + // withPredefinedData { await using container = await new RedisContainer(IMAGE) .withPassword("test") .withInitialData(path.join(__dirname, "initData.redis")) .start(); - const client = await connectTo(container); + + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); + const user = { first_name: "David", last_name: "Bloom", @@ -69,58 +73,51 @@ describe("RedisContainer", { timeout: 240_000 }, () => { expect(await client.get("user:002")).toBe(JSON.stringify(user)); client.destroy(); + // } }); - // } - // startWithCredentials { it("should start with credentials and login", async () => { + // redisStartWithCredentials { const password = "testPassword"; - // Test authentication await using container = await new RedisContainer(IMAGE).withPassword(password).start(); + expect(container.getConnectionUrl()).toEqual(`redis://:${password}@${container.getHost()}:${container.getPort()}`); + // } - const client = await connectTo(container); + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); expect(await client.get("key")).toBe("val"); client.destroy(); }); - // } - // executeCommand { it("should execute container cmd and return the result", async () => { + // executeCommand { await using container = await new RedisContainer(IMAGE).start(); const queryResult = await container.executeCliCmd("info", ["clients"]); + expect(queryResult).toEqual(expect.stringContaining("connected_clients:1")); + // } }); - // } - // startWithRedisStack { it("should start with redis-stack-server and json module", async () => { + // startWithRedisStack { await using container = await new RedisContainer("redis/redis-stack-server:7.4.0-v4") .withPassword("testPassword") .start(); - const client 
= await connectTo(container); + + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.json.set("key", "$", { name: "test" }); const result = await client.json.get("key"); expect(result).toEqual({ name: "test" }); client.destroy(); + // } }); - // } - - // simpleConnect { - async function connectTo(container: StartedRedisContainer) { - const client = createClient({ - url: container.getConnectionUrl(), - }); - await client.connect(); - expect(client.isOpen).toBeTruthy(); - return client; - } - // } }); diff --git a/packages/modules/redpanda/src/redpanda-container.test.ts b/packages/modules/redpanda/src/redpanda-container.test.ts index 3e39b6f8d..824ba8321 100644 --- a/packages/modules/redpanda/src/redpanda-container.test.ts +++ b/packages/modules/redpanda/src/redpanda-container.test.ts @@ -1,21 +1,22 @@ -import { Kafka, KafkaConfig, logLevel } from "kafkajs"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { RedpandaContainer, StartedRedpandaContainer } from "./redpanda-container"; +import { RedpandaContainer } from "./redpanda-container"; +import { assertMessageProducedAndConsumed } from "./test-helper"; const IMAGE = getImage(__dirname); describe("RedpandaContainer", { timeout: 240_000 }, () => { - // connectToKafka { it("should connect", async () => { - await using redpandaContainer = await new RedpandaContainer(IMAGE).start(); - await testPubSub(redpandaContainer); + // connectToKafka { + await using container = await new RedpandaContainer(IMAGE).start(); + + await assertMessageProducedAndConsumed(container); + // } }); - // } - // connectToSchemaRegistry { it("should connect to schema registry", async () => { - await using redpandaContainer = await new RedpandaContainer(IMAGE).start(); - const schemaRegistryUrl = redpandaContainer.getSchemaRegistryAddress(); + // connectToSchemaRegistry { + await using container = await new RedpandaContainer(IMAGE).start(); + const 
schemaRegistryUrl = container.getSchemaRegistryAddress(); const response = await fetch(`${schemaRegistryUrl}/subjects`, { method: "GET", @@ -25,63 +26,28 @@ describe("RedpandaContainer", { timeout: 240_000 }, () => { }); expect(response.status).toBe(200); + // } }); - // } - // connectToAdmin { it("should connect to admin", async () => { - await using redpandaContainer = await new RedpandaContainer(IMAGE).start(); - const adminUrl = `${redpandaContainer.getAdminAddress()}/v1`; + // connectToAdmin { + await using container = await new RedpandaContainer(IMAGE).start(); + const adminUrl = `${container.getAdminAddress()}/v1`; const response = await fetch(adminUrl); expect(response.status).toBe(200); + // } }); - // } - // connectToRestProxy { it("should connect to rest proxy", async () => { - await using redpandaContainer = await new RedpandaContainer(IMAGE).start(); - const restProxyUrl = `${redpandaContainer.getRestProxyAddress()}/topics`; + // connectToRestProxy { + await using container = await new RedpandaContainer(IMAGE).start(); + const restProxyUrl = `${container.getRestProxyAddress()}/topics`; const response = await fetch(restProxyUrl); expect(response.status).toBe(200); + // } }); - // } - - const testPubSub = async ( - redpandaContainer: StartedRedpandaContainer, - additionalConfig: Partial = {} - ) => { - const kafka = new Kafka({ - logLevel: logLevel.NOTHING, - brokers: [redpandaContainer.getBootstrapServers()], - ...additionalConfig, - }); - - const producer = kafka.producer(); - await producer.connect(); - - const consumer = kafka.consumer({ groupId: "test-group" }); - await consumer.connect(); - - await producer.send({ - topic: "test-topic", - messages: [{ value: "test message" }], - }); - - await consumer.subscribe({ topic: "test-topic", fromBeginning: true }); - - const consumedMessage = await new Promise((resolve) => { - consumer.run({ - eachMessage: async ({ message }) => resolve(message.value?.toString()), - }); - }); - - 
expect(consumedMessage).toBe("test message"); - - await consumer.disconnect(); - await producer.disconnect(); - }; }); diff --git a/packages/modules/redpanda/src/test-helper.ts b/packages/modules/redpanda/src/test-helper.ts new file mode 100644 index 000000000..d156a7a36 --- /dev/null +++ b/packages/modules/redpanda/src/test-helper.ts @@ -0,0 +1,26 @@ +import { Kafka, logLevel } from "kafkajs"; +import { StartedRedpandaContainer } from "./redpanda-container"; + +// redpandaTestHelper { +export async function assertMessageProducedAndConsumed(container: StartedRedpandaContainer) { + const kafka = new Kafka({ logLevel: logLevel.NOTHING, brokers: [container.getBootstrapServers()] }); + + const producer = kafka.producer(); + await producer.connect(); + const consumer = kafka.consumer({ groupId: "test-group" }); + await consumer.connect(); + + await producer.send({ topic: "test-topic", messages: [{ value: "test message" }] }); + await consumer.subscribe({ topic: "test-topic", fromBeginning: true }); + + const consumedMessage = await new Promise((resolve) => + consumer.run({ + eachMessage: async ({ message }) => resolve(message.value?.toString()), + }) + ); + expect(consumedMessage).toBe("test message"); + + await consumer.disconnect(); + await producer.disconnect(); +} +// } diff --git a/packages/modules/redpanda/tsconfig.build.json b/packages/modules/redpanda/tsconfig.build.json index ff7390b10..ee782d3d0 100644 --- a/packages/modules/redpanda/tsconfig.build.json +++ b/packages/modules/redpanda/tsconfig.build.json @@ -2,7 +2,8 @@ "extends": "./tsconfig.json", "exclude": [ "build", - "src/**/*.test.ts" + "src/**/*.test.ts", + "src/test-helper.ts" ], "references": [ { diff --git a/packages/modules/scylladb/src/scylladb-container.test.ts b/packages/modules/scylladb/src/scylladb-container.test.ts index 1326ea35a..6e33942a2 100644 --- a/packages/modules/scylladb/src/scylladb-container.test.ts +++ b/packages/modules/scylladb/src/scylladb-container.test.ts @@ -5,8 +5,8 @@ 
import { ScyllaContainer } from "./scylladb-container"; const IMAGE = getImage(__dirname); describe("ScyllaDB", { timeout: 240_000 }, () => { - // connectWithDefaultCredentials { it("should connect and execute a query", async () => { + // connectWithDefaultCredentials { await using container = await new ScyllaContainer(IMAGE).start(); const client = new Client({ @@ -21,8 +21,8 @@ describe("ScyllaDB", { timeout: 240_000 }, () => { expect(result.rows[0].cql_version).toBe("3.3.1"); await client.shutdown(); + // } }); - // } // createAndFetchData { it("should create keyspace, a table, insert data, and retrieve it", async () => { diff --git a/packages/modules/selenium/Dockerfile b/packages/modules/selenium/Dockerfile index 33312bb51..fb4128a43 100644 --- a/packages/modules/selenium/Dockerfile +++ b/packages/modules/selenium/Dockerfile @@ -1 +1,4 @@ -FROM selenium/standalone-chrome:138.0 +FROM selenium/standalone-chrome:112.0 +FROM seleniarm/standalone-chromium:112.0 +FROM selenium/standalone-firefox:112.0 +FROM seleniarm/standalone-firefox:112.0 \ No newline at end of file diff --git a/packages/modules/selenium/src/selenium-container.test.ts b/packages/modules/selenium/src/selenium-container.test.ts index 961752ff3..d0bd29d5d 100644 --- a/packages/modules/selenium/src/selenium-container.test.ts +++ b/packages/modules/selenium/src/selenium-container.test.ts @@ -2,42 +2,47 @@ import path from "path"; import { Browser, Builder } from "selenium-webdriver"; import { GenericContainer } from "testcontainers"; import tmp from "tmp"; +import { getImage } from "../../../testcontainers/src/utils/test-helper"; import { SELENIUM_VIDEO_IMAGE, SeleniumContainer } from "./selenium-container"; -describe("SeleniumContainer", { timeout: 180_000 }, () => { - const browsers = [ - ["CHROME", process.arch === "arm64" ? `seleniarm/standalone-chromium:112.0` : `selenium/standalone-chrome:112.0`], - ["FIREFOX", process.arch === "arm64" ? 
`seleniarm/standalone-firefox:112.0` : `selenium/standalone-firefox:112.0`], - ] as const; - - browsers.forEach(async ([browser, image]) => { - it(`should work for ${browser}`, async () => { - await using container = await new SeleniumContainer(image).start(); - const driver = await new Builder().forBrowser(Browser[browser]).usingServer(container.getServerUrl()).build(); - - await driver.get("https://testcontainers.com"); - expect(await driver.getTitle()).toEqual("Testcontainers"); - - await driver.quit(); - }); - - it(`should record video and save to disk for ${browser}`, async () => { - const container = await new SeleniumContainer(image).withRecording().start(); - const driver = await new Builder().forBrowser(Browser[browser]).usingServer(container.getServerUrl()).build(); - await driver.get("https://testcontainers.com"); - await driver.quit(); - const stoppedContainer = await container.stop(); - - const videoFilePath = tmp.fileSync({ keep: false, prefix: `video-${browser}`, postfix: ".mp4" }).name; - const videoFileName = path.basename(videoFilePath); - await stoppedContainer.saveRecording(videoFilePath); - - await using ffmpegContainer = await new GenericContainer(SELENIUM_VIDEO_IMAGE) - .withCommand(["sleep", "infinity"]) - .start(); - await ffmpegContainer.copyFilesToContainer([{ source: videoFilePath, target: `/tmp/${videoFileName}` }]); - const { exitCode } = await ffmpegContainer.exec(["ffprobe", `/tmp/${videoFileName}`]); - expect(exitCode).toBe(0); - }); +const browsers = [ + ["CHROME", process.arch === "arm64" ? getImage(__dirname, 1) : getImage(__dirname, 0)], + ["FIREFOX", process.arch === "arm64" ? 
getImage(__dirname, 3) : getImage(__dirname, 2)], +] as const; + +describe.for(browsers)("SeleniumContainer", { timeout: 240_000 }, ([browser, image]) => { + it(`should work for ${browser}`, async () => { + // seleniumExample { + await using container = await new SeleniumContainer(image).start(); + + const driver = await new Builder().forBrowser(Browser[browser]).usingServer(container.getServerUrl()).build(); + await driver.get("https://testcontainers.com"); + expect(await driver.getTitle()).toEqual("Testcontainers"); + + await driver.quit(); + // } + }); + + it(`should record video and save to disk for ${browser}`, async () => { + // seleniumVideoExample { + const container = await new SeleniumContainer(image).withRecording().start(); + + const driver = await new Builder().forBrowser(Browser[browser]).usingServer(container.getServerUrl()).build(); + await driver.get("https://testcontainers.com"); + + await driver.quit(); + const stoppedContainer = await container.stop(); + + const videoFilePath = tmp.fileSync({ keep: false, prefix: `video-${browser}`, postfix: ".mp4" }).name; + const videoFileName = path.basename(videoFilePath); + await stoppedContainer.saveRecording(videoFilePath); + + await using ffmpegContainer = await new GenericContainer(SELENIUM_VIDEO_IMAGE) + .withCommand(["sleep", "infinity"]) + .start(); + await ffmpegContainer.copyFilesToContainer([{ source: videoFilePath, target: `/tmp/${videoFileName}` }]); + const { exitCode } = await ffmpegContainer.exec(["ffprobe", `/tmp/${videoFileName}`]); + expect(exitCode).toBe(0); + // } }); }); diff --git a/packages/modules/toxiproxy/src/toxiproxy-container.test.ts b/packages/modules/toxiproxy/src/toxiproxy-container.test.ts index e25930d37..84b8835e6 100644 --- a/packages/modules/toxiproxy/src/toxiproxy-container.test.ts +++ b/packages/modules/toxiproxy/src/toxiproxy-container.test.ts @@ -5,8 +5,8 @@ import { ToxiProxyContainer, TPClient } from "./toxiproxy-container"; const IMAGE = getImage(__dirname); 
describe("ToxiProxyContainer", { timeout: 240_000 }, () => { - // create_proxy { it("should create a proxy to an endpoint", async () => { + // create_proxy { await using network = await new Network().start(); await using _ = await new GenericContainer("cristianrgreco/testcontainer:1.1.14") .withExposedPorts(8080) @@ -23,11 +23,11 @@ describe("ToxiProxyContainer", { timeout: 240_000 }, () => { const response = await fetch(`http://${appProxy.host}:${appProxy.port}/hello-world`); expect(response.status).toBe(200); + // } }); - // } - // enabled_disabled { it("should enable and disable a proxy", async () => { + // enabled_disabled { await using network = await new Network().start(); await using _ = await new GenericContainer("cristianrgreco/testcontainer:1.1.14") .withExposedPorts(8080) @@ -48,11 +48,11 @@ describe("ToxiProxyContainer", { timeout: 240_000 }, () => { await appProxy.setEnabled(true); const response = await fetch(`http://${appProxy.host}:${appProxy.port}/hello-world`); expect(response.status).toBe(200); + // } }); - // } - // adding_toxic { it("should add a toxic to a proxy and then remove", async () => { + // adding_toxic { await using network = await new Network().start(); await using _ = await new GenericContainer("cristianrgreco/testcontainer:1.1.14") .withExposedPorts(8080) @@ -67,7 +67,6 @@ describe("ToxiProxyContainer", { timeout: 240_000 }, () => { upstream: "app:8080", }); - // See https://github.com/ihsw/toxiproxy-node-client for details on the instance interface const toxic = await appProxy.instance.addToxic({ attributes: { jitter: 50, @@ -85,8 +84,8 @@ describe("ToxiProxyContainer", { timeout: 240_000 }, () => { expect(after - before).toBeGreaterThan(1000); await toxic.remove(); + // } }); - // } it("should create multiple proxies", async () => { await using network = await new Network().start(); diff --git a/packages/modules/valkey/src/valkey-container.test.ts b/packages/modules/valkey/src/valkey-container.test.ts index fa68dae9e..c0edcfecb 
100644 --- a/packages/modules/valkey/src/valkey-container.test.ts +++ b/packages/modules/valkey/src/valkey-container.test.ts @@ -1,61 +1,71 @@ import fs from "fs"; -import os from "os"; import path from "path"; import { createClient } from "redis"; import { getImage } from "../../../testcontainers/src/utils/test-helper"; -import { StartedValkeyContainer, ValkeyContainer } from "./valkey-container"; +import { ValkeyContainer } from "./valkey-container"; const IMAGE = getImage(__dirname); describe("ValkeyContainer", { timeout: 240_000 }, () => { it("should connect and execute set-get", async () => { + // valkeyStartContainer { await using container = await new ValkeyContainer(IMAGE).start(); - const client = await connectTo(container); + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); expect(await client.get("key")).toBe("val"); - await client.disconnect(); + client.destroy(); + // } }); it("should connect with password and execute set-get", async () => { await using container = await new ValkeyContainer(IMAGE).withPassword("test").start(); - const client = await connectTo(container); + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); expect(await client.get("key")).toBe("val"); - await client.disconnect(); + client.destroy(); }); it("should reconnect with volume and persistence data", async () => { - const sourcePath = fs.mkdtempSync(path.join(os.tmpdir(), "valkey-")); + // valkeyWithPersistentData { + const sourcePath = fs.mkdtempSync("valkey-"); + await using container = await new ValkeyContainer(IMAGE).withPassword("test").withPersistence(sourcePath).start(); - let client = await connectTo(container); + + let client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); - await client.disconnect(); + client.destroy(); + await container.restart(); - 
client = await connectTo(container); + client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); + expect(await client.get("key")).toBe("val"); - await client.disconnect(); - try { - fs.rmSync(sourcePath, { force: true, recursive: true }); - } catch (e) { - //Ignore clean up, when have no access on fs. - console.log(e); - } + client.destroy(); + fs.rmSync(sourcePath, { force: true, recursive: true }); + // } }); it("should load initial data and can read it", async () => { + // valkeyWithPredefinedData { await using container = await new ValkeyContainer(IMAGE) .withPassword("test") .withInitialData(path.join(__dirname, "initData.valkey")) .start(); - const client = await connectTo(container); + + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); + const user = { first_name: "David", last_name: "Bloom", @@ -63,36 +73,35 @@ describe("ValkeyContainer", { timeout: 240_000 }, () => { }; expect(await client.get("user:002")).toBe(JSON.stringify(user)); - await client.disconnect(); + client.destroy(); + // } }); it("should start with credentials and login", async () => { + // valkeyWithCredentials { const password = "testPassword"; await using container = await new ValkeyContainer(IMAGE).withPassword(password).start(); + expect(container.getConnectionUrl()).toEqual(`redis://:${password}@${container.getHost()}:${container.getPort()}`); + // } - const client = await connectTo(container); + const client = createClient({ url: container.getConnectionUrl() }); + await client.connect(); await client.set("key", "val"); expect(await client.get("key")).toBe("val"); - await client.disconnect(); + client.destroy(); }); it("should execute container cmd and return the result", async () => { + // valkeyExecuteCommand { await using container = await new ValkeyContainer(IMAGE).start(); const queryResult = await container.executeCliCmd("info", ["clients"]); + 
expect(queryResult).toEqual(expect.stringContaining("connected_clients:1")); + // } }); - - async function connectTo(container: StartedValkeyContainer) { - const client = createClient({ - url: container.getConnectionUrl(), - }); - await client.connect(); - expect(client.isOpen).toBeTruthy(); - return client; - } }); diff --git a/packages/modules/vault/src/vault-container.test.ts b/packages/modules/vault/src/vault-container.test.ts index 2a1afdcac..3bf4b6b91 100644 --- a/packages/modules/vault/src/vault-container.test.ts +++ b/packages/modules/vault/src/vault-container.test.ts @@ -6,8 +6,8 @@ const VAULT_TOKEN = "my-root-token"; const IMAGE = getImage(__dirname); describe("VaultContainer", { timeout: 180_000 }, () => { - // inside_block:readWrite { it("should start Vault and allow reading/writing secrets", async () => { + // inside_block:readWrite { await using container = await new VaultContainer(IMAGE).withVaultToken(VAULT_TOKEN).start(); const client = vault({ @@ -28,11 +28,11 @@ describe("VaultContainer", { timeout: 180_000 }, () => { expect(data.message).toBe("world"); expect(data.other).toBe("vault"); + // } }); - // } - // inside_block:initCommands { it("should execute init commands using vault CLI", async () => { + // inside_block:initCommands { await using container = await new VaultContainer(IMAGE) .withVaultToken(VAULT_TOKEN) .withInitCommands("secrets enable transit", "write -f transit/keys/my-key") @@ -42,6 +42,6 @@ describe("VaultContainer", { timeout: 180_000 }, () => { expect(result.exitCode).toBe(0); expect(result.output).toContain("my-key"); + // } }); - // } }); diff --git a/packages/modules/weaviate/src/weaviate-container.test.ts b/packages/modules/weaviate/src/weaviate-container.test.ts index 96f87d9d5..0b6cc5266 100644 --- a/packages/modules/weaviate/src/weaviate-container.test.ts +++ b/packages/modules/weaviate/src/weaviate-container.test.ts @@ -6,17 +6,17 @@ import { WeaviateContainer } from "./weaviate-container"; const IMAGE = 
getImage(__dirname); describe("WeaviateContainer", { timeout: 100_000 }, () => { - // connectWeaviate { it("should expose ports", async () => { + // connectWeaviate { await using container = await new WeaviateContainer(IMAGE).start(); expect(container.getHttpHostAddress()).toBeDefined(); expect(container.getGrpcHostAddress()).toBeDefined(); + // } }); - // } - // connectWeaviateWithClient { it("should connect to Weaviate", async () => { + // connectWeaviateWithClient { await using container = await new WeaviateContainer(IMAGE).start(); const client = weaviate.client({ @@ -26,11 +26,11 @@ describe("WeaviateContainer", { timeout: 100_000 }, () => { const res = await client.misc.metaGetter().do(); expect(res.version).toBeDefined(); + // } }); - // } - // connectWeaviateWithModules { it("should connect to Weaviate with modules", async () => { + // connectWeaviateWithModules { const enableModules = [ "backup-filesystem", "text2vec-openai", @@ -42,6 +42,7 @@ describe("WeaviateContainer", { timeout: 100_000 }, () => { ENABLE_MODULES: enableModules.join(","), BACKUP_FILESYSTEM_PATH: "/tmp/backups", }; + await using container = await new WeaviateContainer(IMAGE).withEnvironment(environment).start(); const client = weaviate.client({ @@ -52,9 +53,7 @@ describe("WeaviateContainer", { timeout: 100_000 }, () => { const res = await client.misc.metaGetter().do(); expect(res.version).toBeDefined(); expect(res.modules).toBeDefined(); - enableModules.forEach((module) => { - expect(res.modules[module]).toBeDefined(); - }); + enableModules.forEach((module) => expect(res.modules[module]).toBeDefined()); + // } }); - // } }); diff --git a/vitest.config.ts b/vitest.config.ts index c20a8b6f0..92209d031 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -7,6 +7,7 @@ export default defineConfig({ env: { DEBUG: "testcontainers*", }, + passWithNoTests: true, silent: "passed-only", mockReset: true, restoreMocks: true,