diff --git a/scripts/e2e/datadog-logs/compose.yaml b/scripts/e2e/datadog-logs/compose.yaml index c12b4b1fc491e..221554dded577 100644 --- a/scripts/e2e/datadog-logs/compose.yaml +++ b/scripts/e2e/datadog-logs/compose.yaml @@ -92,7 +92,6 @@ services: environment: - FEATURES=e2e-tests-datadog working_dir: /home/vector - network_mode: host command: - "/usr/bin/vector" - "-vvv" @@ -118,6 +117,7 @@ services: networks: default: name: ${VECTOR_NETWORK} + external: true volumes: log_path: {} diff --git a/scripts/e2e/datadog-metrics/compose.yaml b/scripts/e2e/datadog-metrics/compose.yaml index ecfd52135cb6a..209cfb1785450 100644 --- a/scripts/e2e/datadog-metrics/compose.yaml +++ b/scripts/e2e/datadog-metrics/compose.yaml @@ -53,7 +53,6 @@ services: environment: - FEATURES=e2e-tests-datadog working_dir: /home/vector - network_mode: host command: - "/usr/bin/vector" - "-vvv" @@ -77,6 +76,7 @@ services: networks: default: name: ${VECTOR_NETWORK} + external: true volumes: target: {} diff --git a/scripts/e2e/opentelemetry-logs/compose.yaml b/scripts/e2e/opentelemetry-logs/compose.yaml index f2c6af173bd85..4c204d5ab695d 100644 --- a/scripts/e2e/opentelemetry-logs/compose.yaml +++ b/scripts/e2e/opentelemetry-logs/compose.yaml @@ -86,3 +86,8 @@ services: volumes: vector_target: external: true + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/amqp/compose.yaml b/scripts/integration/amqp/compose.yaml index a60a27155df0b..c6561ae225008 100644 --- a/scripts/integration/amqp/compose.yaml +++ b/scripts/integration/amqp/compose.yaml @@ -13,3 +13,8 @@ services: - RABBITMQ_SSL_FAIL_IF_NO_PEER_CERT=false volumes: - ../../..:/code + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/aws/compose.yaml b/scripts/integration/aws/compose.yaml index 55e926dcdc1af..44fe67d7e9445 100644 --- a/scripts/integration/aws/compose.yaml +++ b/scripts/integration/aws/compose.yaml @@ -12,3 +12,8 @@ services: 
volumes: - $DOCKER_SOCKET:/var/run/docker.sock - $HOME/.aws/:/home/.aws/ + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/azure/compose.yaml b/scripts/integration/azure/compose.yaml index 6c7b00e37d6e1..933a72d235a24 100644 --- a/scripts/integration/azure/compose.yaml +++ b/scripts/integration/azure/compose.yaml @@ -6,3 +6,8 @@ services: command: azurite --blobHost 0.0.0.0 --loose volumes: - /var/run:/var/run + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/clickhouse/compose.yaml b/scripts/integration/clickhouse/compose.yaml index 62f8a90a543c2..fd823897ec4c2 100644 --- a/scripts/integration/clickhouse/compose.yaml +++ b/scripts/integration/clickhouse/compose.yaml @@ -3,3 +3,8 @@ version: '3' services: clickhouse: image: docker.io/clickhouse/clickhouse-server:${CONFIG_VERSION} + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/databend/compose.yaml b/scripts/integration/databend/compose.yaml index 65ed7a9c809d2..5304afc507960 100644 --- a/scripts/integration/databend/compose.yaml +++ b/scripts/integration/databend/compose.yaml @@ -19,3 +19,8 @@ services: - minio healthcheck: test: "curl -f localhost:8080/v1/health || exit 1" + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/datadog-agent/compose.yaml b/scripts/integration/datadog-agent/compose.yaml index 1b0b0a5a0fff6..0fe91c2d44c49 100644 --- a/scripts/integration/datadog-agent/compose.yaml +++ b/scripts/integration/datadog-agent/compose.yaml @@ -27,3 +27,8 @@ services: - DD_CMD_PORT=5002 - DD_USE_DOGSTATSD=false - DD_HOSTNAME=datadog-trace-agent + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/datadog-traces/compose.yaml b/scripts/integration/datadog-traces/compose.yaml index 8ae4c0dd6c026..f94ee0b53d089 100644 --- a/scripts/integration/datadog-traces/compose.yaml 
+++ b/scripts/integration/datadog-traces/compose.yaml @@ -29,3 +29,8 @@ services: - DD_APM_MAX_MEMORY=0 - DD_APM_MAX_CPU_PERCENT=0 - DD_HOSTNAME=datadog-trace-agent-to-vector + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/dnstap/compose.yaml b/scripts/integration/dnstap/compose.yaml index ae5f973aff541..00264121cb474 100644 --- a/scripts/integration/dnstap/compose.yaml +++ b/scripts/integration/dnstap/compose.yaml @@ -12,4 +12,11 @@ services: - dnstap-sockets:/bind3/etc/bind/socket volumes: - dnstap-sockets: {} + dnstap-sockets: + name: dnstap_dnstap-sockets + external: true + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/elasticsearch/compose.yaml b/scripts/integration/elasticsearch/compose.yaml index cdaf5e2b1d64c..6db0746fe8b10 100644 --- a/scripts/integration/elasticsearch/compose.yaml +++ b/scripts/integration/elasticsearch/compose.yaml @@ -25,3 +25,8 @@ services: - ES_JAVA_OPTS=-Xms400m -Xmx400m volumes: - ../../../tests/data/ca:/usr/share/elasticsearch/config/certs:ro + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/eventstoredb/compose.yaml b/scripts/integration/eventstoredb/compose.yaml index 324bfeeb84c54..eb0ae7b945650 100644 --- a/scripts/integration/eventstoredb/compose.yaml +++ b/scripts/integration/eventstoredb/compose.yaml @@ -6,3 +6,8 @@ services: command: --insecure --stats-period-sec=1 volumes: - ../../../tests/data:/etc/vector:ro + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/gcp/compose.yaml b/scripts/integration/gcp/compose.yaml index b9e6c8a917d16..f317c23a5604c 100644 --- a/scripts/integration/gcp/compose.yaml +++ b/scripts/integration/gcp/compose.yaml @@ -15,3 +15,8 @@ services: command: - -p - /public.pem + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/greptimedb/compose.yaml 
b/scripts/integration/greptimedb/compose.yaml index 7cd131240b49d..02a68364e7a31 100644 --- a/scripts/integration/greptimedb/compose.yaml +++ b/scripts/integration/greptimedb/compose.yaml @@ -6,3 +6,8 @@ services: command: "standalone start --http-addr=0.0.0.0:4000 --rpc-addr=0.0.0.0:4001" healthcheck: test: "curl -f localhost:4000/health || exit 1" + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/http-client/compose.yaml b/scripts/integration/http-client/compose.yaml index a1723c93d8d1a..11ad2bc76591a 100644 --- a/scripts/integration/http-client/compose.yaml +++ b/scripts/integration/http-client/compose.yaml @@ -29,3 +29,8 @@ services: - ../../../tests/data/http-client/serve:/data - ../../../tests/data/ca/intermediate_server/certs/dufs-https-chain.cert.pem:/certs/ca.cert.pem - ../../../tests/data/ca/intermediate_server/private/dufs-https.key.pem:/certs/ca.key.pem + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/humio/compose.yaml b/scripts/integration/humio/compose.yaml index a4b310c9f9557..bc1045d98445d 100644 --- a/scripts/integration/humio/compose.yaml +++ b/scripts/integration/humio/compose.yaml @@ -3,3 +3,8 @@ version: '3' services: humio: image: docker.io/humio/humio-single-node-demo:${CONFIG_VERSION} + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/influxdb/compose.yaml b/scripts/integration/influxdb/compose.yaml index 9202280345c19..7499564740491 100644 --- a/scripts/integration/influxdb/compose.yaml +++ b/scripts/integration/influxdb/compose.yaml @@ -19,3 +19,8 @@ services: command: influxd --reporting-disabled environment: - INFLUXDB_REPORTING_DISABLED=true + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/kafka/compose.yaml b/scripts/integration/kafka/compose.yaml index 4fb8fb45f2efe..0437b8b866f7a 100644 --- a/scripts/integration/kafka/compose.yaml +++ 
b/scripts/integration/kafka/compose.yaml @@ -34,3 +34,8 @@ services: - ../../../tests/data/ca/intermediate_server/private/kafka.pass:/etc/kafka/secrets/kafka.pass:ro - ../../../tests/data/ca/intermediate_server/private/kafka.p12:/etc/kafka/secrets/kafka.p12:ro - ../../../tests/data/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/logstash/compose.yaml b/scripts/integration/logstash/compose.yaml index 9d1e16f2bee19..1e7a17918435b 100644 --- a/scripts/integration/logstash/compose.yaml +++ b/scripts/integration/logstash/compose.yaml @@ -12,3 +12,8 @@ services: - /dev/null:/usr/share/logstash/pipeline/logstash.yml - ../../../tests/data/host.docker.internal.crt:/tmp/logstash.crt - ../../../tests/data/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/loki/compose.yaml b/scripts/integration/loki/compose.yaml index bca8cd4a458e2..4350e809f8961 100644 --- a/scripts/integration/loki/compose.yaml +++ b/scripts/integration/loki/compose.yaml @@ -4,3 +4,8 @@ services: loki: image: docker.io/grafana/loki:${CONFIG_VERSION} command: -config.file=/etc/loki/local-config.yaml -auth.enabled=true + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/mongodb/compose.yaml b/scripts/integration/mongodb/compose.yaml index b76d008005193..f7cd8b6517471 100644 --- a/scripts/integration/mongodb/compose.yaml +++ b/scripts/integration/mongodb/compose.yaml @@ -30,3 +30,8 @@ services: - MONGODB_INITIAL_PRIMARY_PORT_NUMBER=27017 - MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD=toor - MONGODB_REPLICA_SET_KEY=vector + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/nats/compose.yaml b/scripts/integration/nats/compose.yaml index 9be71dc529380..24a0495772ba6 100644 --- 
a/scripts/integration/nats/compose.yaml +++ b/scripts/integration/nats/compose.yaml @@ -50,3 +50,8 @@ services: - /usr/share/nats/config/nats-jetstream.conf volumes: - ../../../tests/data/nats:/usr/share/nats/config + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/nginx/compose.yaml b/scripts/integration/nginx/compose.yaml index ddedd8eb2e05e..aad61bbc97a13 100644 --- a/scripts/integration/nginx/compose.yaml +++ b/scripts/integration/nginx/compose.yaml @@ -22,4 +22,7 @@ services: - proxy networks: + default: + name: ${VECTOR_NETWORK} + external: true proxy: {} diff --git a/scripts/integration/opentelemetry/compose.yaml b/scripts/integration/opentelemetry/compose.yaml index e4b100750f085..b85cc417cedaf 100644 --- a/scripts/integration/opentelemetry/compose.yaml +++ b/scripts/integration/opentelemetry/compose.yaml @@ -5,3 +5,8 @@ services: image: docker.io/otel/opentelemetry-collector-contrib:${CONFIG_VERSION} volumes: - ../../../tests/data/opentelemetry/config.yaml:/etc/otelcol-contrib/config.yaml + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/postgres/compose.yaml b/scripts/integration/postgres/compose.yaml index ab73d2bbc2303..8c3ccb12a7913 100644 --- a/scripts/integration/postgres/compose.yaml +++ b/scripts/integration/postgres/compose.yaml @@ -13,4 +13,13 @@ services: - ../../../tests/data/ca:/certs:ro volumes: - socket: {} + # Use external volume 'postgres_socket' that's shared with the test runner + # The runner creates this volume and mounts it at /pg_socket + socket: + name: postgres_socket + external: true + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/prometheus/compose.yaml b/scripts/integration/prometheus/compose.yaml index e41592c28eccf..9f618f8f521f0 100644 --- a/scripts/integration/prometheus/compose.yaml +++ b/scripts/integration/prometheus/compose.yaml @@ -21,3 +21,8 @@ services: command: 
--config.file=/etc/vector/prometheus.yaml volumes: - ../../../tests/data:/etc/vector:ro + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/pulsar/compose.yaml b/scripts/integration/pulsar/compose.yaml index 0e963cd2e0bd1..1e4742c8fc3fe 100644 --- a/scripts/integration/pulsar/compose.yaml +++ b/scripts/integration/pulsar/compose.yaml @@ -16,3 +16,8 @@ services: - ../../../tests/data/ca/intermediate_server/private/pulsar.key.pem:/etc/pulsar/certs/pulsar.key.pem:ro - ../../../tests/data//ca/intermediate_server/certs/pulsar.cert.pem:/etc/pulsar/certs/pulsar.cert.pem:ro - ../../../tests/data/ca/intermediate_server/certs/ca-chain.cert.pem:/etc/pulsar/certs/ca-chain.cert.pem:ro + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/redis/compose.yaml b/scripts/integration/redis/compose.yaml index 1399a12b2a655..4f0a909a6e455 100644 --- a/scripts/integration/redis/compose.yaml +++ b/scripts/integration/redis/compose.yaml @@ -24,3 +24,8 @@ services: echo "sentinel failover-timeout vector 5000" >> /etc/sentinel.conf && echo "sentinel parallel-syncs vector 1" >> /etc/sentinel.conf && redis-sentinel /etc/sentinel.conf' + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/shutdown/compose.yaml b/scripts/integration/shutdown/compose.yaml index 4fb8fb45f2efe..0437b8b866f7a 100644 --- a/scripts/integration/shutdown/compose.yaml +++ b/scripts/integration/shutdown/compose.yaml @@ -34,3 +34,8 @@ services: - ../../../tests/data/ca/intermediate_server/private/kafka.pass:/etc/kafka/secrets/kafka.pass:ro - ../../../tests/data/ca/intermediate_server/private/kafka.p12:/etc/kafka/secrets/kafka.p12:ro - ../../../tests/data/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/splunk/compose.yaml b/scripts/integration/splunk/compose.yaml index 
f42364adb2418..cfae5c07b545d 100644 --- a/scripts/integration/splunk/compose.yaml +++ b/scripts/integration/splunk/compose.yaml @@ -13,3 +13,8 @@ services: - 8000:8000 - 8088:8088 - 8089:8089 + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/scripts/integration/webhdfs/compose.yaml b/scripts/integration/webhdfs/compose.yaml index 9585927e84a10..082816fd01e1f 100644 --- a/scripts/integration/webhdfs/compose.yaml +++ b/scripts/integration/webhdfs/compose.yaml @@ -30,3 +30,8 @@ services: interval: 5s timeout: 5s retries: 3 + +networks: + default: + name: ${VECTOR_NETWORK} + external: true diff --git a/vdev/src/commands/compose_tests/active_projects.rs b/vdev/src/commands/compose_tests/active_projects.rs new file mode 100644 index 0000000000000..b2698a6cb9266 --- /dev/null +++ b/vdev/src/commands/compose_tests/active_projects.rs @@ -0,0 +1,60 @@ +use anyhow::{Context, Result}; +use std::{collections::HashSet, process::Command}; + +use crate::testing::{config::ComposeTestConfig, docker::CONTAINER_TOOL}; + +/// Query Docker Compose for active projects +pub(super) fn load_active_projects() -> Result<HashSet<String>> { + let output = Command::new(CONTAINER_TOOL.clone()) + .args(["compose", "ls", "--format", "json"]) + .output() + .with_context(|| "Failed to list compose projects")?; + + if !output.status.success() { + return Ok(HashSet::new()); + } + + let projects: Vec<serde_json::Value> = serde_json::from_slice(&output.stdout) + .with_context(|| "Failed to parse docker compose ls output")?; + + Ok(projects + .iter() + .filter_map(|project| { + project + .get("Name") + .and_then(|n| n.as_str()) + .map(String::from) + }) + .collect()) +} + +/// Find the active environment for a given integration by matching Docker Compose project names +pub(super) fn find_active_environment( + active_projects: &HashSet<String>, + prefix: &str, + config: &ComposeTestConfig, +) -> Option<String> { + for project_name in active_projects { + if let Some(sanitized_env_name) = project_name.strip_prefix(prefix) { + //
The project name has dots replaced with hyphens, so we need to check + // all environments to find a match after applying the same sanitization + for env_name in config.environments().keys() { + if env_name.replace('.', "-") == sanitized_env_name { + return Some(env_name.to_string()); + } + } + } + } + None +} + +/// Find the active environment for a given integration by querying Docker Compose +pub(super) fn find_active_environment_for_integration( + directory: &str, + integration: &str, + config: &ComposeTestConfig, +) -> Result<Option<String>> { + let active_projects = load_active_projects()?; + let prefix = format!("vector-{directory}-{integration}-"); + Ok(find_active_environment(&active_projects, &prefix, config)) +} diff --git a/vdev/src/commands/compose_tests/mod.rs b/vdev/src/commands/compose_tests/mod.rs index 27a033f199a12..c289e7dff3d74 100644 --- a/vdev/src/commands/compose_tests/mod.rs +++ b/vdev/src/commands/compose_tests/mod.rs @@ -1,3 +1,5 @@ +mod active_projects; + pub(crate) mod ci_paths; pub(crate) mod show; pub(crate) mod start; diff --git a/vdev/src/commands/compose_tests/show.rs b/vdev/src/commands/compose_tests/show.rs index 4991b72b04881..0fcc831e6cb3b 100644 --- a/vdev/src/commands/compose_tests/show.rs +++ b/vdev/src/commands/compose_tests/show.rs @@ -1,14 +1,15 @@ use anyhow::Result; +use std::collections::HashSet; -use crate::{ - environment::Environment, - testing::{config::ComposeTestConfig, state}, -}; +use crate::{environment::Environment, testing::config::ComposeTestConfig}; + +use super::active_projects::{find_active_environment, load_active_projects}; pub fn exec(integration: Option<&String>, path: &str) -> Result<()> { + let show = Show::new(path)?; match integration { - None => show_all(path), - Some(integration) => show_one(integration, path), + None => show.show_all(), + Some(integration) => show.show_one(integration), } } @@ -21,78 +22,93 @@ pub fn exec_environments_only(integration: &str, path: &str) -> Result<()> { Ok(()) } -fn
show_all(path: &str) -> Result<()> { - let entries = ComposeTestConfig::collect_all(path)?; - let width = entries - .keys() - .fold(16, |width, entry| width.max(entry.len())); - println!("{:width$} Environment Name(s)", "Integration Name"); - println!("{:width$} -------------------", "----------------"); - for (integration, config) in entries { - let envs_dir = state::EnvsDir::new(&integration); - let active_env = envs_dir.active()?; - let environments = config - .environments() - .keys() - .map(|environment| format(active_env.as_ref(), environment)) - .collect::<Vec<_>>() - .join(" "); - println!("{integration:width$} {environments}"); - } - Ok(()) +struct Show { + path: String, + active_projects: HashSet<String>, } -fn show_one(integration: &str, path: &str) -> Result<()> { - let (_test_dir, config) = ComposeTestConfig::load(path, integration)?; - let envs_dir = state::EnvsDir::new(integration); - let active_env = envs_dir.active()?; - - if let Some(args) = &config.args { - println!("Test args: {}", args.join(" ")); - } else { - println!("Test args: N/A"); +impl Show { + fn new(path: &str) -> Result<Self> { + Ok(Self { + path: path.to_string(), + active_projects: load_active_projects()?, + }) } - if config.features.is_empty() { - println!("Features: N/A"); - } else { - println!("Features: {}", config.features.join(",")); - } + fn show_all(&self) -> Result<()> { + let entries = ComposeTestConfig::collect_all(&self.path)?; - println!( - "Test filter: {}", - config.test_filter.as_deref().unwrap_or("N/A") - ); - - println!("Environment:"); - print_env(" ", &config.env); - println!("Runner:"); - println!(" Environment:"); - print_env(" ", &config.runner.env); - println!(" Volumes:"); - if config.runner.volumes.is_empty() { - println!(" N/A"); - } else { - for (target, mount) in &config.runner.volumes { - println!(" {target} => {mount}"); + let width = entries + .keys() + .fold(16, |width, entry| width.max(entry.len())); + println!("{:width$} Environment Name(s)", "Integration Name"); +
println!("{:width$} -------------------", "----------------"); + for (integration, config) in entries { + let prefix = format!("vector-{}-{integration}-", self.path); + let active_env = find_active_environment(&self.active_projects, &prefix, &config); + let environments = config + .environments() + .keys() + .map(|environment| format_env(active_env.as_ref(), environment)) + .collect::>() + .join(" "); + println!("{integration:width$} {environments}"); } + Ok(()) } - println!( - " Needs docker socket: {}", - config.runner.needs_docker_socket - ); - println!("Environments:"); - for environment in config.environments().keys() { - println!(" {}", format(active_env.as_ref(), environment)); - } + fn show_one(&self, integration: &str) -> Result<()> { + let (_test_dir, config) = ComposeTestConfig::load(&self.path, integration)?; + let prefix = format!("vector-{}-{integration}-", self.path); + let active_env = find_active_environment(&self.active_projects, &prefix, &config); - Ok(()) + if let Some(args) = &config.args { + println!("Test args: {}", args.join(" ")); + } else { + println!("Test args: N/A"); + } + + if config.features.is_empty() { + println!("Features: N/A"); + } else { + println!("Features: {}", config.features.join(",")); + } + + println!( + "Test filter: {}", + config.test_filter.as_deref().unwrap_or("N/A") + ); + + println!("Environment:"); + print_env(" ", &config.env); + println!("Runner:"); + println!(" Environment:"); + print_env(" ", &config.runner.env); + println!(" Volumes:"); + if config.runner.volumes.is_empty() { + println!(" N/A"); + } else { + for (target, mount) in &config.runner.volumes { + println!(" {target} => {mount}"); + } + } + println!( + " Needs docker socket: {}", + config.runner.needs_docker_socket + ); + + println!("Environments:"); + for environment in config.environments().keys() { + println!(" {}", format_env(active_env.as_ref(), environment)); + } + + Ok(()) + } } fn print_env(prefix: &str, environment: &Environment) { if 
environment.is_empty() { - println!("{prefix}N/A"); + println!("{prefix} N/A"); } else { for (key, value) in environment { match value { @@ -103,7 +119,7 @@ fn print_env(prefix: &str, environment: &Environment) { } } -fn format(active_env: Option<&String>, environment: &str) -> String { +fn format_env(active_env: Option<&String>, environment: &str) -> String { match active_env { Some(active) if active == environment => format!("{environment} (active)"), _ => environment.into(), diff --git a/vdev/src/commands/compose_tests/stop.rs b/vdev/src/commands/compose_tests/stop.rs index 20849a2ecd6a2..7470512efb5f1 100644 --- a/vdev/src/commands/compose_tests/stop.rs +++ b/vdev/src/commands/compose_tests/stop.rs @@ -1,17 +1,23 @@ use anyhow::Result; use crate::testing::{ + config::ComposeTestConfig, integration::{ComposeTest, ComposeTestLocalConfig}, - state::EnvsDir, }; +use super::active_projects::find_active_environment_for_integration; + pub(crate) fn exec( local_config: ComposeTestLocalConfig, test_name: &str, all_features: bool, ) -> Result<()> { - if let Some(active) = EnvsDir::new(test_name).active()? 
{ - ComposeTest::generate(local_config, test_name, active, all_features, 0)?.stop() + let (_test_dir, config) = ComposeTestConfig::load(local_config.directory, test_name)?; + let active_environment = + find_active_environment_for_integration(local_config.directory, test_name, &config)?; + + if let Some(environment) = active_environment { + ComposeTest::generate(local_config, test_name, environment, all_features, 0)?.stop() } else { println!("No environment for {test_name} is active."); Ok(()) diff --git a/vdev/src/commands/compose_tests/test.rs b/vdev/src/commands/compose_tests/test.rs index e2a24e50943a1..ac98d7718bfcd 100644 --- a/vdev/src/commands/compose_tests/test.rs +++ b/vdev/src/commands/compose_tests/test.rs @@ -5,9 +5,10 @@ use anyhow::{Result, bail}; use crate::testing::{ config::ComposeTestConfig, integration::{ComposeTest, ComposeTestLocalConfig}, - state::EnvsDir, }; +use super::active_projects::find_active_environment_for_integration; + pub fn exec( local_config: ComposeTestLocalConfig, integration: &str, @@ -19,8 +20,9 @@ pub fn exec( let (_test_dir, config) = ComposeTestConfig::load(local_config.directory, integration)?; let envs = config.environments(); - let active = EnvsDir::new(integration).active()?; - debug!("Active environment: {environment:#?}"); + let active = + find_active_environment_for_integration(local_config.directory, integration, &config)?; + debug!("Active environment: {active:#?}"); let environments: Box> = match (environment, &active) { (Some(environment), Some(active)) if environment != active => { diff --git a/vdev/src/testing/integration.rs b/vdev/src/testing/integration.rs index 47a16b51d3d10..7256bc420e787 100644 --- a/vdev/src/testing/integration.rs +++ b/vdev/src/testing/integration.rs @@ -1,20 +1,15 @@ use std::{ - collections::BTreeMap, - fs, path::{Path, PathBuf}, process::Command, }; use anyhow::{Context, Result, bail}; -use serde_yaml::Value; -use tempfile::{Builder, NamedTempFile}; use super::{ config::{ 
ComposeConfig, ComposeTestConfig, E2E_TESTS_DIR, INTEGRATION_TESTS_DIR, RustToolchainConfig, }, runner::{ContainerTestRunner as _, IntegrationTestRunner, TestRunner as _}, - state::EnvsDir, }; use crate::{ app::CommandExt as _, @@ -69,7 +64,6 @@ pub(crate) struct ComposeTest { test_name: String, environment: String, config: ComposeTestConfig, - envs_dir: EnvsDir, runner: IntegrationTestRunner, compose: Option, env_config: Environment, @@ -88,7 +82,6 @@ impl ComposeTest { let test_name: String = test_name.into(); let environment = environment.into(); let (test_dir, config) = ComposeTestConfig::load(local_config.directory, &test_name)?; - let envs_dir = EnvsDir::new(&test_name); let Some(mut env_config) = config.environments().get(&environment).cloned() else { bail!("Could not find environment named {environment:?}"); }; @@ -112,7 +105,6 @@ impl ComposeTest { test_name, environment, config, - envs_dir, runner, compose, env_config: rename_environment_keys(&env_config), @@ -123,11 +115,51 @@ impl ComposeTest { Ok(compose_test) } + fn project_name(&self) -> String { + // Docker Compose project names must consist only of lowercase alphanumeric characters, + // hyphens, and underscores. Replace any dots with hyphens. 
+ let sanitized_env = self.environment.replace('.', "-"); + format!( + "vector-{}-{}-{}", + self.local_config.directory, self.test_name, sanitized_env + ) + } + + fn is_running(&self) -> Result { + let Some(compose) = &self.compose else { + return Ok(false); + }; + + let output = Command::new(CONTAINER_TOOL.clone()) + .args([ + "compose", + "--project-name", + &self.project_name(), + "ps", + "--format", + "json", + "--status", + "running", + ]) + .current_dir(&compose.test_dir) + .envs( + compose + .env + .iter() + .filter_map(|(k, v)| v.as_ref().map(|val| (k, val))), + ) + .output() + .with_context(|| "Failed to check if compose environment is running")?; + + // If stdout is empty or "[]", no containers are running + Ok(!output.stdout.is_empty() && output.stdout != b"[]\n" && output.stdout != b"[]") + } + pub(crate) fn test(&self, extra_args: Vec) -> Result<()> { - let active = self.envs_dir.check_active(&self.environment)?; + let was_running = self.is_running()?; self.config.check_required()?; - if !active { + if !was_running { self.start()?; } @@ -181,7 +213,7 @@ impl ComposeTest { self.local_config.directory, )?; - if !active { + if self.is_running()? { self.runner.remove()?; self.stop()?; } @@ -203,29 +235,27 @@ impl ComposeTest { self.config.check_required()?; if let Some(compose) = &self.compose { self.runner.ensure_network()?; + self.runner.ensure_external_volumes()?; - if self.envs_dir.check_active(&self.environment)? { + if self.is_running()? { bail!("environment is already up"); } - compose.start(&self.env_config)?; - - self.envs_dir.save(&self.environment, &self.env_config) - } else { - Ok(()) + let project_name = self.project_name(); + compose.start(&self.env_config, &project_name)?; } + Ok(()) } pub(crate) fn stop(&self) -> Result<()> { if let Some(compose) = &self.compose { - // TODO: Is this check really needed? - if self.envs_dir.load()?.is_none() { + if !self.is_running()? 
{ bail!("No environment for {} is up.", self.test_name); } self.runner.remove()?; - compose.stop()?; - self.envs_dir.remove()?; + let project_name = self.project_name(); + compose.stop(&self.env_config, &project_name)?; } Ok(()) @@ -234,75 +264,56 @@ impl ComposeTest { #[derive(Debug)] struct Compose { - original_path: PathBuf, + yaml_path: PathBuf, test_dir: PathBuf, env: Environment, #[cfg_attr(target_family = "windows", allow(dead_code))] config: ComposeConfig, network: String, - temp_file: NamedTempFile, } impl Compose { fn new(test_dir: PathBuf, env: Environment, network: String) -> Result> { - let original_path: PathBuf = [&test_dir, Path::new("compose.yaml")].iter().collect(); + let yaml_path: PathBuf = [&test_dir, Path::new("compose.yaml")].iter().collect(); - match original_path.try_exists() { + match yaml_path.try_exists() { Err(error) => { - Err(error).with_context(|| format!("Could not lookup {}", original_path.display())) + Err(error).with_context(|| format!("Could not lookup {}", yaml_path.display())) } Ok(false) => Ok(None), Ok(true) => { - let mut config = ComposeConfig::parse(&original_path)?; - // Inject the networks block - config.networks.insert( - "default".to_string(), - BTreeMap::from_iter([ - ("name".to_string(), Value::String(network.clone())), - ("external".to_string(), Value::Bool(true)), - ]), - ); - - // Create a named tempfile, there may be resource leakage here in case of SIGINT - // Tried tempfile::tempfile() but this returns a File object without a usable path - // https://docs.rs/tempfile/latest/tempfile/#resource-leaking - let temp_file = Builder::new() - .prefix("compose-temp-") - .suffix(".yaml") - .tempfile_in(&test_dir) - .with_context(|| "Failed to create temporary compose file")?; - - fs::write( - temp_file.path(), - serde_yaml::to_string(&config) - .with_context(|| "Failed to serialize modified compose.yaml")?, - )?; + // Parse config only for unix volume permission checking + let config = ComposeConfig::parse(&yaml_path)?; 
Ok(Some(Self { - original_path, + yaml_path, test_dir, env, config, network, - temp_file, })) } } } - fn start(&self, environment: &Environment) -> Result<()> { + fn start(&self, environment: &Environment, project_name: &str) -> Result<()> { #[cfg(unix)] unix::prepare_compose_volumes(&self.config, &self.test_dir, environment)?; - self.run("Starting", &["up", "--detach"], Some(environment)) + self.run( + "Starting", + &["up", "--detach"], + Some(environment), + project_name, + ) } - fn stop(&self) -> Result<()> { - // The config settings are not needed when stopping a compose setup. + fn stop(&self, environment: &Environment, project_name: &str) -> Result<()> { self.run( "Stopping", &["down", "--timeout", "0", "--volumes", "--remove-orphans"], - None, + Some(environment), + project_name, ) } @@ -311,21 +322,14 @@ impl Compose { action: &str, args: &[&'static str], environment: Option<&Environment>, + project_name: &str, ) -> Result<()> { let mut command = Command::new(CONTAINER_TOOL.clone()); command.arg("compose"); - // When the integration test environment is already active, the tempfile path does not - // exist because `Compose::new()` has not been called. In this case, the `stop` command - // needs to use the calculated path from the integration name instead of the nonexistent - // tempfile path. This is because `stop` doesn't go through the same logic as `start` - // and doesn't create a new tempfile before calling docker compose. 
- // If stop command needs to use some of the injected bits then we need to rebuild it + command.arg("--project-name"); + command.arg(project_name); command.arg("--file"); - if self.temp_file.path().exists() { - command.arg(self.temp_file.path()); - } else { - command.arg(&self.original_path); - } + command.arg(&self.yaml_path); command.args(args); diff --git a/vdev/src/testing/mod.rs b/vdev/src/testing/mod.rs index a899e52804387..a5ee65f2b589b 100644 --- a/vdev/src/testing/mod.rs +++ b/vdev/src/testing/mod.rs @@ -3,4 +3,3 @@ pub mod config; pub mod docker; pub mod integration; pub mod runner; -pub mod state; diff --git a/vdev/src/testing/runner.rs b/vdev/src/testing/runner.rs index a0c7d002460ea..7fe682db4418e 100644 --- a/vdev/src/testing/runner.rs +++ b/vdev/src/testing/runner.rs @@ -308,6 +308,26 @@ impl IntegrationTestRunner { Ok(()) } } + + pub(super) fn ensure_external_volumes(&self) -> Result<()> { + // Get list of existing volumes + let mut command = docker_command(["volume", "ls", "--format", "{{.Name}}"]); + let existing_volumes: HashSet = + command.check_output()?.lines().map(String::from).collect(); + + // Extract volume names from self.volumes (format is "volume_name:/mount/path") + for volume_spec in &self.volumes { + if let Some((volume_name, _)) = volume_spec.split_once(':') { + // Only create named volumes (not paths like /host/path) + if !volume_name.starts_with('/') && !existing_volumes.contains(volume_name) { + docker_command(["volume", "create", volume_name]) + .wait(format!("Creating volume {volume_name}"))?; + } + } + } + + Ok(()) + } } impl ContainerTestRunner for IntegrationTestRunner { diff --git a/vdev/src/testing/state.rs b/vdev/src/testing/state.rs deleted file mode 100644 index b97e0aa8c9858..0000000000000 --- a/vdev/src/testing/state.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::{ - fs, - io::ErrorKind, - path::{Path, PathBuf}, - sync::LazyLock, -}; - -use anyhow::{Context, Result, anyhow}; -use serde::{Deserialize, Serialize}; - -use 
crate::{environment::Environment, platform, util}; - -static DATA_DIR: LazyLock = LazyLock::new(|| { - [platform::data_dir(), Path::new("integration")] - .into_iter() - .collect() -}); - -#[derive(Debug)] -pub struct EnvsDir { - path: PathBuf, -} - -#[derive(Deserialize, Serialize)] -pub struct State { - pub active: String, - pub config: Environment, -} - -impl EnvsDir { - pub fn new(integration: &str) -> Self { - let config = format!("{integration}.json"); - let path = [&DATA_DIR, Path::new(&config)].iter().collect(); - Self { path } - } - - /// Check if the named environment is active. If the current config could not be loaded or a - /// different environment is active, an error is returned. - pub fn check_active(&self, name: &str) -> Result { - match self.active()? { - None => Ok(false), - Some(active) if active == name => Ok(true), - Some(active) => Err(anyhow!( - "Requested environment {name:?} does not match active one {active:?}" - )), - } - } - - /// Return the currently active environment name. - pub fn active(&self) -> Result> { - self.load().map(|state| state.map(|config| config.active)) - } - - /// Load the currently active state data. 
- pub fn load(&self) -> Result> { - let json = match fs::read_to_string(&self.path) { - Ok(json) => json, - Err(error) if error.kind() == ErrorKind::NotFound => return Ok(None), - Err(error) => { - return Err(error) - .context(format!("Could not read state file {}", self.path.display())); - } - }; - let state: State = serde_json::from_str(&json) - .with_context(|| format!("Could not parse state file {}", self.path.display()))?; - Ok(Some(state)) - } - - pub fn save(&self, environment: &str, config: &Environment) -> Result<()> { - let config = State { - active: environment.into(), - config: config.clone(), - }; - let path = &*DATA_DIR; - if !path.is_dir() { - fs::create_dir_all(path) - .with_context(|| format!("failed to create directory {}", path.display()))?; - } - - let config = serde_json::to_string(&config)?; - fs::write(&self.path, config) - .with_context(|| format!("failed to write file {}", self.path.display())) - } - - pub fn remove(&self) -> Result<()> { - if util::exists(&self.path)? { - fs::remove_file(&self.path) - .with_context(|| format!("failed to remove {}", self.path.display()))?; - } - - Ok(()) - } -}