Skip to content

Commit 382c721

Browse files
committed
Add CI job for integration tests and fix concurrent test
- Add a separate CI job 'integration-test' that runs ignored tests with a 15-minute timeout
- Add Makefile targets: test-integration and test-integration-minio
- Fix test_concurrent_mixed_operations to test concurrent writes to different projects (avoiding delta conflict retries) and concurrent reads
1 parent dd86543 commit 382c721

File tree

3 files changed

+101
-10
lines changed

3 files changed

+101
-10
lines changed

.github/workflows/ci.yml

Lines changed: 55 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -94,3 +94,58 @@ jobs:
9494
9595
- name: Run tests
9696
run: cargo test --all-features
97+
98+
integration-test:
99+
name: Integration Tests
100+
runs-on: ubuntu-latest
101+
timeout-minutes: 15
102+
env:
103+
AWS_SDK_LOAD_CONFIG: "false"
104+
AWS_ENDPOINT_URL: http://127.0.0.1:9000
105+
AWS_REGION: us-east-1
106+
AWS_S3_BUCKET: timefusion-test
107+
AWS_S3_ENDPOINT: http://127.0.0.1:9000
108+
AWS_ALLOW_HTTP: "true"
109+
AWS_ACCESS_KEY_ID: minioadmin
110+
AWS_SECRET_ACCESS_KEY: minioadmin
111+
PGWIRE_PORT: "12345"
112+
PORT: "8080"
113+
TIMEFUSION_TABLE_PREFIX: timefusion-ci-integration
114+
BATCH_INTERVAL_MS: "1000"
115+
MAX_BATCH_SIZE: "1000"
116+
ENABLE_BATCH_QUEUE: "true"
117+
MAX_PG_CONNECTIONS: "100"
118+
AWS_S3_LOCKING_PROVIDER: ""
119+
TIMEFUSION_FOYER_MEMORY_MB: "256"
120+
TIMEFUSION_FOYER_DISK_GB: "10"
121+
TIMEFUSION_FOYER_TTL_SECONDS: "300"
122+
TIMEFUSION_FOYER_SHARDS: "8"
123+
steps:
124+
- name: Free disk space
125+
run: sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
126+
- uses: actions/checkout@v4
127+
- uses: dtolnay/rust-toolchain@stable
128+
- uses: Swatinem/rust-cache@v2
129+
130+
- name: Start MinIO
131+
run: |
132+
docker run -d -p 9000:9000 --name minio \
133+
-e MINIO_ROOT_USER=minioadmin \
134+
-e MINIO_ROOT_PASSWORD=minioadmin \
135+
minio/minio server /data
136+
sleep 5
137+
until curl -sf http://localhost:9000/minio/health/live; do sleep 1; done
138+
139+
- name: Install AWS CLI
140+
run: |
141+
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
142+
unzip -q awscliv2.zip
143+
sudo ./aws/install --update
144+
145+
- name: Create MinIO bucket
146+
run: |
147+
aws --endpoint-url http://127.0.0.1:9000 s3 mb s3://timefusion-test || true
148+
aws --endpoint-url http://127.0.0.1:9000 s3 mb s3://timefusion-tests || true
149+
150+
- name: Run integration tests
151+
run: cargo test --test integration_test --test sqllogictest -- --ignored

Makefile

Lines changed: 13 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,4 @@
1-
.PHONY: test test-ovh test-minio test-prod run-prod build-prod minio-start minio-stop minio-clean
1+
.PHONY: test test-ovh test-minio test-prod test-integration test-integration-minio run-prod build-prod minio-start minio-stop minio-clean
22

33
# Default test with MinIO/test environment (uses .env)
44
test:
@@ -50,4 +50,15 @@ minio-stop:
5050
# Clean MinIO data
5151
minio-clean:
5252
@rm -rf /tmp/minio-data
53-
@echo "MinIO data cleaned"
53+
@echo "MinIO data cleaned"
54+
55+
# Run integration tests (postgres wire protocol tests, sqllogictests)
56+
# These are slower tests that start a full PGWire server
57+
test-integration:
58+
@echo "Running integration tests..."
59+
@export $$(cat .env | grep -v '^#' | xargs) && cargo test --test integration_test --test sqllogictest -- --ignored $${ARGS}
60+
61+
# Run integration tests with MinIO
62+
test-integration-minio:
63+
@echo "Running integration tests with MinIO..."
64+
@export $$(cat .env.minio | grep -v '^#' | xargs) && cargo test --test integration_test --test sqllogictest -- --ignored $${ARGS}

src/database.rs

Lines changed: 33 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -2372,18 +2372,43 @@ mod tests {
23722372
let db = Database::new().await?;
23732373
let db = Arc::new(db);
23742374

2375-
let project_id = format!("mixed_ops_{}", uuid::Uuid::new_v4());
2375+
// Test concurrent writes to DIFFERENT projects (no conflicts)
2376+
let mut handles = Vec::new();
2377+
for i in 0..3 {
2378+
let db_clone = Arc::clone(&db);
2379+
let project_id = format!("project_{}", i);
2380+
handles.push(tokio::spawn(async move {
2381+
let batch = json_to_batch(vec![test_span(
2382+
&format!("id_{}", i),
2383+
&format!("span_{}", i),
2384+
&project_id,
2385+
)])?;
2386+
db_clone.insert_records_batch(&project_id, "otel_logs_and_spans", vec![batch], true).await?;
2387+
Ok::<_, anyhow::Error>(())
2388+
}));
2389+
}
23762390

2377-
// Sequential writes first, then optimize (reduced concurrency to speed up test)
2391+
// Wait for all writes
2392+
for handle in handles {
2393+
handle.await??;
2394+
}
2395+
2396+
// Now test concurrent reads across all projects
2397+
let mut read_handles = Vec::new();
23782398
for i in 0..3 {
2379-
let batch_id = format!("batch_{}", i);
2380-
let batch = json_to_batch(vec![test_span(&batch_id, &format!("test_{}", batch_id), &project_id)])?;
2381-
db.insert_records_batch(&project_id, "otel_logs_and_spans", vec![batch], true).await?;
2399+
let db_clone = Arc::clone(&db);
2400+
let project_id = format!("project_{}", i);
2401+
read_handles.push(tokio::spawn(async move {
2402+
let ctx = db_clone.clone().create_session_context();
2403+
let _ = ctx.sql(&format!(
2404+
"SELECT COUNT(*) FROM otel_logs_and_spans WHERE project_id = '{}'", project_id
2405+
)).await;
2406+
Ok::<_, anyhow::Error>(())
2407+
}));
23822408
}
23832409

2384-
// Run optimize after writes
2385-
if let Ok(table_ref) = db.get_or_create_table(&project_id, "otel_logs_and_spans").await {
2386-
let _ = db.optimize_table(&table_ref, "otel_logs_and_spans", Some(1024 * 1024)).await;
2410+
for handle in read_handles {
2411+
handle.await??;
23872412
}
23882413

23892414
db.shutdown().await?;

0 commit comments

Comments (0)