-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathe2e_tests_full.rs
More file actions
296 lines (266 loc) · 10.4 KB
/
e2e_tests_full.rs
File metadata and controls
296 lines (266 loc) · 10.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
//! Full End-to-End Testing Binary for Torrust Tracker Deployer (LOCAL DEVELOPMENT ONLY)
//!
//! This binary provides complete end-to-end testing by combining infrastructure provisioning
//! and configuration management in a single LXD VM. It's designed for local development
//! and comprehensive testing workflows.
//!
//! ⚠️ **IMPORTANT**: This binary cannot run on GitHub Actions due to network connectivity
//! issues within LXD VMs on GitHub runners. For CI environments, use the split test suites:
//! - `cargo run --bin e2e-provision-tests` - Infrastructure provisioning only
//! - `cargo run --bin e2e-config-tests` - Configuration and software installation
//!
//! ## Usage
//!
//! Run the full E2E test suite:
//!
//! ```bash
//! cargo run --bin e2e-tests-full
//! ```
//!
//! Run with custom options:
//!
//! ```bash
//! # NOTE(review): an `--environment` flag was previously documented here, but
//! # this binary's CLI only defines `--keep` and `--log-format`; the environment
//! # name is hard-coded to "e2e-full". Confirm the flag exists before re-adding
//! # the example: cargo run --bin e2e-tests-full -- --environment e2e-staging
//!
//! # Keep test environment after completion (for debugging)
//! cargo run --bin e2e-tests-full -- --keep
//!
//! # Change logging format
//! cargo run --bin e2e-tests-full -- --log-format json
//!
//! # Show help
//! cargo run --bin e2e-tests-full -- --help
//! ```
//!
//! ## Test Workflow
//!
//! 1. **Preflight cleanup** - Remove any artifacts from previous test runs that may have failed to clean up
//! 2. **Infrastructure provisioning** - Create LXD VMs using `OpenTofu`
//! 3. **Configuration** - Apply Ansible playbooks for software installation
//! 4. **Validation** - Verify deployments are working correctly
//! 5. **Test infrastructure cleanup** - Remove test resources created during this run
//!
//! ## Two-Phase Cleanup Strategy
//!
//! The cleanup process happens in two distinct phases:
//!
//! - **Phase 1 - Preflight cleanup**: Removes artifacts from previous test runs that may have
//! failed to clean up properly (executed at the start in main function)
//! - **Phase 2 - Test infrastructure cleanup**: Destroys resources created specifically during
//! the current test run (executed at the end in main function)
//!
//! The test suite supports different VM providers (LXD, Multipass) and includes
//! comprehensive logging and error reporting.
use anyhow::Result;
use clap::Parser;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tracing::{error, info};
// Import E2E testing infrastructure
use torrust_tracker_deployer_lib::adapters::ssh::DEFAULT_SSH_PORT;
use torrust_tracker_deployer_lib::bootstrap::logging::{LogFormat, LogOutput, LoggingBuilder};
use torrust_tracker_deployer_lib::infrastructure::persistence::repository_factory::RepositoryFactory;
use torrust_tracker_deployer_lib::shared::{Clock, SystemClock};
use torrust_tracker_deployer_lib::testing::e2e::context::{TestContext, TestContextType};
use torrust_tracker_deployer_lib::testing::e2e::tasks::{
preflight_cleanup::cleanup_previous_test_data,
run_configure_command::run_configure_command,
run_create_command::run_create_command,
run_test_command::run_test_command,
virtual_machine::{
preflight_cleanup::preflight_cleanup_previous_resources,
run_destroy_command::run_destroy_command, run_provision_command::run_provision_command,
},
};
// Command-line arguments for the full E2E test binary.
//
// NOTE: plain `//` comments are used deliberately at struct level — clap's
// derive macro turns `///` doc comments into generated help text, and we do
// not want to change the `--help` output.
//
// NOTE(review): the command name here is "e2e-tests" while the module docs and
// the binary name refer to "e2e-tests-full" — confirm whether this mismatch is
// intentional (the effective binary name comes from Cargo.toml, not this attr).
#[derive(Parser)]
#[command(name = "e2e-tests")]
#[command(about = "E2E tests for Torrust Tracker Deployer")]
struct Cli {
    /// Keep the test environment after completion
    #[arg(long)]
    keep: bool,
    /// Logging format to use
    #[arg(
        long,
        default_value = "pretty",
        help = "Logging format: pretty, json, or compact"
    )]
    log_format: LogFormat,
}
/// Main entry point for E2E tests.
///
/// Runs the full deployment workflow: provision infrastructure, configure services,
/// validate deployment, and cleanup resources.
///
/// # Errors
///
/// Returns an error if:
/// - Pre-flight cleanup of previous test data fails
/// - Environment creation (`run_create_command`) fails
/// - Test context creation/initialization fails
/// - Infrastructure preflight cleanup fails
/// - Infrastructure provisioning fails
/// - Service configuration fails
/// - Deployment validation fails
///
/// Note: destroy (cleanup) failures are logged but deliberately do NOT fail
/// the run — test results take precedence over cleanup results.
///
/// # Panics
///
/// Panics if the current working directory cannot be determined (the `expect`
/// on `std::env::current_dir()` below). The final result-combination `match`
/// is exhaustive over `(Result, Result)` and cannot panic.
#[allow(clippy::too_many_lines)]
#[tokio::main]
pub async fn main() -> Result<()> {
    let cli = Cli::parse();

    // Initialize logging based on the chosen format with stderr output for test visibility.
    // E2E tests use the production log location (./data/logs) via the builder pattern.
    LoggingBuilder::new(std::path::Path::new("./data/logs"))
        .with_format(cli.log_format.clone())
        .with_output(LogOutput::FileAndStderr)
        .init();

    info!(
        application = "torrust_tracker_deployer",
        test_suite = "e2e_tests",
        log_format = ?cli.log_format,
        "Starting E2E tests"
    );

    // Use absolute paths to project root for SSH keys to ensure they can be found by Ansible.
    let project_root = std::env::current_dir().expect("Failed to get current directory");
    let ssh_private_key_path = project_root.join("fixtures/testing_rsa");
    let ssh_public_key_path = project_root.join("fixtures/testing_rsa.pub");

    // Phase 1 cleanup: remove artifacts from previous test runs BEFORE creating the
    // environment. This prevents "environment already exists" errors from stale state —
    // CreateCommandHandler checks whether the environment exists in the repository.
    cleanup_previous_test_data("e2e-full").map_err(|e| anyhow::anyhow!("{e}"))?;

    // Create repository factory (30s lock timeout) and clock for environment creation.
    let repository_factory = RepositoryFactory::new(Duration::from_secs(30));
    let clock: Arc<dyn Clock> = Arc::new(SystemClock);

    // Create the "e2e-full" environment via CreateCommandHandler.
    let environment = run_create_command(
        &repository_factory,
        clock,
        "e2e-full",
        ssh_private_key_path.to_string_lossy().to_string(),
        ssh_public_key_path.to_string_lossy().to_string(),
        "torrust",
        DEFAULT_SSH_PORT,
    )
    .map_err(|e| anyhow::anyhow!("{e}"))?;

    let mut test_context =
        TestContext::from_environment(cli.keep, environment, TestContextType::VirtualMachine)?
            .init()?;

    // Additional preflight cleanup for infrastructure (OpenTofu, LXD resources).
    // This handles any lingering infrastructure from interrupted previous runs.
    preflight_cleanup_previous_resources(&test_context)?;

    let test_start = Instant::now();

    let deployment_result = run_full_deployment_test(&mut test_context).await;

    // Validation runs only when deployment succeeded; on deployment failure we
    // record a vacuous Ok so the final match reports the deployment error rather
    // than a misleading validation error.
    let validation_result = match &deployment_result {
        Ok(()) => run_test_command(&test_context)
            .await
            .map_err(|e| anyhow::anyhow!("{e}")),
        Err(_) => Ok(()), // Skip validation if deployment failed
    };

    // Phase 2 cleanup: always destroy test infrastructure created during this run
    // using DestroyCommandHandler, regardless of test success or failure.
    // The keep_env flag is handled inside run_full_destroy_test.
    let destroy_result = run_full_destroy_test(&mut test_context);

    let test_duration = test_start.elapsed();
    info!(
        performance = "test_execution",
        duration_secs = test_duration.as_secs_f64(),
        duration = ?test_duration,
        "Test execution completed"
    );

    // Report the cleanup outcome first. Destroy failures are logged but do not
    // override the test result below.
    match destroy_result {
        Ok(()) => {
            info!(
                operation = "destroy",
                status = "success",
                "Infrastructure cleanup completed successfully"
            );
        }
        Err(destroy_err) => {
            error!(
                operation = "destroy",
                status = "failed",
                error = %destroy_err,
                "Infrastructure cleanup failed"
            );
            // Note: We don't fail the overall test just because cleanup failed.
            // The test results are more important than cleanup results.
        }
    }

    // Combine deployment and validation outcomes into the process exit status.
    match (deployment_result, validation_result) {
        (Ok(()), Ok(())) => {
            info!(
                test_suite = "e2e_tests",
                status = "success",
                "All tests passed and cleanup completed successfully"
            );
            Ok(())
        }
        (Ok(()), Err(validation_err)) => {
            error!(
                test_suite = "e2e_tests",
                status = "failed",
                error = %validation_err,
                "Deployment succeeded but validation failed"
            );
            Err(validation_err)
        }
        (Err(deployment_err), Ok(())) => {
            error!(
                test_suite = "e2e_tests",
                status = "failed",
                error = %deployment_err,
                "Deployment failed"
            );
            Err(deployment_err)
        }
        // Unreachable in practice (validation is skipped — set to Ok — when
        // deployment fails), but kept so the match stays exhaustive.
        (Err(deployment_err), Err(_)) => {
            error!(
                test_suite = "e2e_tests",
                status = "failed",
                error = %deployment_err,
                "Deployment failed (validation skipped)"
            );
            Err(deployment_err)
        }
    }
}
/// Runs the deployment phase of the full E2E suite: provision the VM with
/// `OpenTofu`, then configure it with Ansible playbooks. Each step updates the
/// `TestContext` in place as the environment advances through its states.
async fn run_full_deployment_test(test_context: &mut TestContext) -> Result<()> {
    info!(
        test_type = "full_deployment",
        workflow = "template_based",
        "Starting full deployment E2E test"
    );

    // Step 1: provision infrastructure (updates the context with the provisioned state).
    let provision_outcome = run_provision_command(test_context).await;
    provision_outcome.map_err(|e| anyhow::anyhow!("{e}"))?;

    // Step 2: configure the provisioned VM (updates the context with the configured state).
    if let Err(e) = run_configure_command(test_context) {
        return Err(anyhow::anyhow!("{e}"));
    }

    info!(status = "success", "Deployment completed successfully");
    info!(
        test_type = "full_deployment",
        status = "success",
        note = "Docker/Docker Compose installation status varies based on network connectivity",
        "Full deployment E2E test completed successfully"
    );

    Ok(())
}
/// Runs the teardown phase of the full E2E suite, destroying the infrastructure
/// created during this run via `run_destroy_command`.
fn run_full_destroy_test(test_context: &mut TestContext) -> Result<()> {
    info!(
        test_type = "full_destroy",
        workflow = "template_based",
        "Starting full destroy E2E test"
    );

    // Delegate the actual teardown; surface any failure as an anyhow error.
    match run_destroy_command(test_context) {
        Ok(()) => {
            info!(
                status = "success",
                "Infrastructure destruction completed successfully"
            );
            Ok(())
        }
        Err(e) => Err(anyhow::anyhow!("{e}")),
    }
}