forked from vectordotdev/vector
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathunit_test.rs
More file actions
225 lines (196 loc) · 7.07 KB
/
unit_test.rs
File metadata and controls
225 lines (196 loc) · 7.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
#![allow(missing_docs)]
use std::{
fs::File,
io::prelude::*,
path::PathBuf,
time::{Duration, Instant},
};
use clap::Parser;
use colored::*;
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
use crate::{
config::{self, UnitTestResult},
signal,
};
// CLI options for `vector test`. NOTE: the field-level `///` doc comments
// below double as clap-generated `--help` text (user-facing strings), so
// they are deliberately left untouched; review notes use `//` comments.
#[derive(Parser, Debug)]
#[command(rename_all = "kebab-case")]
pub struct Opts {
    /// Vector config files in TOML format to test.
    #[arg(id = "config-toml", long, value_delimiter(','))]
    paths_toml: Vec<PathBuf>,

    /// Vector config files in JSON format to test.
    #[arg(id = "config-json", long, value_delimiter(','))]
    paths_json: Vec<PathBuf>,

    /// Vector config files in YAML format to test.
    #[arg(id = "config-yaml", long, value_delimiter(','))]
    paths_yaml: Vec<PathBuf>,

    /// Any number of Vector config files to test. If none are specified the
    /// default config path `/etc/vector/vector.yaml` will be targeted.
    // Positional arguments with no format hint; format is inferred downstream.
    #[arg(value_delimiter(','))]
    paths: Vec<PathBuf>,

    /// Read configuration from files in one or more directories.
    /// File format is detected from the file name.
    ///
    /// Files not ending in .toml, .json, .yaml, or .yml will be ignored.
    #[arg(
        id = "config-dir",
        short = 'C',
        long,
        env = "VECTOR_CONFIG_DIR",
        value_delimiter(',')
    )]
    pub config_dirs: Vec<PathBuf>,

    /// Output path for JUnit reports
    // `None` (flag omitted) disables JUnit report generation entirely; the
    // reporter that consumes this early-returns when no paths are present.
    #[arg(id = "junit-report", long, value_delimiter(','))]
    junit_report_paths: Option<Vec<PathBuf>>,
}
impl Opts {
    /// Collects every configured config location into a single list of
    /// `ConfigPath`s: explicit files (with a format hint where the flag
    /// implies one) followed by configuration directories.
    fn paths_with_formats(&self) -> Vec<config::ConfigPath> {
        // Explicit file paths, tagged with the format their CLI flag implies
        // (positional paths carry no hint and are format-detected later).
        let file_paths = config::merge_path_lists(vec![
            (&self.paths, None),
            (&self.paths_toml, Some(config::Format::Toml)),
            (&self.paths_json, Some(config::Format::Json)),
            (&self.paths_yaml, Some(config::Format::Yaml)),
        ])
        .map(|(path, hint)| config::ConfigPath::File(path, hint));

        // Directories scanned for config files by extension.
        let dir_paths = self
            .config_dirs
            .iter()
            .map(|dir| config::ConfigPath::Dir(dir.to_path_buf()));

        file_paths.chain(dir_paths).collect()
    }
}
/// Accumulates per-test results and renders them as a JUnit XML report.
///
/// When `output_paths` is `None` (no `--junit-report` flag was given) the
/// reporting methods are no-ops, so callers can use the reporter
/// unconditionally.
#[derive(Debug)]
pub struct JUnitReporter<'a> {
    // Top-level JUnit report; receives the test suite when finalized.
    report: Report,
    // Single suite collecting one `TestCase` per unit test.
    test_suite: TestSuite,
    // Destination files for the rendered XML; `None` disables reporting.
    output_paths: Option<&'a Vec<PathBuf>>,
}
impl<'a> JUnitReporter<'a> {
    /// Creates a reporter writing to `paths`; `None` makes every method a
    /// no-op so callers need not branch on whether reporting is enabled.
    fn new(paths: Option<&'a Vec<PathBuf>>) -> Self {
        Self {
            report: Report::new("Vector Unit Tests"),
            test_suite: TestSuite::new("Test Suite"),
            output_paths: paths,
        }
    }

    /// Records one test outcome. An empty `errors` slice means the test
    /// passed; otherwise a failure case is recorded whose description joins
    /// every error message.
    fn add_test_result(&mut self, name: &str, errors: &[String], time: Duration) {
        // Early return in case no output paths were specified.
        if self.output_paths.is_none() {
            return;
        }
        // Build the status once, then construct the test case a single time
        // (previously the TestCase construction was duplicated per branch).
        let status = if errors.is_empty() {
            TestCaseStatus::success()
        } else {
            let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
            status.set_description(errors.join("\n"));
            status
        };
        let mut test_case = TestCase::new(name.to_owned(), status);
        test_case.set_time(time);
        self.test_suite.add_test_case(test_case);
    }

    /// Finalizes the suite with the total elapsed `time`, serializes the
    /// report to XML, and writes it to every configured output path.
    ///
    /// # Errors
    /// Returns a stringified error if serialization or any file write fails.
    fn write_reports(mut self, time: Duration) -> Result<(), String> {
        // Early return in case no output paths were specified; `let else`
        // also gives us the paths without a later `unwrap()`.
        let Some(paths) = self.output_paths else {
            return Ok(());
        };
        // Create a report from the collected test cases.
        self.test_suite.set_time(time);
        self.report.add_test_suite(self.test_suite);
        let report_bytes = self
            .report
            .to_string()
            .map_err(|error| error.to_string())?
            .into_bytes();
        // Write the identical serialized report to each requested path.
        for path in paths {
            File::create(path)
                .and_then(|mut file| file.write_all(&report_bytes))
                .map_err(|error| error.to_string())?;
        }
        Ok(())
    }
}
pub async fn cmd(opts: &Opts, signal_handler: &mut signal::SignalHandler) -> exitcode::ExitCode {
let mut aggregated_test_errors: Vec<(String, Vec<String>)> = Vec::new();
let paths = opts.paths_with_formats();
let paths = match config::process_paths(&paths) {
Some(paths) => paths,
None => return exitcode::CONFIG,
};
let mut junit_reporter = JUnitReporter::new(opts.junit_report_paths.as_ref());
#[allow(clippy::print_stdout)]
{
println!("Running tests");
}
match config::build_unit_tests_main(&paths, signal_handler).await {
Ok(tests) => {
if tests.is_empty() {
#[allow(clippy::print_stdout)]
{
println!("{}", "No tests found.".yellow());
}
} else {
let test_suite_start = Instant::now();
for test in tests {
let name = test.name.clone();
let test_case_start = Instant::now();
let UnitTestResult { errors } = test.run().await;
let test_case_elapsed = test_case_start.elapsed();
junit_reporter.add_test_result(&name, &errors, test_case_elapsed);
if !errors.is_empty() {
#[allow(clippy::print_stdout)]
{
println!("test {} ... {}", name, "failed".red());
}
aggregated_test_errors.push((name, errors));
} else {
#[allow(clippy::print_stdout)]
{
println!("test {} ... {}", name, "passed".green());
}
}
}
let test_suite_elapsed = test_suite_start.elapsed();
match junit_reporter.write_reports(test_suite_elapsed) {
Ok(()) => {}
Err(error) => {
error!("Failed to execute tests:\n{}.", error);
return exitcode::CONFIG;
}
}
}
}
Err(errors) => {
error!("Failed to execute tests:\n{}.", errors.join("\n"));
return exitcode::CONFIG;
}
}
if !aggregated_test_errors.is_empty() {
#[allow(clippy::print_stdout)]
{
println!("\nfailures:");
}
for (test_name, fails) in aggregated_test_errors {
#[allow(clippy::print_stdout)]
{
println!("\ntest {test_name}:\n");
}
for fail in fails {
#[allow(clippy::print_stdout)]
{
println!("{fail}\n");
}
}
}
exitcode::CONFIG
} else {
exitcode::OK
}
}