Commit be02767

Checkpoint
1 parent 4b58ebf commit be02767

File tree

6 files changed: +868 −1 lines changed

Lines changed: 79 additions & 0 deletions
@@ -0,0 +1,79 @@
# Requirements for performance tests

Each performance test consists of three phases:

1) Warmup
2) Test operation
3) Cleanup

## Common test inputs

* Duration of the test in seconds
* Number of iterations of the main test loop
* Parallel - number of operations to execute in parallel
* Disable test cleanup
* Test Proxy servers
* Results file - location to write test outputs
* Warmup - duration of the warmup in seconds
* TLS
  * Allow untrusted TLS certificates
* Advanced options
  * Print job statistics (?)
  * Track latency and print per-operation latency statistics
  * Target throughput (operations/second) (?)
* Language specific options
  * Max I/O completion threads
  * Minimum number of asynchronous I/O threads in the thread pool
  * Minimum number of worker threads the thread pool creates on demand
  * Sync - run a synchronous version of the test

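As a concrete illustration of how these inputs surface on the command line, the perf test harness described below maps the duration, warmup, iteration, parallelism, results-file, and cleanup inputs to `clap` arguments, so a run might be invoked as `perf-tests --duration 60 --warmup 5 --parallel 8 --test-results ./results.json upload_blob --size 1048576`, where the `upload_blob` subcommand and its `--size` option are hypothetical test-specific additions.
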
## Expected test outputs

Each test is expected to generate the following elements:

* Package versions - the set of packages tested and their versions
* Operations per second - double-precision float
* Standard output of the test
* Standard error of the test
* Exception - text of any exceptions thrown during the test
* Average CPU use during the test - double-precision float
* Average memory use during the test - double-precision float

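To make the shape of these outputs concrete, a single run's results could be captured in a structure along these lines (a sketch only; the `TestResults` name and field layout are illustrative and not defined by this commit):

```rust
use std::collections::HashMap;

/// Illustrative results record for one performance test run (hypothetical shape).
pub struct TestResults {
    /// Package name mapped to the version that was tested.
    pub package_versions: HashMap<String, String>,
    /// Measured throughput in operations per second.
    pub operations_per_second: f64,
    /// Captured standard output of the test.
    pub stdout: String,
    /// Captured standard error of the test.
    pub stderr: String,
    /// Text of any exception thrown during the test, if one occurred.
    pub exception: Option<String>,
    /// Average CPU use during the test.
    pub average_cpu: f64,
    /// Average memory use during the test.
    pub average_memory: f64,
}
```
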
## Perf Test Harness

Each performance test defines a `get_metadata()` function which returns a `TestMetadata` structure.

A `TestMetadata` structure contains the following fields:

```rust
pub struct TestMetadata {
    pub name: &'static str,
    pub description: &'static str,
    pub options: &'static [&'static TestOption],
}
```

A `TestOption` defines a single test-specific option; a test's options are merged with the common test inputs to define the command line for the performance test.

```rust
pub struct TestOption {
    /// The name of the test option. This is used as the key in the `TestArguments` map.
    pub name: &'static str,

    /// The long form activator for this argument, e.g. `--test-option`. Does not include the hyphens.
    pub long_activator: &'static str,

    /// The short form activator for this argument, e.g. `-t`. Does not include the hyphen.
    pub short_activator: char,

    /// Display message - displayed in the --help message.
    pub display_message: &'static str,

    /// Expected argument count.
    pub expected_args_len: usize,

    /// Required.
    pub mandatory: bool,

    /// Argument value is sensitive and should be sanitized.
    pub sensitive: bool,
}
```
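
For example, a performance test might expose its metadata along these lines (a sketch only; the `upload_blob` test and its `size` option are hypothetical):

```rust
use azure_core_test::perf::{TestMetadata, TestOption};

// Hypothetical test-specific option letting the test vary its payload size.
static SIZE_OPTION: TestOption = TestOption {
    name: "size",
    short_activator: 's',
    long_activator: "size",
    display_message: "Size of the payload to upload, in bytes",
    expected_args_len: 1,
    mandatory: false,
    sensitive: false,
};

static UPLOAD_BLOB_OPTIONS: [&TestOption; 1] = [&SIZE_OPTION];

// Each performance test defines one of these; the harness merges the options
// with the common test inputs to build the test's command line.
pub fn get_metadata() -> TestMetadata {
    TestMetadata {
        name: "upload_blob",
        description: "Measures blob upload throughput",
        options: &UPLOAD_BLOB_OPTIONS,
    }
}
```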

sdk/core/azure_core_test/Cargo.toml

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@ async-trait.workspace = true
 azure_core = { workspace = true, features = ["test"] }
 azure_core_test_macros.workspace = true
 azure_identity.workspace = true
+clap.workspace = true
 dotenvy = "0.15.7"
 futures.workspace = true
 rand.workspace = true

sdk/core/azure_core_test/src/lib.rs

Lines changed: 4 additions & 1 deletion
@@ -7,14 +7,15 @@ pub mod credentials;
 #[cfg(doctest)]
 mod docs;
 pub mod http;
+pub mod perf;
 pub mod proxy;
 pub mod recorded;
 mod recording;
 #[cfg(doctest)]
 mod root_readme;
 pub mod stream;
 pub mod tracing;
-
+use crate::perf::PerfRunner;
 use azure_core::Error;
 pub use azure_core::{error::ErrorKind, test::TestMode};
 pub use proxy::{matchers::*, sanitizers::*};
@@ -36,6 +37,7 @@ pub struct TestContext {
     module_name: &'static str,
     name: &'static str,
     recording: Option<Recording>,
+    _performance: Option<PerfRunner>,
 }

 impl TestContext {
@@ -59,6 +61,7 @@ impl TestContext {
             module_name: test_module,
             name,
             recording: None,
+            _performance: None,
         })
     }

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
# Performance Tests
Lines changed: 235 additions & 0 deletions
@@ -0,0 +1,235 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#![doc = include_str!("README.md")]

use clap::{parser::MatchesError, ArgMatches};
use std::any::Any;

/// Metadata about a performance test.
#[derive(Debug, Clone)]
pub struct TestMetadata {
    /// The name of the test.
    pub name: &'static str,
    /// A brief description of the test.
    pub description: &'static str,
    /// The set of test options supported by this test.
    pub options: &'static [&'static TestOption],
}

/// A `TestOption` defines a single test-specific option, which is merged with the common test inputs to define the command line for the performance test.
#[derive(Debug, Default)]
pub struct TestOption {
    /// The name of the test option. This is used as the key in the `TestArguments` map.
    pub name: &'static str,

    /// The short form activator for this argument, e.g. `-t`. Does not include the hyphen.
    pub short_activator: char,

    /// The long form activator for this argument, e.g. `--test-option`. Does not include the hyphens.
    pub long_activator: &'static str,

    /// Display message - displayed in the --help message.
    pub display_message: &'static str,

    /// Expected argument count.
    pub expected_args_len: usize,

    /// Required.
    pub mandatory: bool,

    /// Argument value is sensitive and should be sanitized.
    pub sensitive: bool,
}

#[derive(Debug)]
#[allow(dead_code)]
struct PerfRunnerOptions {
    no_cleanup: bool,
    iterations: u32,
    parallel: u32,
    test: Option<String>,
    duration: u32,
    warmup: u32,
    test_results_filename: String,
}

impl PerfRunnerOptions {}

impl From<&ArgMatches> for PerfRunnerOptions {
    fn from(matches: &ArgMatches) -> Self {
        Self {
            no_cleanup: matches.get_flag("no-cleanup"),
            iterations: *matches
                .get_one::<u32>("iterations")
                .expect("defaulted by clap"),
            parallel: *matches
                .get_one::<u32>("parallel")
                .expect("defaulted by clap"),
            test: matches.get_one::<String>("test").cloned(),
            duration: *matches
                .get_one::<u32>("duration")
                .expect("defaulted by clap"),
            warmup: *matches.get_one::<u32>("warmup").expect("defaulted by clap"),
            test_results_filename: matches
                .get_one::<String>("test-results")
                .expect("defaulted by clap")
                .to_string(),
        }
    }
}

/// Context information required by performance tests.
#[derive(Debug)]
pub struct PerfRunner {
    options: PerfRunnerOptions,
    arguments: ArgMatches,
}

impl PerfRunner {
    /// Creates a runner by parsing the process command line built from the provided test metadata.
    pub fn new(tests: Vec<TestMetadata>) -> azure_core::Result<Self> {
        let command = Self::get_command_from_metadata(tests);
        let arguments = command.get_matches();
        Ok(Self {
            options: PerfRunnerOptions::from(&arguments),
            arguments,
        })
    }

    /// Creates a runner from an explicit argument list (test-only helper).
    #[cfg(test)]
    pub fn with_command_line(
        tests: Vec<TestMetadata>,
        args: Vec<&str>,
    ) -> azure_core::Result<Self> {
        let command = Self::get_command_from_metadata(tests);
        let arguments = command.try_get_matches_from(args).map_err(|e| {
            azure_core::error::Error::with_error(
                azure_core::error::ErrorKind::Other,
                e,
                "Failed to parse command line arguments.",
            )
        })?;
        Ok(Self {
            options: PerfRunnerOptions::from(&arguments),
            arguments,
        })
    }

    /// Gets a reference to a typed argument by its id.
    pub fn try_get_one<T>(&self, id: &str) -> Result<Option<&T>, MatchesError>
    where
        T: Any + Clone + Send + Sync + 'static,
    {
        self.arguments.try_get_one::<T>(id)
    }

    /// Gets a reference to a typed argument by its id from the named subcommand, if that subcommand was selected.
    pub fn try_get_one_subcommand<T>(
        &self,
        subcommand: &str,
        id: &str,
    ) -> Result<Option<&T>, MatchesError>
    where
        T: Any + Clone + Send + Sync + 'static,
    {
        let subcommand = self.arguments.subcommand_matches(subcommand);
        if let Some(subcommand) = subcommand {
            subcommand.try_get_one::<T>(id)
        } else {
            Ok(None)
        }
    }

    #[allow(dead_code)]
    async fn run_test<F, Fut>(&self, test: F) -> azure_core::Result<()>
    where
        F: Fn(u32, u32) -> Fut,
        Fut: std::future::Future<Output = azure_core::Result<()>>,
    {
        test(self.options.iterations, self.options.parallel).await
    }

    // * Disable test cleanup
    // * Test Proxy servers.
    // * TLS
    //   * Allow untrusted TLS certificates
    // * Advanced options
    //   * Print job statistics (?)
    //   * Track latency and print per-operation latency statistics
    //   * Target throughput (operations/second) (?)
    // * Language specific options
    //   * Max I/O completion threads
    //   * Minimum number of asynchronous I/O threads in the thread pool
    //   * Minimum number of worker threads the thread pool creates on demand
    //   * Sync - run a synchronous version of the test

    /// Constructs a `clap::Command` from the provided test metadata.
    fn get_command_from_metadata(tests: Vec<TestMetadata>) -> clap::Command {
        let mut command = clap::Command::new("perf-tests")
            .about("Run performance tests for the Azure SDK for Rust")
            .arg(
                clap::arg!(--iterations <COUNT> "The number of iterations to run each test")
                    .required(false)
                    .default_value("1")
                    .value_parser(clap::value_parser!(u32))
                    .global(true),
            )
            .arg(
                clap::arg!(--parallel <COUNT> "The number of concurrent tasks to use when running each test")
                    .required(false)
                    .default_value("1")
                    .value_parser(clap::value_parser!(u32))
                    .global(true),
            )
            .arg(
                clap::arg!(--test <TEST_NAME> "The name of the test to run. If not specified, all tests will be run.")
                    .required(false)
                    .global(true),
            )
            .arg(
                clap::arg!(--duration <SECONDS> "The duration of each test in seconds")
                    .required(false)
                    .default_value("30")
                    .value_parser(clap::value_parser!(u32))
                    .global(true),
            )
            .arg(
                clap::arg!(--warmup <SECONDS> "The duration of the warmup period in seconds")
                    .required(false)
                    .default_value("5")
                    .value_parser(clap::value_parser!(u32))
                    .global(true),
            )
            .arg(
                clap::arg!(--"test-results" <FILE> "The file to write test results to")
                    .required(false)
                    .default_value("./tests/results.json")
                    .global(true),
            )
            .arg(
                clap::arg!(--"no-cleanup" "Disable test cleanup")
                    .required(false)
                    .global(true),
            );
        for test in &tests {
            let mut subcommand = clap::Command::new(test.name).about(test.description);
            for option in test.options {
                let mut arg = clap::Arg::new(option.name)
                    .help(option.display_message)
                    .long(option.long_activator)
                    .num_args(option.expected_args_len..=option.expected_args_len)
                    .required(option.mandatory)
                    .global(false);
                if option.short_activator != '\0' {
                    arg = arg.short(option.short_activator);
                }
                if option.sensitive {
                    arg = arg.hide(true);
                }
                subcommand = subcommand.arg(arg);
            }
            command = command.subcommand(subcommand);
        }

        command
    }
}

#[cfg(test)]
mod tests;
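
To show how these pieces fit together, a perf test binary might drive the runner roughly as follows (a sketch only; the `upload_blob` test, its `size` option, and the `main` wiring are hypothetical and not part of this commit):

```rust
use azure_core_test::perf::{PerfRunner, TestMetadata, TestOption};

// Hypothetical test-specific option.
static SIZE_OPTION: TestOption = TestOption {
    name: "size",
    short_activator: 's',
    long_activator: "size",
    display_message: "Size of the payload to upload, in bytes",
    expected_args_len: 1,
    mandatory: false,
    sensitive: false,
};

static UPLOAD_BLOB_OPTIONS: [&TestOption; 1] = [&SIZE_OPTION];

fn main() -> azure_core::Result<()> {
    // Build the command line from the common options plus one subcommand per test,
    // then parse the process arguments.
    let runner = PerfRunner::new(vec![TestMetadata {
        name: "upload_blob",
        description: "Measures blob upload throughput",
        options: &UPLOAD_BLOB_OPTIONS,
    }])?;

    // Common options are available at the top level...
    let parallel = runner.try_get_one::<u32>("parallel").ok().flatten().copied();
    // ...and test-specific options are read from the selected subcommand.
    let size = runner
        .try_get_one_subcommand::<String>("upload_blob", "size")
        .ok()
        .flatten()
        .cloned();

    println!("parallel={parallel:?}, size={size:?}");
    Ok(())
}
```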
