diff --git a/doc/performance-test-requirements.md b/doc/performance-test-requirements.md new file mode 100644 index 0000000000..6b0a3dd38b --- /dev/null +++ b/doc/performance-test-requirements.md @@ -0,0 +1,79 @@ +# Requirements for performance tests + +Each performance test consists of three phases: + +1) Warmup +2) Test operation +3) Cleanup + +## Common test inputs + +* Duration of the test in seconds +* Number of iterations of the main test loop +* Parallel - number of operations to execute in parallel +* Disable test cleanup +* Test Proxy servers +* Results file - location to write test outputs +* Warmup - duration of the warmup in seconds +* TLS + * Allow untrusted TLS certificates +* Advanced options + * Print job statistics (?) + * Track latency and print per-operation latency statistics + * Target throughput (operations/second) (?) +* Language specific options + * Max I/O completion threads + * Minimum number of asynchronous I/O threads in the thread pool + * Minimum number of worker threads the thread pool creates on demand + * Sync - run a synchronous version of the test + +## Expected test outputs + +Each test is expected to generate the following elements: + +* Package versions - the set of packages tested and their versions +* Operations per second - double-precision float +* Standard output of the test +* Standard error of the test +* Exception - text of any exceptions thrown during the test +* Average CPU use during the test - double-precision float +* Average memory use during the test - double-precision float + +## Perf Test Harness + +Each performance test defines a `get_metadata()` function which returns a `TestMetadata` structure. + +A `TestMetadata` structure contains the following fields: + +```rust +pub struct TestMetadata { + name: &'static str, + description: &'static str, + options: &'static [&'static TestOption], +} +``` + +Each `TestOption` defines a single option for the test; the test options are merged with the common test inputs to define the command line for the performance test. + +```rust +pub struct TestOption { + /// The name of the test option. This is used as the key in the `TestArguments` map. + name: &'static str, + + /// The long form activator for this option, e.g. `--test-option`. Does not include the hyphens. + long_activator: &'static str, + + /// The short form activator for this option, e.g. `-t`. Does not include the hyphen. + short_activator: &'static str, + + /// Display message - displayed in the --help message. + display_message: &'static str, + + /// Expected argument count. + expected_args_len: u16, + + /// Required. + mandatory: bool, + + /// Argument value is sensitive and should be sanitized. + sensitive: bool, +} +``` diff --git a/sdk/core/azure_core/CHANGELOG.md b/sdk/core/azure_core/CHANGELOG.md index 6225987745..8b79b35be1 100644 --- a/sdk/core/azure_core/CHANGELOG.md +++ b/sdk/core/azure_core/CHANGELOG.md @@ -16,7 +16,6 @@ ### Breaking Changes - - Changed `ClientOptions::retry` from `Option<RetryOptions>` to `RetryOptions`. - Changed `DeserializeWith::deserialize_with()` to be sync. - Changed `Pipeline::send()` to return a `Result`.
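As an illustrative sketch of the harness contract described in `doc/performance-test-requirements.md` above (the `COUNT_OPTION` static, the test name, and the option values are hypothetical; `get_metadata()` is shown as a free function):

```rust
// Sketch only: a hypothetical test describing itself to the perf harness,
// using the TestMetadata/TestOption shapes defined in the document above.
static COUNT_OPTION: TestOption = TestOption {
    name: "count",
    long_activator: "count",
    short_activator: "c",
    display_message: "The number of items the test should process",
    expected_args_len: 1,
    mandatory: true,
    sensitive: false,
};

fn get_metadata() -> TestMetadata {
    TestMetadata {
        name: "example_test",
        description: "An illustrative performance test",
        options: &[&COUNT_OPTION],
    }
}
```

The implementation in `azure_core_test::perf` below evolves this shape slightly: it uses `PerfTestMetadata` and `PerfTestOption` with an owned `Vec` of options plus a `create_test` factory function.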
diff --git a/sdk/core/azure_core_test/Cargo.toml b/sdk/core/azure_core_test/Cargo.toml index 13dc3eb703..34c6cdf22b 100644 --- a/sdk/core/azure_core_test/Cargo.toml +++ b/sdk/core/azure_core_test/Cargo.toml @@ -23,6 +23,7 @@ async-trait.workspace = true azure_core = { workspace = true, features = ["test"] } azure_core_test_macros.workspace = true azure_identity.workspace = true +clap.workspace = true dotenvy = "0.15.7" futures.workspace = true rand.workspace = true diff --git a/sdk/core/azure_core_test/src/lib.rs b/sdk/core/azure_core_test/src/lib.rs index 76a936ca32..960954761f 100644 --- a/sdk/core/azure_core_test/src/lib.rs +++ b/sdk/core/azure_core_test/src/lib.rs @@ -7,6 +7,7 @@ pub mod credentials; #[cfg(doctest)] mod docs; pub mod http; +pub mod perf; pub mod proxy; pub mod recorded; mod recording; @@ -14,7 +15,6 @@ mod recording; mod root_readme; pub mod stream; pub mod tracing; - use azure_core::Error; pub use azure_core::{error::ErrorKind, test::TestMode}; pub use proxy::{matchers::*, sanitizers::*}; @@ -38,6 +38,8 @@ pub struct TestContext { recording: Option<Recording>, } +unsafe impl Send for TestContext {} + impl TestContext { pub(crate) fn new( crate_dir: &'static str, diff --git a/sdk/core/azure_core_test/src/perf/README.md b/sdk/core/azure_core_test/src/perf/README.md new file mode 100644 index 0000000000..09b20269d9 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/README.md @@ -0,0 +1 @@ +# Performance Tests diff --git a/sdk/core/azure_core_test/src/perf/config_tests.rs b/sdk/core/azure_core_test/src/perf/config_tests.rs new file mode 100644 index 0000000000..0609c9ee42 --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/config_tests.rs @@ -0,0 +1,687 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Tests for configuration of the performance test runner. +//! +//! These tests cover various scenarios for initializing the `PerfRunner` with different sets of +//! command-line arguments and test metadata. They ensure that the runner correctly parses +//! arguments, handles defaults, and manages errors appropriately. +//!
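+//!
+//! The metadata helpers below register hypothetical tests whose `create_test` function
+//! (`create_failed_test`) intentionally fails, since these tests exercise only argument
+//! parsing and never need a live test instance.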
+use super::*; +use std::{env, error::Error}; + +fn create_failed_test(_runner: &PerfRunner) -> CreatePerfTestReturn { + Box::pin(async { + Err(azure_core::Error::with_message( + azure_core::error::ErrorKind::Other, + "Intentional failure to create test instance", + )) + }) +} + +// Helper function to create a basic test metadata for testing +fn create_basic_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "basic_test", + description: "A basic test for testing purposes", + options: vec![PerfTestOption { + name: "test-option", + short_activator: 't', + long_activator: "test-option", + display_message: "Test option for basic test", + expected_args_len: 1, + mandatory: false, + sensitive: false, + }], + create_test: create_failed_test, + } +} + +// Helper function to create test metadata with multiple options +fn create_complex_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "complex_test", + description: "A complex test with multiple options", + options: vec![ + PerfTestOption { + name: "mandatory-option", + short_activator: 'm', + long_activator: "mandatory", + display_message: "Mandatory option", + expected_args_len: 1, + mandatory: true, + sensitive: false, + }, + PerfTestOption { + name: "sensitive-option", + short_activator: 's', + long_activator: "sensitive", + display_message: "Sensitive option", + expected_args_len: 1, + mandatory: false, + sensitive: true, + }, + PerfTestOption { + name: "flag-option", + short_activator: 'f', + long_activator: "flag", + display_message: "Flag option", + ..Default::default() + }, + ], + create_test: create_failed_test, + } +} + +// Helper function to create test metadata without short activators +fn create_no_short_activator_test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "no_short_test", + description: "Test without short activators", + options: vec![PerfTestOption { + name: "long-only", + short_activator: '\0', + long_activator: "long-only", + display_message: "Long activator only", + expected_args_len: 1, + mandatory: false, + sensitive: false, + }], + create_test: create_failed_test, + } +} + +#[test] +fn test_perf_runner_new_with_empty_tests() { + let tests = vec![]; + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with empty tests" + ); + let runner = result.unwrap(); + + // Test default values + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); + assert_eq!(runner.options.test_results_filename, "./tests/results.json"); + assert!(!runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_new_with_single_test() { + let tests = vec![create_basic_test_metadata()]; + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with single test" + ); + let runner = result.unwrap(); + + // Verify default values are set + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); +} + +#[test] +fn test_perf_runner_new_with_multiple_tests() { + let tests = vec![ + create_basic_test_metadata(), + create_complex_test_metadata(), + 
create_no_short_activator_test_metadata(), + ]; + let result = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests, + vec!["perf-tests"], + ); + + assert!( + result.is_ok(), + "PerfRunner::new should succeed with multiple tests" + ); + let _runner = result.unwrap(); +} + +#[test] +fn test_perf_runner_with_command_line_default_args() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with default args" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 1); + assert_eq!(runner.options.parallel, 1); + assert_eq!(runner.options.duration, Duration::seconds(30)); + assert_eq!(runner.options.warmup, Duration::seconds(5)); + assert!(!runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_with_command_line_custom_iterations() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "10"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom iterations" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 10); +} + +#[test] +fn test_perf_runner_with_command_line_custom_parallel() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--parallel", "5"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom parallel" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.parallel, 5); +} + +#[test] +fn test_perf_runner_with_command_line_custom_duration() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--duration", "60"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom duration" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.duration, Duration::seconds(60)); +} + +#[test] +fn test_perf_runner_with_command_line_custom_warmup() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--warmup", "10"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom warmup" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.warmup, Duration::seconds(10)); +} + +#[test] +fn test_perf_runner_with_command_line_test_results_file() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--test-results", "/tmp/results.json"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with custom test results file" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.test_results_filename, "/tmp/results.json"); +} + +#[test] +fn test_perf_runner_with_command_line_no_cleanup() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--no-cleanup"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), 
tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with no-cleanup flag" + ); + + let runner = result.unwrap(); + assert!(runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_with_command_line_all_options() { + let tests = vec![create_basic_test_metadata()]; + let args = vec![ + "perf-tests", + "--iterations", + "20", + "--parallel", + "8", + "--duration", + "120", + "--warmup", + "15", + "--test-results", + "/custom/results.json", + "--no-cleanup", + ]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with all options" + ); + + let runner = result.unwrap(); + assert_eq!(runner.options.iterations, 20); + assert_eq!(runner.options.parallel, 8); + assert_eq!(runner.options.duration, Duration::seconds(120)); + assert_eq!(runner.options.warmup, Duration::seconds(15)); + assert_eq!(runner.options.test_results_filename, "/custom/results.json"); + assert!(runner.options.no_cleanup); +} + +#[test] +fn test_perf_runner_command_line_help() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--help"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with help flag" + ); + + println!("{}", result.as_ref().err().unwrap().source().unwrap()); + + let error = result.err().unwrap(); + assert_eq!(error.kind(), &azure_core::error::ErrorKind::Other); + assert!(error.to_string().contains("Failed to parse")); +} + +#[test] +fn test_perf_runner_with_subcommand() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "basic_test", "--test-option", "value"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with subcommand" + ); + + let runner = result.unwrap(); + + let selected_test = runner + .get_selected_test_name() + .expect("A test should be selected"); + assert_eq!(selected_test, "basic_test"); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "value"); +} + +#[test] +fn test_perf_runner_with_subcommand_short_activator() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "basic_test", "-t", "short_value"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with short activator" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "short_value"); +} + +#[test] +fn test_perf_runner_with_complex_subcommand() { + let tests = vec![create_complex_test_metadata()]; + let args = vec![ + "perf-tests", + "complex_test", + "--mandatory", + "required_value", + "--sensitive", + "secret_value", + "--flag", + ]; + + println!( + "Help: {}", + PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests.clone(), + vec!["perf-tests", "--help"] + ) + .unwrap_err() + .source() + .unwrap() + ); + println!( + "Help2 : {}", + PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + tests.clone(), + 
vec!["perf-tests", "complex_test", "--help"] + ) + .unwrap_err() + .source() + .unwrap() + ); + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with complex subcommand" + ); + + let runner = result.unwrap(); + + let mandatory_value: Result> = runner.try_get_test_arg("mandatory-option"); + println!("{:?}", mandatory_value); + assert!(mandatory_value.is_ok()); + let mandatory_value = mandatory_value.unwrap(); + assert!(mandatory_value.is_some()); + assert_eq!(mandatory_value.unwrap(), "required_value"); + + let sensitive_value: Option<&String> = + runner.try_get_test_arg("sensitive-option").ok().flatten(); + assert!(sensitive_value.is_some()); + assert_eq!(sensitive_value.unwrap(), "secret_value"); + + let flag_value = runner.try_get_test_arg("flag-option").ok().flatten(); + assert!(flag_value.is_some()); + let flag_value: bool = *flag_value.unwrap(); + assert!(flag_value); +} + +#[test] +fn test_perf_runner_with_no_short_activator() { + let tests = vec![create_no_short_activator_test_metadata()]; + let args = vec!["perf-tests", "no_short_test", "--long-only", "value"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with long-only activator" + ); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner.try_get_test_arg("long-only").ok().flatten(); + assert!(option_value.is_some()); + assert_eq!(option_value.unwrap(), "value"); +} + +#[test] +fn test_perf_runner_get_one_nonexistent() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests"]; + + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + let result: Result> = runner.try_get_global_arg("nonexistent"); + assert!(result.is_err()); +} + +#[test] +fn test_perf_runner_get_one_different_types() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "42"]; + + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + + // Test getting u32 value + let iterations: Option<&u32> = runner.try_get_global_arg("iterations").ok().flatten(); + assert!(iterations.is_some()); + assert_eq!(*iterations.unwrap(), 42); + + // Test getting wrong type returns None + let iterations_as_string: Option<&String> = + runner.try_get_global_arg("iterations").ok().flatten(); + assert!(iterations_as_string.is_none()); +} + +#[test] +fn test_perf_runner_options_debug() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "5"]; + + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args).unwrap(); + + // Test that Debug is implemented for PerfRunner + let debug_output = format!("{:?}", runner); + assert!(debug_output.contains("PerfRunner")); + assert!(debug_output.contains("options")); + + // Test that PerfRunnerOptions Debug works + let options_debug = format!("{:?}", runner.options); + assert!(options_debug.contains("PerfRunnerOptions")); + assert!(options_debug.contains("iterations: 5")); + + let options = PerfRunnerOptions::from(&runner.arguments); + assert_eq!(options.iterations, 5); +} + +#[test] +fn test_test_option_debug_and_default() { + let option = PerfTestOption::default(); + + // Test default values + assert_eq!(option.name, ""); + 
assert_eq!(option.short_activator, '\0'); + assert_eq!(option.long_activator, ""); + assert_eq!(option.display_message, ""); + assert_eq!(option.expected_args_len, 0); + assert!(!option.mandatory); + assert!(!option.sensitive); + + // Test Debug implementation + let debug_output = format!("{:?}", option); + assert!(debug_output.contains("TestOption")); +} + +#[test] +fn test_perf_runner_with_invalid_numeric_value() { + let tests = vec![create_basic_test_metadata()]; + let args = vec!["perf-tests", "--iterations", "not_a_number"]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with invalid numeric value" + ); +} + +#[test] +fn test_perf_runner_with_missing_mandatory_option() { + let tests = vec![create_complex_test_metadata()]; + let args = vec!["perf-tests", "complex_test"]; // Missing mandatory option + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_err(), + "PerfRunner::with_command_line should fail with missing mandatory option" + ); +} + +#[test] +fn test_perf_runner_with_multiple_tests_and_subcommands() { + let tests = vec![create_basic_test_metadata(), create_complex_test_metadata()]; + + // Test with first subcommand + let args = vec!["perf-tests", "basic_test", "--test-option", "value1"]; + let result = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests.clone(), args); + assert!(result.is_ok()); + + let runner = result.unwrap(); + let option_value: Option<&String> = runner.try_get_test_arg("test-option").ok().flatten(); + assert_eq!(option_value.unwrap(), "value1"); + + // Test with second subcommand + let args = vec!["perf-tests", "complex_test", "--mandatory", "required"]; + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!(result.is_ok()); + + let runner = result.unwrap(); + let mandatory_value: Option<&String> = + runner.try_get_test_arg("mandatory-option").ok().flatten(); + assert_eq!(mandatory_value.unwrap(), "required"); +} + +struct ComplexTest {} + +#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +impl PerfTest for ComplexTest { + async fn setup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + println!("Setting up ComplexTest..."); + // Simulate some async setup work + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) + } + + async fn cleanup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + println!("Cleaning up ComplexTest..."); + // Simulate some async cleanup work + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) + } + + async fn run(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + // Simulate some async test work + println!("Running ComplexTest..."); + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + Ok(()) + } +} + +fn complex_test_create(_runner: &PerfRunner) -> CreatePerfTestReturn { + Box::pin(async { Ok(Box::new(ComplexTest {}) as Box<dyn PerfTest>) }) +} + +#[tokio::test] +async fn test_perf_runner_with_test_functions() { + let tests = vec![PerfTestMetadata { + name: "complex_test", + description: "A complex test with multiple options", + options: vec![ + PerfTestOption { + name: "mandatory-option", + short_activator: 'm', + long_activator: "mandatory", + display_message: "Mandatory option", + expected_args_len: 1, + mandatory: true,
sensitive: false, + }, + PerfTestOption { + name: "sensitive-option", + short_activator: 's', + long_activator: "sensitive", + display_message: "Sensitive option", + expected_args_len: 1, + mandatory: false, + sensitive: true, + }, + PerfTestOption { + name: "flag-option", + short_activator: 'f', + long_activator: "flag", + display_message: "Flag option", + expected_args_len: 0, + mandatory: false, + sensitive: false, + }, + ], + create_test: complex_test_create, + }]; + let args = vec![ + "perf-tests", + "complex_test", + "--mandatory", + "required_value", + "--sensitive", + "secret_value", + "--flag", + ]; + + let result = PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), tests, args); + assert!( + result.is_ok(), + "PerfRunner::with_command_line should succeed with complex subcommand" + ); + + let runner = result.unwrap(); + + let mandatory_value: Result<Option<&String>> = runner.try_get_test_arg("mandatory-option"); + println!("{:?}", mandatory_value); + assert!(mandatory_value.is_ok()); + let mandatory_value = mandatory_value.unwrap(); + assert!(mandatory_value.is_some()); + assert_eq!(mandatory_value.unwrap(), "required_value"); + + let sensitive_value: Option<&String> = + runner.try_get_test_arg("sensitive-option").ok().flatten(); + assert!(sensitive_value.is_some()); + assert_eq!(sensitive_value.unwrap(), "secret_value"); + + let flag_value = runner.try_get_test_arg("flag-option").ok().flatten(); + assert!(flag_value.is_some()); + let flag_value: bool = *flag_value.unwrap(); + assert!(flag_value); + + let perf_tests_impl = (runner.tests[0].create_test)(&runner) + .await + .expect("Failed to create test instance"); + + let crate_dir = env!("CARGO_MANIFEST_DIR"); + + let test_context = Arc::new( + TestContext::new(crate_dir, crate_dir, runner.tests[0].name) + .expect("Failed to create TestContext"), + ); + + perf_tests_impl + .setup(test_context.clone()) + .await + .expect("Setup failed"); + perf_tests_impl + .run(test_context.clone()) + .await + .expect("Run failed"); + perf_tests_impl + .cleanup(test_context.clone()) + .await + .expect("Cleanup failed"); +} diff --git a/sdk/core/azure_core_test/src/perf/framework_tests.rs b/sdk/core/azure_core_test/src/perf/framework_tests.rs new file mode 100644 index 0000000000..d5b375c12b --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/framework_tests.rs @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Tests for the functioning of the performance test runner. +//! +//! These tests cover various scenarios for running the `PerfRunner` with different options and measurements. +//!
+use super::*; +use std::boxed::Box; + +#[tokio::test] +async fn test_perf_runner_with_no_tests() { + let args = vec!["perf_test", "--iterations", "1", "--duration", "1"]; + let runner = + PerfRunner::with_command_line(env!("CARGO_MANIFEST_DIR"), file!(), vec![], args).unwrap(); + + let result = runner.run().await; + assert!(result.is_err()); +} + +fn create_fibonacci1_test(runner: &PerfRunner) -> CreatePerfTestReturn { + struct Fibonacci1Test { + count: u32, + } + + impl Fibonacci1Test { + fn fibonacci(n: u32) -> u32 { + if n <= 1 { + n + } else { + Self::fibonacci(n - 1) + Self::fibonacci(n - 2) + } + } + } + + #[async_trait::async_trait] + impl PerfTest for Fibonacci1Test { + async fn setup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + Ok(()) + } + async fn run(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + let _result = Self::fibonacci(self.count); + // This is a CPU bound test, so yield to allow other tasks to run. Otherwise we jam the tokio scheduler. + // Note that this significantly reduces the performance of the test, but it is necessary to allow parallelism. + // + // In a real-world scenario, the test would be doing async work (e.g. network I/O) which would yield naturally. + tokio::task::yield_now().await; + Ok(()) + } + async fn cleanup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + Ok(()) + } + } + + // Helper function to handle the async creation of the test. + async fn create_test(runner: PerfRunner) -> Result<Box<dyn PerfTest>> { + let count: Option<&String> = runner.try_get_test_arg("count")?; + + println!("Fibonacci1Test with count: {:?}", count); + let count = count.expect("count argument is mandatory"); + let count = count.parse::<u32>().map_err(|e| { + azure_core::Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Invalid count argument", + ) + })?; + Ok(Box::new(Fibonacci1Test { count }) as Box<dyn PerfTest>) + } + + // Return a pinned future that creates the test. + Box::pin(create_test(runner.clone())) +} + +#[tokio::test] +async fn test_perf_runner_with_single_test() { + let args = vec![ + "perf_test", + "--iterations", + "1", + "--parallel", + "30", + "--duration", + "10", + "--warmup", + "1", + "fibonacci1", + "-c", + "10", + ]; + let runner = PerfRunner::with_command_line( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![PerfTestMetadata { + name: "fibonacci1", + description: "A basic test for testing purposes", + options: vec![PerfTestOption { + name: "count", + mandatory: true, + short_activator: 'c', + expected_args_len: 1, + display_message: "The Fibonacci number to compute", + ..Default::default() + }], + create_test: create_fibonacci1_test, + }], + args, + ) + .unwrap(); + + let result = runner.run().await; + assert!(result.is_ok()); + println!("Result: {:?}", result); +} diff --git a/sdk/core/azure_core_test/src/perf/mod.rs b/sdk/core/azure_core_test/src/perf/mod.rs new file mode 100644 index 0000000000..b2ae1e7afc --- /dev/null +++ b/sdk/core/azure_core_test/src/perf/mod.rs @@ -0,0 +1,533 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#![doc = include_str!("README.md")] +#![cfg(not(target_arch = "wasm32"))] + +use crate::TestContext; +use azure_core::{time::Duration, Error, Result}; +use clap::ArgMatches; +use serde::Serialize; +use std::{ + any::Any, + fmt::Display, + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; +use tokio::{select, task::JoinSet}; + +/// A trait representing a performance test. +/// +/// Performance tests have three phases: +/// 1.
`setup`: Prepare the test environment. This is called once per iteration. +/// 2. `run`: Execute the performance test. This is called repeatedly for the duration of the test. +/// 3. `cleanup`: Clean up the test environment. This is called once per iteration, after the timed run, unless cleanup is disabled. +/// +/// Note that the "run" phase will be executed in parallel across multiple tasks, so it must be thread-safe. +#[async_trait::async_trait] +pub trait PerfTest: Send + Sync { + /// Set up the test environment. + /// + /// Performs whatever steps are needed to set up the test environment. This method is called once per iteration of the test. + /// + /// # Arguments + /// - `context`: An `Arc` to a `TestContext` that provides context information for the test. + async fn setup(&self, context: Arc<TestContext>) -> azure_core::Result<()>; + + /// Run a single operation of the performance test. + /// + /// This method is called repeatedly, potentially from multiple parallel tasks, for the duration of the test. + async fn run(&self, context: Arc<TestContext>) -> azure_core::Result<()>; + + /// Clean up the test environment. Called after the timed run completes, unless cleanup is disabled. + async fn cleanup(&self, context: Arc<TestContext>) -> azure_core::Result<()>; +} + +pub type CreatePerfTestReturn = + Pin<Box<dyn Future<Output = Result<Box<dyn PerfTest>>>>>; + +/// Metadata about a performance test. +#[derive(Debug, Clone)] +pub struct PerfTestMetadata { + /// The name of the test suite. + pub name: &'static str, + /// A brief description of the test suite. + pub description: &'static str, + /// The set of test options supported by this test. + pub options: Vec<PerfTestOption>, + + /// A function used to create the performance test. + pub create_test: fn(&PerfRunner) -> CreatePerfTestReturn, +} + +/// A `PerfTestOption` defines a single option for the test; the test options are merged with the common test inputs to define the command line for the performance test. +#[derive(Debug, Default, Clone)] +pub struct PerfTestOption { + /// The name of the test option. This is used as the key in the `TestArguments` map. + pub name: &'static str, + + /// The short form activator for this argument, e.g. `-t`. Does not include the hyphen. + pub short_activator: char, + + /// The long form activator for this argument, e.g. `--test-option`. Does not include the hyphens. + pub long_activator: &'static str, + + /// Display message - displayed in the --help message. + pub display_message: &'static str, + + /// Expected argument count. + pub expected_args_len: usize, + + /// Required. + pub mandatory: bool, + + /// Argument value is sensitive and should be sanitized. + pub sensitive: bool, +} + +#[derive(Debug, Clone, Default, Serialize)] +#[allow(dead_code)] +struct PerfTestOutputs { + // * Package Versions - a set of packages tested and their versions.
+ pub package_versions: Vec<String>, + pub test_name: String, + pub operations_per_second: f64, + pub average_cpu_use: Option<f64>, + pub average_memory_use: Option<f64>, +} + +#[derive(Debug, Clone)] +struct PerfRunnerOptions { + no_cleanup: bool, + iterations: u32, + parallel: usize, + duration: Duration, + warmup: Duration, + disable_progress: bool, + test_results_filename: String, +} + +impl Display for PerfRunnerOptions { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "PerfRunnerOptions {{ no_cleanup: {}, iterations: {}, parallel: {}, duration: {}, warmup: {}, disable_progress: {}, test_results_filename: '{}' }}", + self.no_cleanup, + self.iterations, + self.parallel, + self.duration, + self.warmup, + self.disable_progress, + self.test_results_filename + ) + } +} + +impl From<&ArgMatches> for PerfRunnerOptions { + fn from(matches: &ArgMatches) -> Self { + Self { + no_cleanup: matches.get_flag("no-cleanup"), + iterations: *matches + .get_one::<u32>("iterations") + .expect("defaulted by clap"), + parallel: *matches + .get_one::<usize>("parallel") + .expect("defaulted by clap"), + disable_progress: matches.get_flag("no-progress"), + duration: Duration::seconds( + *matches + .get_one::<i64>("duration") + .expect("defaulted by clap"), + ), + warmup: Duration::seconds( + *matches.get_one::<i64>("warmup").expect("defaulted by clap"), + ), + test_results_filename: matches + .get_one::<String>("test-results") + .expect("defaulted by clap") + .to_string(), + } + } +} + +/// Context information required by performance tests. +#[derive(Debug, Clone)] +pub struct PerfRunner { + options: PerfRunnerOptions, + tests: Vec<PerfTestMetadata>, + arguments: ArgMatches, + package_dir: &'static str, + module_name: &'static str, + progress: Arc<AtomicU64>, +} + +impl PerfRunner { + /// Creates a `PerfRunner` for the performance tests in `tests`, parsing options from the current process command line. + /// + /// # Arguments + /// + /// * package_dir - The directory containing the package with the tests. Typically `env!("CARGO_MANIFEST_DIR")` + /// * module_name - the name of the module containing the test, typically `file!()` + /// * tests - the set of tests to configure. + /// + pub fn new( + package_dir: &'static str, + module_name: &'static str, + tests: Vec<PerfTestMetadata>, + ) -> azure_core::Result<Self> { + let command = Self::get_command_from_metadata(&tests); + let arguments = command.try_get_matches().map_err(|e| { + azure_core::error::Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Failed to parse command line arguments.", + ) + })?; + Ok(Self { + options: PerfRunnerOptions::from(&arguments), + tests, + arguments, + package_dir, + module_name, + progress: Arc::new(AtomicU64::new(0)), + }) + } + + /// Creates a `PerfRunner` for the performance tests in `tests`, parsing options from the command line specified in `args`. + pub fn with_command_line( + package_dir: &'static str, + module_name: &'static str, + tests: Vec<PerfTestMetadata>, + args: Vec<&str>, + ) -> azure_core::Result<Self> { + let command = Self::get_command_from_metadata(&tests); + let arguments = command.try_get_matches_from(args).map_err(|e| { + azure_core::error::Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Failed to parse command line arguments.", + ) + })?; + Ok(Self { + options: PerfRunnerOptions::from(&arguments), + tests, + arguments, + package_dir, + module_name, + progress: Arc::new(AtomicU64::new(0)), + }) + } + + /// Gets a reference to a typed argument by its id.
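+ ///
+ /// Returns `Ok(None)` if the argument was not supplied; returns an error if `id` is not a known argument or if the requested type does not match the argument's `value_parser`.
+ ///
+ /// # Example
+ ///
+ /// A sketch mirroring the config tests in this module:
+ ///
+ /// ```no_run
+ /// # use azure_core_test::perf::PerfRunner;
+ /// # fn example(runner: &PerfRunner) -> azure_core::Result<()> {
+ /// let iterations: Option<&u32> = runner.try_get_global_arg("iterations")?;
+ /// # Ok(())
+ /// # }
+ /// ```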
pub fn try_get_global_arg<T>(&self, id: &str) -> Result<Option<&T>> + where + T: Any + Clone + Send + Sync + 'static, + { + self.arguments.try_get_one::<T>(id).map_err(|e| { + Error::with_error( + azure_core::error::ErrorKind::Other, + e, + format!("Failed to get argument '{}'.", id), + ) + }) + } + + /// Gets a reference to a typed argument by its id, scoped to the selected test's subcommand. + pub fn try_get_test_arg<T>(&self, id: &str) -> Result<Option<&T>> + where + T: Any + Clone + Send + Sync + 'static, + { + if let Some((_, args)) = self.arguments.subcommand() { + args.try_get_one::<T>(id).map_err(|e| { + Error::with_error( + azure_core::error::ErrorKind::Other, + e, + format!("Failed to get argument '{}' for test.", id), + ) + }) + } else { + Ok(None) + } + } + + /// Gets the name of the test selected on the command line, if any. + pub fn get_selected_test_name(&self) -> Result<&str> { + match self.arguments.subcommand_name() { + Some(name) => Ok(name), + None => Err(Error::with_message( + azure_core::error::ErrorKind::Other, + "No test was selected.", + )), + } + } + + pub async fn run(&self) -> azure_core::Result<()> { + // We can only run tests if a test was selected on the command line. + let test_name = self.get_selected_test_name()?; + + let test = self + .tests + .iter() + .find(|t| t.name == test_name) + .ok_or_else(|| { + Error::with_message( + azure_core::error::ErrorKind::Other, + format!("Test '{}' not found.", test_name), + ) + })?; + let test_instance = (test.create_test)(self).await?; + let test_instance: Arc<dyn PerfTest> = Arc::from(test_instance); + + let test_mode = crate::TestMode::current()?; + + let context = Arc::new( + crate::recorded::start( + test_mode, + self.package_dir, + self.module_name, + test.name, + None, + ) + .await?, + ); + + println!("Test Configuration: {:#}", self.options); + + for iteration in 0..self.options.iterations { + println!( + "Running test iteration {}/{}", + iteration + 1, + self.options.iterations + ); + + println!("========== Starting test setup =========="); + test_instance.setup(context.clone()).await?; + + println!( + "========== Starting test warmup for {} ==========", + self.options.warmup + ); + + let mut test_contexts = Vec::new(); + for _ in 0..self.options.parallel { + let context = Arc::new( + crate::recorded::start( + test_mode, + self.package_dir, + self.module_name, + test.name, + None, + ) + .await?, + ); + test_contexts.push(context); + } + + self.run_test_for(test_instance.clone(), &test_contexts, self.options.warmup) + .await?; + + println!( + "========== Starting test run for {} ==========", + self.options.duration + ); + + self.run_test_for( + Arc::clone(&test_instance), + &test_contexts, + self.options.duration, + ) + .await?; + if !self.options.no_cleanup { + println!("========== Starting test cleanup =========="); + test_instance.cleanup(context.clone()).await?; + } + + let operation_count = self.progress.load(Ordering::SeqCst); + println!( + "Completed test iteration {}/{} - {} operations run in {} seconds - {} operations/second, {} seconds/operation", + iteration + 1, + self.options.iterations, + operation_count, + self.options.duration.as_seconds_f64(), + operation_count as f64 / self.options.duration.as_seconds_f64(), + self.options.duration.as_seconds_f64() / operation_count as f64 + ); + let operations_per_second = + operation_count as f64 / self.options.duration.as_seconds_f64(); + let seconds_per_operation = + self.options.duration.as_seconds_f64() / operation_count as f64; + let duration_per_operation = Duration::seconds_f64(seconds_per_operation); + println!("{operations_per_second:4} operations/second, {duration_per_operation:4} seconds/operation"); + + if !self.options.test_results_filename.is_empty() { + // Write out the results to a file. + println!( + "Writing test results to {}", + self.options.test_results_filename + ); + let results = PerfTestOutputs { + test_name: test.name.to_string(), + package_versions: vec![self.package_dir.to_string()], + operations_per_second, + average_cpu_use: None, + average_memory_use: None, + }; + + let json = serde_json::to_string_pretty(&results).map_err(|e| { + Error::with_error( + azure_core::error::ErrorKind::Other, + e, + "Failed to serialize test results to JSON.", + ) + })?; + println!("Test results: {}", json); + std::fs::write(&self.options.test_results_filename, json).map_err(|e| { + Error::with_error( + azure_core::error::ErrorKind::Io, + e, + "Failed to write test results to file.", + ) + })?; + } + } + Ok(()) + } + + /// Runs the test operation in `parallel` tasks for `duration`, counting completed operations in `self.progress`. + pub async fn run_test_for( + &self, + test_instance: Arc<dyn PerfTest>, + test_contexts: &[Arc<TestContext>], + duration: Duration, + ) -> azure_core::Result<()> { + // Reset the performance measurements before starting the test. + self.progress.store(0, Ordering::SeqCst); + let mut tasks: JoinSet<azure_core::Result<()>> = JoinSet::new(); + (0..self.options.parallel).for_each(|i| { + let test_instance_clone = Arc::clone(&test_instance); + let progress = self.progress.clone(); + let test_context = test_contexts[i].clone(); + tasks.spawn(async move { + loop { + test_instance_clone.run(test_context.clone()).await?; + progress.fetch_add(1, Ordering::SeqCst); + } + }); + }); + let start = tokio::time::Instant::now(); + let timeout = tokio::time::Duration::from_secs_f64(duration.as_seconds_f64()); + select!( + _ = tokio::time::sleep(timeout) => {println!("Timeout reached, stopping test tasks: {:?}", start.elapsed());}, + _ = tasks.join_all() => {println!("All test tasks completed: {:?}", start.elapsed());}, + _ = async { + let mut last_count = 0; + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let current_total = self.progress.load(Ordering::SeqCst); + if start.elapsed().as_secs_f64() != 0f64 && current_total != 0 { + println!("Current {:3}, Total {:5} {:4}", current_total - last_count, current_total, Duration::seconds_f64(start.elapsed().as_secs_f64() / current_total as f64)); + } else { + println!("Current {:3}, Total {:5} ---", current_total - last_count, current_total); + } + + last_count = current_total; + } + }, if !self.options.disable_progress => {}, + ); + println!("Task time elapsed: {:?}", start.elapsed()); + Ok(()) + } + + // * Disable test cleanup + // * Test Proxy servers. + // * TLS + // * Allow untrusted TLS certificates + // * Advanced options + // * Print job statistics (?) + // * Track latency and print per-operation latency statistics + // * Target throughput (operations/second) (?) + // * Language specific options + // * Max I/O completion threads + // * Minimum number of asynchronous I/O threads in the thread pool + // * Minimum number of worker threads the thread pool creates on demand + // * Sync - run a synchronous version of the test + + /// Constructs a `clap::Command` from the provided test metadata.
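+ ///
+ /// Global options (`--iterations`, `--parallel`, `--duration`, `--warmup`, and so on) are attached to the root `perf-tests` command, and each `PerfTestMetadata` becomes a subcommand whose arguments are generated from its `PerfTestOption`s. The resulting invocation shape is:
+ ///
+ /// ```text
+ /// perf-tests [GLOBAL OPTIONS] <TEST NAME> [TEST OPTIONS]
+ /// ```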
fn get_command_from_metadata(tests: &[PerfTestMetadata]) -> clap::Command { + let mut command = clap::Command::new("perf-tests") + .about("Run performance tests for the Azure SDK for Rust") + .arg( + clap::arg!(--iterations <ITERATIONS> "The number of iterations to run each test") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(u32)) + .global(false), + ) + .arg(clap::arg!(--sync "Run a synchronous version of the test").global(true).required(false)) + .arg( + clap::arg!(--parallel <PARALLEL> "The number of concurrent tasks to use when running each test") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(usize)) + .global(true), + ) + .arg(clap::arg!(--"no-progress" "Disable progress reporting").required(false).global(false)) + .arg( + clap::arg!(--duration <DURATION> "The duration of each test in seconds") + .required(false) + .default_value("30") + .value_parser(clap::value_parser!(i64)) + .global(true), + ) + .arg( + clap::arg!(--warmup <WARMUP> "The duration of the warmup period in seconds") + .required(false) + .default_value("5") + .value_parser(clap::value_parser!(i64)) + .global(true), + ) + .arg( + clap::arg!(--"test-results" <FILENAME> "The file to write test results to") + .required(false) + .default_value("./tests/results.json") + .global(false), + ) + .arg( + clap::arg!(--"no-cleanup" "Disable test cleanup") + .required(false) + .global(true), + ); + for test in tests { + let mut subcommand = clap::Command::new(test.name).about(test.description); + for option in test.options.iter() { + let mut arg = clap::Arg::new(option.name) + .help(option.display_message) + .long(option.long_activator) + .num_args(option.expected_args_len..=option.expected_args_len) + .required(option.mandatory) + .global(false); + if option.short_activator != '\0' { + arg = arg.short(option.short_activator); + } + if option.sensitive { + arg = arg.hide(true); + } + subcommand = subcommand.arg(arg); + } + command = command.subcommand(subcommand); + } + + command + } +} + +#[cfg(test)] +mod config_tests; + +#[cfg(test)] +mod framework_tests; diff --git a/sdk/core/azure_core_test/src/recording.rs b/sdk/core/azure_core_test/src/recording.rs index 4b572268ce..9c115e0bf9 100644 --- a/sdk/core/azure_core_test/src/recording.rs +++ b/sdk/core/azure_core_test/src/recording.rs @@ -36,7 +36,6 @@ use rand::{ use rand_chacha::ChaCha20Rng; use std::{ borrow::Cow, - cell::OnceCell, collections::HashMap, env, sync::{Arc, Mutex, OnceLock, RwLock}, @@ -51,8 +50,8 @@ pub struct Recording { #[allow(dead_code)] span: EnteredSpan, proxy: Option>, - test_mode_policy: OnceCell>, - recording_policy: OnceCell>, + test_mode_policy: OnceLock>, + recording_policy: OnceLock>, service_directory: String, recording_file: String, recording_assets_file: Option, @@ -361,8 +360,8 @@ impl Recording { test_mode, span, proxy, - test_mode_policy: OnceCell::new(), - recording_policy: OnceCell::new(), + test_mode_policy: OnceLock::new(), + recording_policy: OnceLock::new(), service_directory: service_directory.into(), recording_file, recording_assets_file, @@ -380,8 +379,8 @@ impl Recording { test_mode: TestMode::Playback, span: span.entered(), proxy: None, - test_mode_policy: OnceCell::new(), - recording_policy: OnceCell::new(), + test_mode_policy: OnceLock::new(), + recording_policy: OnceLock::new(), service_directory: String::from("sdk/core"), recording_file: String::from("none"), recording_assets_file: None, diff --git a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml index 857666e90c..321ad9ed2f 100644 ---
a/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml +++ b/sdk/keyvault/azure_security_keyvault_secrets/Cargo.toml @@ -40,3 +40,8 @@ rustc_version.workspace = true [lints] workspace = true + +[[test]] +name = "performance_tests" +path = "perf/get_secret.rs" +harness = false diff --git a/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs new file mode 100644 index 0000000000..c6f3028a4e --- /dev/null +++ b/sdk/keyvault/azure_security_keyvault_secrets/perf/get_secret.rs @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +//! Key Vault secrets performance tests. +//! +//! This test measures the performance of getting a secret from Azure Key Vault. +//! It sets up a secret in the Key Vault during the setup phase and then repeatedly retrieves it +//! during the run phase. The test can be configured with the vault URL via command line arguments +//! to target different Key Vault instances. +//! +//! To run the test, use the following command line arguments: +//! +//! cargo test --package azure_security_keyvault_secrets --test performance_tests -- --duration 10 --parallel 20 get_secret -u https://<vault-name>.vault.azure.net/ +//! + +use std::sync::{Arc, OnceLock}; + +use azure_core::Result; +use azure_core_test::{ + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, + Recording, TestContext, +}; +use azure_security_keyvault_secrets::{ + models::SetSecretParameters, SecretClient, SecretClientOptions, +}; + +struct GetSecrets { + vault_url: String, + random_key_name: OnceLock<String>, + client: OnceLock<SecretClient>, +} + +impl GetSecrets { + fn test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "get_secret", + description: "Get a secret from Key Vault", + options: vec![PerfTestOption { + name: "vault_url", + display_message: "The URL of the Key Vault to use in the test", + mandatory: true, + short_activator: 'u', + long_activator: "vault-url", + expected_args_len: 1, + ..Default::default() + }], + create_test: Self::create_new_test, + } + } + + fn create_new_test(runner: &PerfRunner) -> CreatePerfTestReturn { + async fn create_secret_client(runner: PerfRunner) -> Result<Box<dyn PerfTest>> { + let vault_url_ref: Option<&String> = runner.try_get_test_arg("vault_url")?; + let vault_url = vault_url_ref + .expect("vault_url argument is mandatory") + .clone(); + Ok(Box::new(GetSecrets { + vault_url, + random_key_name: OnceLock::new(), + client: OnceLock::new(), + }) as Box<dyn PerfTest>) + } + + Box::pin(create_secret_client(runner.clone())) + } + + fn create_random_key_name(recording: &Recording) -> String { + // `random_string` already applies the `perf-` prefix to the generated name. + recording.random_string::<8>(Some("perf-")) + } + + fn get_random_key_name(&self, recording: &Recording) -> &String { + self.random_key_name + .get_or_init(|| Self::create_random_key_name(recording)) + } +} + +#[cfg_attr(target_arch="wasm32", async_trait::async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +impl PerfTest for GetSecrets { + async fn setup(&self, context: Arc<TestContext>) -> azure_core::Result<()> { + let recording = context.recording(); + let credential = recording.credential(); + + let mut client_options = SecretClientOptions::default(); + recording.instrument(&mut client_options.client_options); + + let client = SecretClient::new( + self.vault_url.as_str(), + credential.clone(), + Some(client_options), + )?; + self.client.get_or_init(|| client); +
self.client + .get() + .unwrap() + .set_secret( + self.get_random_key_name(recording), + SetSecretParameters { + value: Some("secret_value".into()), + ..Default::default() + } + .try_into()?, + None, + ) + .await?; + Ok(()) + } + async fn cleanup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + Ok(()) + } + async fn run(&self, context: Arc<TestContext>) -> Result<()> { + let recording = context.recording(); + let _secret = self + .client + .get() + .unwrap() + .get_secret(self.get_random_key_name(recording), None) + .await? + .into_body()?; + Ok(()) + } +} + +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![GetSecrets::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} diff --git a/sdk/storage/azure_storage_blob/Cargo.toml b/sdk/storage/azure_storage_blob/Cargo.toml index 6ef074ae46..9cbe122431 100644 --- a/sdk/storage/azure_storage_blob/Cargo.toml +++ b/sdk/storage/azure_storage_blob/Cargo.toml @@ -37,3 +37,8 @@ azure_storage_blob_test.path = "../azure_storage_blob_test" futures.workspace = true tokio = { workspace = true, features = ["macros"] } tracing.workspace = true + +[[test]] +name = "performance_tests" +path = "perf/perf_tests.rs" +harness = false diff --git a/sdk/storage/azure_storage_blob/assets.json b/sdk/storage/azure_storage_blob/assets.json index 3bb1e158e9..e095ad34cf 100644 --- a/sdk/storage/azure_storage_blob/assets.json +++ b/sdk/storage/azure_storage_blob/assets.json @@ -1,6 +1,6 @@ { "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "rust", - "Tag": "rust/azure_storage_blob_fc6c153d44", + "Tag": "rust/azure_storage_blob_4dd8ebabce", "TagPrefix": "rust/azure_storage_blob" } diff --git a/sdk/storage/azure_storage_blob/perf/list_blob_test.rs b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs new file mode 100644 index 0000000000..4e8311f196 --- /dev/null +++ b/sdk/storage/azure_storage_blob/perf/list_blob_test.rs @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
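+ +//! Blob `list_blobs` performance test. +//! +//! `setup` creates a uniquely named container and uploads `count` 1 MiB blobs into it; +//! `run` then repeatedly pages through the container's blob listing. `cleanup` currently +//! leaves the container in place.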
+ +use std::sync::Arc; + +use azure_core::{Bytes, Result}; +use azure_core_test::{ + perf::{CreatePerfTestReturn, PerfRunner, PerfTest, PerfTestMetadata, PerfTestOption}, + TestContext, +}; +use azure_identity::DeveloperToolsCredential; +use azure_storage_blob::BlobContainerClient; +use futures::TryStreamExt; + +pub struct ListBlobTest { + count: u32, + client: BlobContainerClient, +} + +impl ListBlobTest { + fn create_list_blob_test(runner: &PerfRunner) -> CreatePerfTestReturn { + async fn create_test(runner: PerfRunner) -> Result<Box<dyn PerfTest>> { + let count: Option<&String> = runner.try_get_test_arg("count")?; + + println!("ListBlobTest with count: {:?}", count); + let count = count.expect("count argument is mandatory").parse::<u32>()?; + println!("Parsed count: {}", count); + + let endpoint: Option<&String> = runner.try_get_test_arg("endpoint")?; + let endpoint = match endpoint { + Some(e) => e.clone(), + None => format!( + "https://{}.blob.core.windows.net", + std::env::var("AZURE_STORAGE_ACCOUNT_NAME") + .expect("AZURE_STORAGE_ACCOUNT_NAME is not set") + ), + }; + println!("Using endpoint: {}", endpoint); + + let container_name = format!("perf-container-{}", uuid::Uuid::new_v4()); + let credential = DeveloperToolsCredential::new(None)?; + let client = BlobContainerClient::new(&endpoint, container_name, credential, None)?; + + Ok(Box::new(ListBlobTest { count, client }) as Box<dyn PerfTest>) + } + + // Return a pinned future that creates the test. + Box::pin(create_test(runner.clone())) + } + + pub fn test_metadata() -> PerfTestMetadata { + PerfTestMetadata { + name: "list_blob", + description: "List blobs in a container", + options: vec![ + PerfTestOption { + name: "count", + display_message: "The number of blobs to list", + mandatory: true, + short_activator: 'c', + long_activator: "count", + expected_args_len: 1, + ..Default::default() + }, + PerfTestOption { + name: "endpoint", + display_message: "The endpoint of the blob storage", + mandatory: false, + short_activator: 'e', + long_activator: "endpoint", + expected_args_len: 1, + ..Default::default() + }, + ], + create_test: Self::create_list_blob_test, + } + } +} + +#[async_trait::async_trait] +impl PerfTest for ListBlobTest { + async fn setup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + // Create the container and populate it with `count` blobs. + let _result = self.client.create_container(None).await?; + + for i in 0..self.count { + let blob_name = format!("blob-{}", i); + let blob_client = self.client.blob_client(blob_name); + + let body = vec![0u8; 1024 * 1024]; // 1 MiB blob + // Use the body's actual length as the content length. + let content_length = body.len() as u64; + let body_bytes = Bytes::from(body); + + let _result = blob_client + .upload(body_bytes.into(), true, content_length, None) + .await?; + } + + Ok(()) + } + + async fn run(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + // The actual performance test code + + let mut iterator = self.client.list_blobs(None)?; + while let Some(blob_segment) = iterator.try_next().await? { + let _body = blob_segment.into_body()?; + } + + Ok(()) + } + + async fn cleanup(&self, _context: Arc<TestContext>) -> azure_core::Result<()> { + // Cleanup code after running the test + Ok(()) + } +} diff --git a/sdk/storage/azure_storage_blob/perf/perf_tests.rs b/sdk/storage/azure_storage_blob/perf/perf_tests.rs new file mode 100644 index 0000000000..ea01bc9069 --- /dev/null +++ b/sdk/storage/azure_storage_blob/perf/perf_tests.rs @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/// list_blob performance test.
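+///
+/// This target is built with `harness = false` (see `Cargo.toml`), so it compiles to a
+/// standalone binary driven by `PerfRunner`. An illustrative invocation (the argument
+/// values here are examples only):
+///
+/// ```text
+/// cargo test --package azure_storage_blob --test performance_tests -- --duration 10 --parallel 4 list_blob --count 5
+/// ```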
+mod list_blob_test; + +use azure_core_test::perf::PerfRunner; +use list_blob_test::ListBlobTest; + +#[tokio::main] +async fn main() -> azure_core::Result<()> { + let runner = PerfRunner::new( + env!("CARGO_MANIFEST_DIR"), + file!(), + vec![ListBlobTest::test_metadata()], + )?; + + runner.run().await?; + + Ok(()) +} diff --git a/sdk/storage/perf-resources.bicep b/sdk/storage/perf-resources.bicep new file mode 100644 index 0000000000..8a802cb3e4 --- /dev/null +++ b/sdk/storage/perf-resources.bicep @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +param baseName string = resourceGroup().name +param testApplicationOid string +param location string = resourceGroup().location + +var blobDataContributorRoleId = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' +var blobDataOwnerRoleId = 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' +var encryption = { + keySource: 'Microsoft.Storage' + services: { + blob: { + enabled: true + } + file: { + enabled: true + } + } +} +var networkAcls = { + bypass: 'AzureServices' + defaultAction: 'Allow' + ipRules: [] + virtualNetworkRules: [] +} + +resource blobDataContributor 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(blobDataContributorRoleId, resourceGroup().id) + properties: { + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataContributorRoleId) + principalId: testApplicationOid + } +} + +resource blobDataOwner 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(blobDataOwnerRoleId, resourceGroup().id) + properties: { + roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataOwnerRoleId) + principalId: testApplicationOid + } +} + +resource storage 'Microsoft.Storage/storageAccounts@2024-01-01' = { + name: '${baseName}blob' + location: location + kind: 'BlockBlobStorage' + sku: { + name: 'Premium_LRS' + } + properties: { + accessTier: 'Hot' + allowSharedKeyAccess: false + encryption: encryption + networkAcls: networkAcls + supportsHttpsTrafficOnly: true + } +} + +output AZURE_STORAGE_ACCOUNT_NAME string = storage.name + +// param baseName string = resourceGroup().name +// param location string = resourceGroup().location +// param testApplicationOid string + +// var blobDataContributorRoleId = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' +// var blobDataOwnerRoleId = 'b7e6dc6d-f1e8-4753-8033-0f276bb0955b' + +// var networkAcls = { +// bypass: 'AzureServices' +// defaultAction: 'Allow' +// ipRules: [] +// virtualNetworkRules: [] +// } + +// resource blobDataContributor 'Microsoft.Authorization/roleAssignments@2022-04-01' = { +// name: guid(blobDataContributorRoleId, resourceGroup().id) +// properties: { +// roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataContributorRoleId) +// principalId: testApplicationOid +// } +// } + +// resource blobDataOwner 'Microsoft.Authorization/roleAssignments@2022-04-01' = { +// name: guid(blobDataOwnerRoleId, resourceGroup().id) +// properties: { +// roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', blobDataOwnerRoleId) +// principalId: testApplicationOid +// } +// } + +// resource storageAccount 'Microsoft.Storage/storageAccounts@2019-06-01' = { +// name: '${baseName}blob' +// location: location +// kind: 'BlockBlobStorage' +// sku: { +// name: 'Premium_LRS' +// } +// properties: { +// allowSharedKeyAccess: false +// publicNetworkAccess: 'SecuredByPerimeter' +// supportsHttpsTrafficOnly: true +// networkAcls: networkAcls +// } +// } + 
+// var name = storageAccount.name +// var key = storageAccount.listKeys().keys[0].value +// var connectionString = 'DefaultEndpointsProtocol=https;AccountName=${name};AccountKey=${key}' + +// output AZURE_STORAGE_ACCOUNT_NAME string = name +// output AZURE_STORAGE_ACCOUNT_KEY string = key +// output AZURE_STORAGE_CONNECTION_STRING string = connectionString +// output STANDARD_STORAGE_CONNECTION_STRING string = connectionString +// output STORAGE_CONNECTION_STRING string = connectionString diff --git a/sdk/storage/perf-tests.yml b/sdk/storage/perf-tests.yml new file mode 100644 index 0000000000..ea7e63f9a4 --- /dev/null +++ b/sdk/storage/perf-tests.yml @@ -0,0 +1,35 @@ +Service: storage-blob + +Project: azure-storage-blobs-perf + +PrimaryPackage: azure_storage_blob + +PackageVersions: +# - azure_storage_blob: 0.6.0 +# azure_core: 1.7.2 +- azure_storage_blob: source + azure_core: source + +Tests: +# - Test: download +# Class: DownloadBlob +# Arguments: +# - --size 10240 --parallel 64 +# - --size 10485760 --parallel 32 +# - --size 1073741824 --parallel 1 --warmup 60 --duration 60 +# - --size 1073741824 --parallel 8 --warmup 60 --duration 60 + +# - Test: upload +# Class: UploadBlob +# Arguments: +# - --size 10240 --parallel 64 +# - --size 10485760 --parallel 32 +# - --size 1073741824 --parallel 1 --warmup 60 --duration 60 +# - --size 1073741824 --parallel 8 --warmup 60 --duration 60 + +- Test: list-blobs + Class: list_blob + Arguments: + - --count 5 --parallel 64 + - --count 500 --parallel 32 + - --count 50000 --parallel 32 --warmup 60 --duration 60 diff --git a/sdk/storage/perf.yml b/sdk/storage/perf.yml new file mode 100644 index 0000000000..a43c7c913d --- /dev/null +++ b/sdk/storage/perf.yml @@ -0,0 +1,38 @@ +parameters: +- name: PackageVersions + displayName: PackageVersions (regex of package versions to run) + type: string + default: '12|source' +- name: Tests + displayName: Tests (regex of tests to run) + type: string + default: '^(download|upload|list-blobs)$' +- name: Arguments + displayName: Arguments (regex of arguments to run) + type: string + default: '(10240)|(10485760)|(1073741824)|(5 )|(500 )|(50000 )' +- name: Iterations + displayName: Iterations (times to run each test) + type: number + default: '5' +- name: Profile + type: boolean + default: false +- name: AdditionalArguments + displayName: AdditionalArguments (passed to PerfAutomation) + type: string + default: ' ' + +extends: + template: /eng/pipelines/templates/jobs/perf.yml + parameters: + ServiceDirectory: storage/azure_storage_blob + PackageVersions: ${{ parameters.PackageVersions }} + Tests: ${{ parameters.Tests }} + Arguments: ${{ parameters.Arguments }} + Iterations: ${{ parameters.Iterations }} + AdditionalArguments: ${{ parameters.AdditionalArguments }} + Profile: ${{ parameters.Profile }} + EnvVars: + # This is set in the InstallLanguageSteps + VCPKG_BINARY_SOURCES_SECRET: $(VCPKG_BINARY_SOURCES_SECRET)