Commit 6e5f59e

feat: perf-test for python (#813)

Authored by Shubham Chaturvedi and Lucas McDonald
Co-authored-by: Shubham Chaturvedi <[email protected]>
Co-authored-by: Lucas McDonald <[email protected]>

1 parent a825857, commit 6e5f59e

File tree: 8 files changed, +1865 -0 lines

README.md (43 additions, 0 deletions)
# AWS Encryption SDK Python Benchmark

Performance testing suite for the AWS Encryption SDK Python implementation.

## Quick Start

```bash
# Install dependencies
pip install -r requirements.txt

# Run benchmark
python esdk_benchmark.py

# Quick test (reduced iterations)
python esdk_benchmark.py --quick
```

## Options

- `--config` - Path to test configuration file (default: `../../config/test-scenarios.yaml`)
- `--output` - Path to output results file (default: `../../results/raw-data/python_results.json`)
- `--quick` - Run with reduced iterations for faster testing
## Configuration

Edit `../../config/test-scenarios.yaml` to set test parameters:

- Data sizes (small/medium/large)
- Iterations and concurrency levels

## Test Types

- **Throughput** - Measures encryption/decryption operations per second
- **Memory** - Tracks memory usage and allocations during operations
- **Concurrency** - Tests performance under concurrent load

## Output

Results are saved as JSON to `../../results/raw-data/python_results.json` and include:

- Performance metrics (ops/sec, latency percentiles)
- Memory usage (peak, average, allocations, input-data-to-memory ratio)
- System information (CPU, memory, Python version)
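An abridged illustration of the output shape (all values made up; each result entry carries the full set of recorded fields):

```json
{
  "metadata": {
    "language": "python",
    "timestamp": "2025-01-01 12:00:00",
    "total_tests": 1,
    "python_version": "3.11.9",
    "cpu_count": 8,
    "total_memory_gb": 16.0
  },
  "results": [
    {
      "test_name": "throughput",
      "data_size": 1024,
      "concurrency": 1,
      "ops_per_second": 1250.0,
      "p50_latency": 0.7,
      "p95_latency": 1.3,
      "p99_latency": 2.1
    }
  ]
}
```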
benchmark.py (93 additions, 0 deletions)

```python
#!/usr/bin/env python3
"""
Core benchmark module for ESDK Python benchmark
"""

import logging
import multiprocessing
import secrets
import sys

import psutil
from aws_cryptographic_material_providers.mpl import AwsCryptographicMaterialProviders
from aws_cryptographic_material_providers.mpl.config import MaterialProvidersConfig
from aws_cryptographic_material_providers.mpl.models import (
    AesWrappingAlg,
    CreateRawAesKeyringInput,
)
from aws_encryption_sdk import EncryptionSDKClient, CommitmentPolicy

from config import load_config


class ESDKBenchmark:
    """Main benchmark class for ESDK Python performance testing"""

    def __init__(self, config_path: str = "../../config/test-scenarios.yaml"):
        self.config = load_config(config_path)
        self.results = []

        self._setup_logging()
        self._setup_esdk()
        self._setup_system_info()

    def _setup_system_info(self):
        """Initialize system information"""
        self.cpu_count = multiprocessing.cpu_count()
        self.total_memory_gb = psutil.virtual_memory().total / (1024**3)

        self.logger.info(
            f"Initialized ESDK Benchmark - CPU cores: {self.cpu_count}, "
            f"Memory: {self.total_memory_gb:.1f}GB"
        )

    def _setup_logging(self):
        """Set up logging configuration"""
        logging.basicConfig(
            level=logging.INFO,
            format="%(message)s",
            handlers=[logging.StreamHandler(sys.stdout)],
        )
        # Suppress AWS SDK logging
        logging.getLogger("aws_encryption_sdk").setLevel(logging.WARNING)
        logging.getLogger("botocore").setLevel(logging.WARNING)
        logging.getLogger("boto3").setLevel(logging.WARNING)

        self.logger = logging.getLogger(__name__)

    def _setup_esdk(self):
        """Initialize ESDK client and raw AES keyring"""
        try:
            self.keyring = self._create_keyring()
            self.esdk_client = self._create_client()
            self.logger.info("ESDK client initialized successfully")
        except Exception as e:
            self.logger.error(f"Failed to initialize ESDK: {e}")
            raise

    def _create_keyring(self):
        """Create a raw AES keyring wrapping a random 256-bit key"""
        static_key = secrets.token_bytes(32)
        mat_prov = AwsCryptographicMaterialProviders(config=MaterialProvidersConfig())

        keyring_input = CreateRawAesKeyringInput(
            key_namespace="esdk-performance-test",
            key_name="test-aes-256-key",
            wrapping_key=static_key,
            wrapping_alg=AesWrappingAlg.ALG_AES256_GCM_IV12_TAG16,
        )

        return mat_prov.create_raw_aes_keyring(input=keyring_input)

    def _create_client(self):
        """Create ESDK client"""
        return EncryptionSDKClient(
            commitment_policy=CommitmentPolicy.REQUIRE_ENCRYPT_REQUIRE_DECRYPT
        )

    def should_run_test_type(self, test_type: str, is_quick_mode: bool = False) -> bool:
        """Determine if a test type should be run based on configuration"""
        if is_quick_mode:
            quick_config = self.config.get("quick_config")
            if quick_config and "test_types" in quick_config:
                return test_type in quick_config["test_types"]
        return True
```
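The hot path of the benchmark is the ESDK encrypt/decrypt cycle. A minimal round-trip sketch (not part of this commit) against the client and keyring that `ESDKBenchmark` wires up; it assumes the default config file exists at its relative path and uses the documented keyring-based `encrypt`/`decrypt` API of aws-encryption-sdk 4.x:

```python
# Round-trip sketch (illustration only, not from this commit).
import secrets

from benchmark import ESDKBenchmark

bench = ESDKBenchmark()  # assumes ../../config/test-scenarios.yaml exists
plaintext = secrets.token_bytes(1024)

# encrypt()/decrypt() return (message, header) tuples in aws-encryption-sdk 4.x
ciphertext, _ = bench.esdk_client.encrypt(source=plaintext, keyring=bench.keyring)
recovered, _ = bench.esdk_client.decrypt(source=ciphertext, keyring=bench.keyring)
assert recovered == plaintext
```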
config.py (17 additions, 0 deletions)

```python
#!/usr/bin/env python3
"""
Configuration module for ESDK Python benchmark
"""

import yaml


def load_config(config_path: str):
    """Load test configuration from YAML file"""
    try:
        with open(config_path, "r") as f:
            return yaml.safe_load(f)
    except FileNotFoundError:
        raise FileNotFoundError(f"Config file not found: {config_path}")
    except Exception as e:
        raise RuntimeError(f"Failed to parse config file: {e}")
```
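The shipped `test-scenarios.yaml` is not part of this diff. Based on the keys the benchmark reads (`iterations`, `data_sizes`, `concurrency_levels`, `quick_config`), a plausible configuration might look like the following; every value is illustrative, not taken from the repository:

```yaml
# Hypothetical test-scenarios.yaml, inferred from the keys the code reads.
iterations:
  warmup: 10
  measurement: 100
data_sizes:            # payload sizes in bytes, grouped by tier
  small: [1024, 10240]
  medium: [1048576]
  large: [10485760]
concurrency_levels: [1, 4, 8]
quick_config:          # overrides applied by --quick
  iterations:
    warmup: 2
    measurement: 10
  data_sizes:
    small: [1024]
  concurrency_levels: [1, 2]
  test_types: [throughput]
```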
esdk_benchmark.py (89 additions, 0 deletions)

```python
#!/usr/bin/env python3
"""
ESDK Performance Benchmark Suite - Python Implementation

This module provides comprehensive performance testing for the AWS Encryption SDK (ESDK)
Python runtime, measuring throughput, latency, memory usage, and scalability.
"""

import argparse
import sys

from benchmark import ESDKBenchmark
from tests import run_all_benchmarks


def main():
    """Main entry point for the benchmark suite"""
    args = _parse_arguments()

    try:
        benchmark = ESDKBenchmark(config_path=args.config)

        if args.quick:
            _adjust_config_for_quick_mode(benchmark)

        results = run_all_benchmarks(benchmark, is_quick_mode=args.quick)

        _save_and_summarize_results(results, args.output)

    except Exception as e:
        print(f"Benchmark failed: {e}")
        sys.exit(1)


def _parse_arguments():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description="ESDK Python Performance Benchmark")
    parser.add_argument(
        "--config",
        default="../../config/test-scenarios.yaml",
        help="Path to test configuration file",
    )
    parser.add_argument(
        "--output",
        default="../../results/raw-data/python_results.json",
        help="Path to output results file",
    )
    parser.add_argument(
        "--quick", action="store_true", help="Run quick test with reduced iterations"
    )
    return parser.parse_args()


def _adjust_config_for_quick_mode(benchmark):
    """Adjust benchmark configuration for quick mode"""
    quick_config = benchmark.config.get("quick_config")
    if not quick_config:
        raise RuntimeError(
            "Quick mode requested but no quick_config found in config file"
        )

    benchmark.config["iterations"]["measurement"] = quick_config["iterations"][
        "measurement"
    ]
    benchmark.config["iterations"]["warmup"] = quick_config["iterations"]["warmup"]
    benchmark.config["data_sizes"]["small"] = quick_config["data_sizes"]["small"]
    benchmark.config["data_sizes"]["medium"] = []
    benchmark.config["data_sizes"]["large"] = []
    benchmark.config["concurrency_levels"] = quick_config["concurrency_levels"]


def _save_and_summarize_results(results, output_path):
    """Save results and print summary"""
    from results import save_results

    save_results(results, output_path)

    print("\n=== ESDK Python Benchmark Summary ===")
    print(f"Total tests completed: {len(results)}")
    print(f"Results saved to: {output_path}")

    if results:
        throughput_results = [r for r in results if r.test_name == "throughput"]
        if throughput_results:
            max_throughput = max(r.ops_per_second for r in throughput_results)
            print(f"Maximum throughput: {max_throughput:.2f} ops/sec")


if __name__ == "__main__":
    main()
```
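tests.py, which supplies `run_all_benchmarks`, is among the commit's eight files but is not shown in this view. As a hedged sketch only, a throughput measurement consistent with the interfaces above could look like this; the function name, metric choices, and use of `statistics.quantiles` are assumptions, not the commit's actual code:

```python
# Sketch of a possible throughput test (assumption, not this commit's tests.py).
import secrets
import statistics
import time

from results import BenchmarkResult


def run_throughput_test(benchmark, data_size: int) -> BenchmarkResult:
    """Time encrypt/decrypt round trips over one payload size."""
    warmup = benchmark.config["iterations"]["warmup"]
    iterations = benchmark.config["iterations"]["measurement"]
    plaintext = secrets.token_bytes(data_size)

    # Warm up lazy initialization before measuring
    for _ in range(warmup):
        ciphertext, _ = benchmark.esdk_client.encrypt(
            source=plaintext, keyring=benchmark.keyring
        )
        benchmark.esdk_client.decrypt(source=ciphertext, keyring=benchmark.keyring)

    latencies_ms = []
    start = time.perf_counter()
    for _ in range(iterations):
        t0 = time.perf_counter()
        ciphertext, _ = benchmark.esdk_client.encrypt(
            source=plaintext, keyring=benchmark.keyring
        )
        benchmark.esdk_client.decrypt(source=ciphertext, keyring=benchmark.keyring)
        latencies_ms.append((time.perf_counter() - t0) * 1000)
    elapsed = time.perf_counter() - start

    # quantiles(n=100) yields 99 cut points: indices 49/94/98 are p50/p95/p99
    pct = statistics.quantiles(latencies_ms, n=100)
    return BenchmarkResult(
        test_name="throughput",
        data_size=data_size,
        ops_per_second=iterations / elapsed,  # one op = encrypt + decrypt
        bytes_per_second=iterations * data_size / elapsed,
        p50_latency=pct[49],
        p95_latency=pct[94],
        p99_latency=pct[98],
    )
```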
requirements.txt (15 additions, 0 deletions)

```text
# ESDK Performance Testing - Python Dependencies

# Core dependencies
pyyaml>=6.0
psutil>=5.9.0

# Performance measurement
memory-profiler>=0.61.0

# Progress and logging
tqdm>=4.65.0

# AWS and ESDK dependencies
aws-encryption-sdk>=4.0.1
aws-cryptographic-material-providers>=1.11.0
```
results.py (90 additions, 0 deletions)

```python
#!/usr/bin/env python3
"""
Results module for ESDK Python benchmark
"""

import json
import multiprocessing
import sys
import time
from dataclasses import dataclass, asdict
from pathlib import Path
from typing import List

import psutil


@dataclass
class BenchmarkResult:
    """Container for benchmark results"""

    test_name: str
    language: str = "python"
    data_size: int = 0
    concurrency: int = 1
    encrypt_latency_ms: float = 0.0
    decrypt_latency_ms: float = 0.0
    end_to_end_latency_ms: float = 0.0
    ops_per_second: float = 0.0
    bytes_per_second: float = 0.0
    peak_memory_mb: float = 0.0
    memory_efficiency_ratio: float = 0.0
    p50_latency: float = 0.0
    p95_latency: float = 0.0
    p99_latency: float = 0.0
    timestamp: str = ""
    python_version: str = ""
    cpu_count: int = 0
    total_memory_gb: float = 0.0

    def __post_init__(self):
        self.timestamp = self.timestamp or time.strftime("%Y-%m-%d %H:%M:%S")
        self.python_version = self.python_version or self._get_python_version()
        self.cpu_count = self.cpu_count or multiprocessing.cpu_count()
        self.total_memory_gb = self.total_memory_gb or self._get_total_memory()

    def _get_python_version(self):
        """Get Python version string"""
        return (
            f"{sys.version_info.major}.{sys.version_info.minor}."
            f"{sys.version_info.micro}"
        )

    def _get_total_memory(self):
        """Get total system memory in GB"""
        return psutil.virtual_memory().total / (1024**3)


def save_results(results: List[BenchmarkResult], output_path: str):
    """Save benchmark results to JSON file"""
    output_file = Path(output_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)

    metadata = _create_metadata(results)
    results_data = {
        "metadata": metadata,
        "results": [asdict(result) for result in results],
    }

    with open(output_file, "w") as f:
        json.dump(results_data, f, indent=2)


def _create_metadata(results: List[BenchmarkResult]):
    """Create metadata for results file"""
    metadata = {
        "language": "python",
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "total_tests": len(results),
    }

    if results:
        metadata.update(
            {
                "python_version": results[0].python_version,
                "cpu_count": results[0].cpu_count,
                "total_memory_gb": results[0].total_memory_gb,
            }
        )

    return metadata
```
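A quick usage sketch (not from this commit) showing how `__post_init__` backfills the environment fields and how results reach disk; the output path is illustrative:

```python
# Illustration only: unset fields are auto-populated by __post_init__.
from results import BenchmarkResult, save_results

r = BenchmarkResult(test_name="throughput", data_size=1024, ops_per_second=250.0)
print(r.timestamp, r.python_version, r.cpu_count)  # filled in automatically
save_results([r], "/tmp/python_results.json")      # creates parent dirs as needed
```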
