diff --git a/.github/python.instructions.md b/.github/python.instructions.md index d8f1d7d..f1a46b2 100644 --- a/.github/python.instructions.md +++ b/.github/python.instructions.md @@ -63,6 +63,7 @@ Before completing any Python code changes, verify: ## Testing - Aim for 90+% code coverage for each file. +- Slow tests (> 0.1s runtime) should be identified and fixed, if possible. - Add or update pytest unit tests when changing behavior. - Prefer focused tests for the code being changed. - Avoid tests that require live Azure access; mock Azure CLI interactions and `azure_resources` helpers. diff --git a/.vscode/settings.json b/.vscode/settings.json index 672cac6..48426b0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,44 +1,45 @@ { - "python.analysis.exclude": [ - "**/node_modules", - "**/__pycache__", - ".git", - "**/build", - "env/**", - "**/.*", - "**/.venv", - "**/venv", - "**/env" - ], - "files.trimTrailingWhitespace": true, - "files.insertFinalNewline": true, - "files.trimFinalNewlines": true, - "files.eol": "\n", - "editor.renderWhitespace": "trailing", - "python.defaultInterpreterPath": "./.venv/Scripts/python.exe", - "python.envFile": "${workspaceFolder}/.env", - "[python]": { - "editor.codeActionsOnSave": { - "source.organizeImports": "explicit", - "source.unusedImports": "explicit" - }, - "editor.formatOnSave": true + "[markdown]": { + "files.trimTrailingWhitespace": false + }, + "[python]": { + "editor.codeActionsOnSave": { + "source.organizeImports": "explicit", + "source.unusedImports": "explicit" }, - "[markdown]": { - "files.trimTrailingWhitespace": false - }, - "terminal.integrated.defaultProfile.windows": "PowerShell", - "plantuml.render": "Local", - "plantuml.exportFormat": "svg", - "plantuml.java": "C:\\Program Files\\OpenJDK\\jdk-22.0.2\\bin\\java.exe", - "plantuml.diagramsRoot": "assets/diagrams/src", - "plantuml.exportOutDir": "assets/diagrams/out", - "python.terminal.activateEnvironment": true, - 
"python.terminal.activateEnvInCurrentTerminal": true, - "python.testing.pytestEnabled": true, - "python.linting.enabled": true, - "python.linting.pylintEnabled": true, - "jupyter.kernels.trusted": [ - "./.venv/Scripts/python.exe" - ] -} \ No newline at end of file + "editor.formatOnSave": true + }, + "editor.renderWhitespace": "trailing", + "files.eol": "\n", + "files.insertFinalNewline": true, + "files.trimFinalNewlines": true, + "files.trimTrailingWhitespace": true, + "jupyter.kernels.trusted": [ + "./.venv/Scripts/python.exe" + ], + "plantuml.diagramsRoot": "assets/diagrams/src", + "plantuml.exportFormat": "svg", + "plantuml.exportOutDir": "assets/diagrams/out", + "plantuml.java": "C:\\Program Files\\OpenJDK\\jdk-22.0.2\\bin\\java.exe", + "plantuml.render": "Local", + "python.analysis.exclude": [ + "**/node_modules", + "**/__pycache__", + ".git", + "**/build", + "env/**", + "**/.*", + "**/.venv", + "**/venv", + "**/env" + ], + "python.defaultInterpreterPath": "./.venv/Scripts/python.exe", + "python.envFile": "${workspaceFolder}/.env", + "python.linting.enabled": true, + "python.linting.pylintEnabled": true, + "python.terminal.activateEnvInCurrentTerminal": true, + "python.terminal.activateEnvironment": true, + "python.terminal.useEnvFile": true, + "python.testing.pytestEnabled": true, + "terminal.integrated.defaultProfile.windows": "PowerShell" +} diff --git a/README.md b/README.md index b0f3f5c..c8472ad 100644 --- a/README.md +++ b/README.md @@ -23,8 +23,23 @@ Historically, there were two general paths to experimenting with APIM. Standing It's quick and easy to get started! -1. Follow one of the two [setup options](#️-setup). -1. Select an [infrastructure](#-list-of-infrastructures) to deploy. +### ⚡ Quick Start (Recommended Path for First-Time Users) + +1. 
**Choose your setup** (pick one): + - **Easiest**: Use [GitHub Codespaces or Dev Container](#️-setup) - everything is pre-configured + - **Prefer local development**: Follow [Full Local Setup](#️-setup) + +2. **Deploy an infrastructure** - Choose one based on your needs: + - **Just starting out?** → [Simple API Management][infra-simple-apim] (fastest, lowest cost) + - **Want to explore containers?** → [API Management & Container Apps][infra-apim-aca] + - **Exploring secure Azure Front Door?** → [Front Door & API Management with Private Link][infra-afd-apim-pe] + - **Prefer Application Gateway?** → [Application Gateway (Private Link) & API Management][infra-appgw-apim-pe] or [Application Gateway (VNet) & API Management][infra-appgw-apim] + +3. **Run a sample** - Open the desired sample's `create.ipynb` file and run it (nearly all samples work with all infrastructures) + +4. **Experiment** - Modify policies, make API calls, and learn! + +> 💡 **First time?** Start with the [Simple API Management][infra-simple-apim] infrastructure and the [General][sample-general] sample. It takes ~5 minutes to deploy and costs ~$1-2/hour to run. ## 📁 List of Infrastructures @@ -81,12 +96,12 @@ This menu-driven interface provides quick access to: APIM Samples supports two setup options: -### Option 1: GitHub Codespaces / Dev Container (Recommended) +### Option 1: GitHub Codespaces / Dev Container (Recommended for First-Time Users)
-**The fastest way to get started is using our pre-configured development environment.** +**The fastest way to get started is using our pre-configured development environment.** Everything is pre-installed and configured—just sign in to Azure and you're ready to go. -Each supported Python version has its own dev container, providing you with a more tailored environment that hopefully more closely resembles your own workloads. +This is especially helpful if you're new to APIM, unfamiliar with Python environments, or want to avoid local setup complexity. The entire setup takes 2-3 minutes. **GitHub Codespaces**: Click the green "Code" button → "Codespaces" → "..." → "New with options..." → "Dev container configuration" (select Python version but ignore *Default project configuration*) → "Create codespace" diff --git a/setup/local_setup.py b/setup/local_setup.py index 49b670c..32b8993 100644 --- a/setup/local_setup.py +++ b/setup/local_setup.py @@ -74,29 +74,29 @@ def check_azure_cli_installed(): """Check if Azure CLI is installed.""" az_path = shutil.which('az') or shutil.which('az.cmd') or shutil.which('az.bat') if not az_path: - print(" ❌ Azure CLI is not installed. Please install from: https://learn.microsoft.com/cli/azure/install-azure-cli") + print("❌ Azure CLI is not installed. Please install from: https://learn.microsoft.com/cli/azure/install-azure-cli") return False try: subprocess.run([az_path, '--version'], capture_output=True, text=True, check=True) - print(" ✅ Azure CLI is installed") + print("✅ Azure CLI is installed") return True except subprocess.CalledProcessError: - print(" ❌ Azure CLI is not installed. Please install from: https://learn.microsoft.com/cli/azure/install-azure-cli") + print("❌ Azure CLI is not installed. 
Please install from: https://learn.microsoft.com/cli/azure/install-azure-cli") return False def check_bicep_cli_installed(): """Check if Azure Bicep CLI is installed.""" az_path = shutil.which('az') or shutil.which('az.cmd') or shutil.which('az.bat') if not az_path: - print(" ❌ Azure CLI is not installed. Please install from: https://learn.microsoft.com/cli/azure/install-azure-cli") + print("❌ Azure CLI is not installed. Please install from: https://learn.microsoft.com/cli/azure/install-azure-cli") return False try: subprocess.run([az_path, 'bicep', 'version'], capture_output=True, text=True, check=True) - print(" ✅ Azure Bicep CLI is installed (via az bicep)") + print("✅ Azure Bicep CLI is installed (via az bicep)") return True except subprocess.CalledProcessError: - print(" ❌ Azure Bicep CLI is not installed. Install with: az bicep install") + print("❌ Azure Bicep CLI is not installed. Install with: az bicep install") return False def check_azure_providers_registered(): @@ -133,10 +133,10 @@ def check_azure_providers_registered(): missing_providers = [p for p in required_providers if p not in registered_providers] if not missing_providers: - print(" ✅ All required Azure resource providers are registered") + print("✅ All required Azure resource providers are registered") return True - print(f" ❌ Missing {len(missing_providers)} Azure provider(s):") + print(f"❌ Missing {len(missing_providers)} Azure provider(s):") for provider in missing_providers: print(f" • {provider}") print(" Register with: az provider register -n ") @@ -306,7 +306,7 @@ def generate_env_file() -> None: with open(env_file_path, 'w', encoding='utf-8') as f: f.write(env_content) - print(f"\nSuccessfully generated .env file: {env_file_path}\n") + print(f"\n✅ Successfully generated .env file: {env_file_path}\n") def install_jupyter_kernel(): @@ -380,8 +380,6 @@ def create_vscode_settings(): "python.terminal.activateEnvironment": True, "python.terminal.activateEnvInCurrentTerminal": True, 
"python.testing.pytestEnabled": True, - "python.linting.enabled": True, - "python.linting.pylintEnabled": True, "jupyter.kernels.trusted": [venv_python], } @@ -408,7 +406,8 @@ def create_vscode_settings(): ) with open(settings_file, 'w', encoding='utf-8') as f: - json.dump(merged_settings, f, indent=4) + json.dump(merged_settings, f, indent=2, sort_keys=True) + f.write('\n') print(f"✅ VS Code settings updated: {settings_file}") print(" - Existing settings preserved") @@ -419,7 +418,8 @@ def create_vscode_settings(): required_settings["python.analysis.exclude"] = DEFAULT_PYTHON_ANALYSIS_EXCLUDE with open(settings_file, 'w', encoding='utf-8') as f: - json.dump(required_settings, f, indent=4) + json.dump(required_settings, f, indent=2, sort_keys=True) + f.write('\n') print(f"✅ VS Code settings created: {settings_file}") print(" - Python interpreter configured for .venv") @@ -502,7 +502,8 @@ def force_kernel_consistency(): ) with open(settings_file, 'w', encoding='utf-8') as f: - json.dump(merged_settings, f, indent=4) + json.dump(merged_settings, f, indent=2) + f.write('\n') print("✅ Kernel trust refreshed without overriding user settings") return True @@ -522,8 +523,8 @@ def setup_complete_environment(): print("🚀 Setting up complete APIM Samples environment...\n") - # Step 0: Check Azure prerequisites - print("0. Checking Azure prerequisites...") + # Step 1: Check Azure prerequisites + print("1/5) Checking Azure prerequisites...\n") azure_cli_ok = check_azure_cli_installed() bicep_ok = check_bicep_cli_installed() providers_ok = check_azure_providers_registered() @@ -532,20 +533,20 @@ def setup_complete_environment(): print("\n⚠️ Some Azure prerequisites are missing. Please address the issues above and re-run this script.") return - # Step 1: Generate .env file - print("\n1. 
Generating .env file for Python path configuration...") + # Step 2: Generate .env file + print("\n2/5) Generating .env file for Python path configuration...") generate_env_file() - # Step 2: Register Jupyter kernel - print("2. Registering standardized Jupyter kernel...") + # Step 3: Register Jupyter kernel + print("3/5) Registering standardized Jupyter kernel...\n") kernel_success = install_jupyter_kernel() - # Step 3: Configure VS Code settings with minimal, merged defaults - print("\n3. Configuring VS Code workspace settings...") + # Step 4: Configure VS Code settings with minimal, merged defaults + print("\n4/5) Configuring VS Code workspace settings...\n") vscode_success = create_vscode_settings() - # Step 4: Enforce kernel consistency - print("\n4. Enforcing kernel consistency for future reliability...") + # Step 5: Enforce kernel consistency + print("\n5/5) Enforcing kernel consistency for future reliability...\n") consistency_success = force_kernel_consistency() # Summary diff --git a/setup/verify_local_setup.py b/setup/verify_local_setup.py index f1538de..6e44e82 100644 --- a/setup/verify_local_setup.py +++ b/setup/verify_local_setup.py @@ -199,7 +199,9 @@ def check_azure_cli(): try: result = subprocess.run([az_path, '--version'], capture_output=True, text=True, check=True) version_line = (result.stdout.splitlines() or ["unknown version"])[0].strip() - print_status(f"Azure CLI is installed ({version_line})") + # Extract just the version number from "azure-cli 2.81.0" + version = version_line.split()[-1] if version_line else "unknown" + print_status(f"Azure CLI is installed ({version})") return True except subprocess.CalledProcessError: print_status("Azure CLI is not installed or not in PATH", False) @@ -216,7 +218,16 @@ def check_bicep_cli(): try: result = subprocess.run([az_path, 'bicep', 'version'], capture_output=True, text=True, check=True) version_line = (result.stdout.splitlines() or ["unknown version"])[0].strip() - print_status(f"Azure Bicep CLI is 
installed (az bicep version: {version_line})") + # Extract version number from "Bicep CLI version 0.39.26 (1e90b06e40)" + version = "unknown" + if "version" in version_line.lower(): + parts = version_line.split() + # Find index of "version" and get the next part + for i, part in enumerate(parts): + if part.lower() == "version" and i + 1 < len(parts): + version = parts[i + 1] + break + print_status(f"Azure Bicep CLI is installed ({version})") return True except subprocess.CalledProcessError: print_status("Azure Bicep CLI is not installed. Install with: az bicep install", False) @@ -262,10 +273,10 @@ def check_azure_providers(): print(f" - {provider}") if not missing_providers: - print_status("All required Azure providers are registered") + print_status("\nAll required Azure providers are registered") return True - print_status(f"Missing {len(missing_providers)} provider(s): {', '.join(missing_providers)}", False) + print_status(f"\nMissing {len(missing_providers)} provider(s): {', '.join(missing_providers)}", False) print(" Register missing providers with:") for provider in missing_providers: print(f" az provider register -n {provider}") diff --git a/tests/python/check_python.ps1 b/tests/python/check_python.ps1 index 87dd9b4..0850c41 100644 --- a/tests/python/check_python.ps1 +++ b/tests/python/check_python.ps1 @@ -100,6 +100,46 @@ foreach ($Line in $TestOutput) { $TotalTests = $PassedTests + $FailedTests +# Parse coverage from coverage.json +$CoveragePercent = $null +$CoverageJsonPath = Join-Path $ScriptDir "..\..\coverage.json" +if (Test-Path $CoverageJsonPath) { + try { + $CoverageData = Get-Content $CoverageJsonPath -Raw | ConvertFrom-Json + if ($CoverageData.totals -and $CoverageData.totals.percent_covered) { + $CoveragePercent = $CoverageData.totals.percent_covered + } + } + catch { + # Silently continue if coverage parsing fails + } +} + +# Fallback: Parse coverage from pytest output (e.g., "TOTAL ... 
95%") +if ($CoveragePercent -eq $null) { + foreach ($Line in $TestOutput) { + $LineStr = $Line.ToString() + if ($LineStr -match 'TOTAL\s+.*\s+(\d+)%') { + $CoveragePercent = [int]::Parse($matches[1]) + break + } + } +} + +# Detect slow tests (>0.1s execution time) +$SlowTestsFound = $false +foreach ($Line in $TestOutput) { + $LineStr = $Line.ToString() + # Match lines like "1.23s call test_file.py::test_name" + if ($LineStr -match '(\d+\.\d+)s\s+call\s+') { + $time = [double]::Parse($matches[1]) + if ($time -gt 0.1) { + $SlowTestsFound = $true + break + } + } +} + Write-Host "" @@ -131,11 +171,11 @@ $LintColor = if ($LintExitCode -eq 0) { "Green" } else { "Yellow" } $TestColor = if ($TestExitCode -eq 0) { "Green" } else { "Red" } # Calculate column widths for alignment -$LabelWidth = "Pylint :".Length # 7 +$LabelWidth = "Pylint :".Length # 7 $Padding = " " * ($LabelWidth - 1) # Display Pylint status with score -Write-Host "Pylint : " -NoNewline +Write-Host "Pylint : " -NoNewline Write-Host $LintStatus -ForegroundColor $LintColor -NoNewline if ($PylintScore) { Write-Host " ($PylintScore)" -ForegroundColor Gray @@ -144,19 +184,43 @@ if ($PylintScore) { } # Display Test status with counts -Write-Host "Tests : " -NoNewline +Write-Host "Tests : " -NoNewline Write-Host $TestStatus -ForegroundColor $TestColor -# Display test counts with right-aligned numbers +# Display test counts with right-aligned numbers and percentages if ($TotalTests -gt 0) { # Calculate padding for right-alignment (max 5 digits) $TotalPadded = "{0,5}" -f $TotalTests $PassedPadded = "{0,5}" -f $PassedTests $FailedPadded = "{0,5}" -f $FailedTests - Write-Host " • Total : $TotalPadded" -ForegroundColor Gray - Write-Host " • Passed : $PassedPadded" -ForegroundColor Gray - Write-Host " • Failed : $FailedPadded" -ForegroundColor Gray + # Calculate percentages + $PassedPercent = ($PassedTests / $TotalTests * 100) + $FailedPercent = ($FailedTests / $TotalTests * 100) + $PassedPercentStr = "{0,6:F2}" -f 
$PassedPercent + $FailedPercentStr = "{0,6:F2}" -f $FailedPercent + + Write-Host " • Total : $TotalPadded" -ForegroundColor Gray + Write-Host " • Passed : $PassedPadded (" -ForegroundColor Gray -NoNewline + Write-Host $PassedPercentStr -ForegroundColor Gray -NoNewline + Write-Host "%)" -ForegroundColor Gray + Write-Host " • Failed : $FailedPadded (" -ForegroundColor Gray -NoNewline + Write-Host $FailedPercentStr -ForegroundColor Gray -NoNewline + Write-Host "%)" -ForegroundColor Gray +} + +# Display code coverage +if ($CoveragePercent -ne $null) { + Write-Host "Coverage : " -NoNewline + Write-Host "📊 " -NoNewline + Write-Host ("{0:F2}" -f $CoveragePercent) -ForegroundColor Cyan -NoNewline + Write-Host "%" -ForegroundColor Cyan +} + +# Display slow tests warning if detected +if ($SlowTestsFound) { + Write-Host "" + Write-Host "⚠️ SLOW TESTS DETECTED (> 0.1s). Please review slowest durations in test summary." -ForegroundColor Yellow } Write-Host "" diff --git a/tests/python/check_python.sh b/tests/python/check_python.sh index 0e3093f..55c5ac8 100644 --- a/tests/python/check_python.sh +++ b/tests/python/check_python.sh @@ -79,6 +79,27 @@ PASSED_TESTS=$(echo "$TEST_OUTPUT" | grep -oE '[0-9]+ passed' | head -1 | grep - FAILED_TESTS=$(echo "$TEST_OUTPUT" | grep -oE '[0-9]+ failed' | head -1 | grep -oE '[0-9]+' || echo "0") TOTAL_TESTS=$((PASSED_TESTS + FAILED_TESTS)) +# Parse coverage from pytest output (e.g., "TOTAL ... 
95%") +COVERAGE_PERCENT="" +if echo "$TEST_OUTPUT" | grep -qE 'TOTAL\s+.*\s+[0-9]+%'; then + COVERAGE_PERCENT=$(echo "$TEST_OUTPUT" | grep -oE 'TOTAL\s+.*\s+([0-9]+)%' | grep -oE '[0-9]+%' | head -1) +fi + +# Detect slow tests (>0.1s execution time) +SLOW_TESTS_FOUND=0 +if echo "$TEST_OUTPUT" | grep -qE '[0-9]+\.[0-9]+s\s+call\s+'; then + # Check each line with slow test pattern for times > 0.1 + while IFS= read -r line; do + if [[ $line =~ ^([0-9]+\.[0-9]+)s\ +call ]]; then + time="${BASH_REMATCH[1]}" + if (( $(echo "$time > 0.1" | bc -l) )); then + SLOW_TESTS_FOUND=1 + break + fi + fi + done <<< "$(echo "$TEST_OUTPUT" | grep -E '[0-9]+\.[0-9]+s\s+call\s+')" +fi + echo "" @@ -106,17 +127,32 @@ else fi # Display results with proper alignment -echo "Pylint : $LINT_STATUS" +echo "Pylint : $LINT_STATUS" if [ -n "$PYLINT_SCORE" ]; then - echo " ($PYLINT_SCORE)" + echo " ($PYLINT_SCORE)" fi -echo "Tests : $TEST_STATUS" +echo "Tests : $TEST_STATUS" if [ $TOTAL_TESTS -gt 0 ]; then + # Calculate percentages (using bc for floating point) + PASSED_PERCENT=$(echo "scale=2; $PASSED_TESTS * 100 / $TOTAL_TESTS" | bc) + FAILED_PERCENT=$(echo "scale=2; $FAILED_TESTS * 100 / $TOTAL_TESTS" | bc) + # Right-align numbers with padding - printf " • Total : %5d\n" "$TOTAL_TESTS" - printf " • Passed : %5d\n" "$PASSED_TESTS" - printf " • Failed : %5d\n" "$FAILED_TESTS" + printf " • Total : %5d\n" "$TOTAL_TESTS" + printf " • Passed : %5d (% 6.2f%%)\n" "$PASSED_TESTS" "$PASSED_PERCENT" + printf " • Failed : %5d (% 6.2f%%)\n" "$FAILED_TESTS" "$FAILED_PERCENT" +fi + +# Display code coverage +if [ -n "$COVERAGE_PERCENT" ]; then + echo "Coverage : 📊 ${COVERAGE_PERCENT}" +fi + +# Display slow tests warning if detected +if [ $SLOW_TESTS_FOUND -eq 1 ]; then + echo "" + echo "⚠️ SLOW TESTS DETECTED (> 0.1s). Please review slowest durations in test summary." 
| sed $'s/^/\e[33m/;s/$/\e[0m/' # Yellow color fi echo "" diff --git a/tests/python/run_tests.ps1 b/tests/python/run_tests.ps1 index 0fd5d65..d153352 100644 --- a/tests/python/run_tests.ps1 +++ b/tests/python/run_tests.ps1 @@ -19,7 +19,7 @@ $env:PYTHONUNBUFFERED = "1" Push-Location $RepoRoot try { $env:COVERAGE_FILE = (Join-Path $RepoRoot ".coverage") - pytest -v --color=yes --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:tests/python/htmlcov --cov-report=xml:coverage.xml --cov-report=json:coverage.json tests/python/ + pytest -v --color=yes --durations=3 --durations-min=0.1 --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:tests/python/htmlcov --cov-report=xml:coverage.xml --cov-report=json:coverage.json tests/python/ # Display coverage summary Write-Host "`nCoverage Summary:" -ForegroundColor Green diff --git a/tests/python/run_tests.sh b/tests/python/run_tests.sh index 69e4dc2..79b1e14 100644 --- a/tests/python/run_tests.sh +++ b/tests/python/run_tests.sh @@ -17,7 +17,7 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." 
&& pwd)" cd "${REPO_ROOT}" export COVERAGE_FILE=".coverage" -pytest -v --color=yes --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:htmlcov --cov-report=xml:coverage.xml --cov-report=json:coverage.json tests/python/ +pytest -v --color=yes --durations=3 --durations-min=0.1 --cov=shared/python --cov-config=tests/python/.coveragerc --cov-report=html:tests/python/htmlcov --cov-report=xml:coverage.xml --cov-report=json:coverage.json tests/python/ # Display coverage summary echo "" diff --git a/tests/python/test_apimrequests.py b/tests/python/test_apimrequests.py index 5e2066e..eff33df 100644 --- a/tests/python/test_apimrequests.py +++ b/tests/python/test_apimrequests.py @@ -155,7 +155,7 @@ def test_request_header_merging(): def test_init_missing_url(): # Negative: missing URL should raise TypeError with pytest.raises(TypeError): - ApimRequests() + ApimRequests() # pylint: disable=no-value-for-parameter @pytest.mark.http def test_print_response_code_edge(): diff --git a/tests/python/test_apimtesting.py b/tests/python/test_apimtesting.py index 94454cc..ba5207f 100644 --- a/tests/python/test_apimtesting.py +++ b/tests/python/test_apimtesting.py @@ -28,7 +28,7 @@ def test_apimtesting_init_default(): assert not testing.tests_passed assert not testing.tests_failed assert not testing.total_tests - assert testing.errors == [] + assert not testing.errors def test_apimtesting_init_with_parameters(): @@ -39,13 +39,10 @@ def test_apimtesting_init_with_parameters(): deployment=INFRASTRUCTURE.SIMPLE_APIM ) - assert testing.test_suite_name == 'Custom Tests' - assert testing.sample_name == 'test-sample' - assert testing.deployment == INFRASTRUCTURE.SIMPLE_APIM assert not testing.tests_passed assert not testing.tests_failed assert not testing.total_tests - assert testing.errors == [] + assert not testing.errors # ------------------------------ diff --git a/tests/python/test_azure_resources.py b/tests/python/test_azure_resources.py index ca245ad..9a1d8c5 100644 
--- a/tests/python/test_azure_resources.py +++ b/tests/python/test_azure_resources.py @@ -820,7 +820,7 @@ def fake_run(cmd, *args, **kwargs): monkeypatch.setattr('azure_resources.run', fake_run) result = az.find_infrastructure_instances(INFRASTRUCTURE.AFD_APIM_PE) - assert result == [] + assert not result # ------------------------------ @@ -1821,7 +1821,7 @@ class TestGetAppGwEndpoint: """Test get_appgw_endpoint function.""" def test_get_appgw_endpoint_not_found(self, monkeypatch): - suppress_module_functions(monkeypatch, az, ['print_val']) + suppress_module_functions(monkeypatch, az, ['print_ok', 'print_warning']) with patch('azure_resources.run') as mock_run: mock_run.return_value = Output(False, 'No gateways found') @@ -1835,7 +1835,15 @@ def test_get_appgw_endpoint_not_found(self, monkeypatch): class TestGetUniqueInfraSuffix: """Test get_unique_suffix_for_resource_group function.""" - def test_get_unique_suffix_empty_rg(self): + def test_get_unique_suffix_empty_rg(self, monkeypatch): + # Mock the run function to avoid actual Azure CLI deployment + def mock_run(cmd, *args, **kwargs): + output = Mock() + output.success = True + output.text = 'abcd1234efgh5' + return output + + monkeypatch.setattr('azure_resources.run', mock_run) result = az.get_unique_suffix_for_resource_group('') assert isinstance(result, str) @@ -1855,7 +1863,7 @@ def mock_run(cmd, *args, **kwargs): monkeypatch.setattr('azure_resources.run', mock_run) result = az.find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM) - assert result == [] + assert not result class TestGetInfraRgName: diff --git a/tests/python/test_azure_resources_run.py b/tests/python/test_azure_resources_run.py index d906d61..0eb9256 100644 --- a/tests/python/test_azure_resources_run.py +++ b/tests/python/test_azure_resources_run.py @@ -39,11 +39,13 @@ def _quiet_console(monkeypatch: pytest.MonkeyPatch) -> None: def test_run_adds_az_debug_flag_and_keeps_stdout_clean_when_success(_quiet_console: None) -> None: completed = 
SimpleNamespace(stdout='{"ok": true}', stderr='DEBUG: noisy stderr', returncode=0) - with patch.object(az, 'is_debug_enabled', return_value=True), patch.object(az.subprocess, 'run', return_value=completed) as sp_run: + with patch.object(az, 'is_debug_enabled', return_value=True), \ + patch.object(az.subprocess, 'run', return_value=completed) as sp_run, \ + patch.object(az, 'print_plain') as mock_print_plain: output = az.run('az group list -o json') assert output.success is True - assert output.text == '{"ok": true}' + assert output.text == '{\"ok\": true}' called_command = sp_run.call_args.args[0] assert called_command.startswith('az group list') @@ -54,7 +56,7 @@ def test_run_adds_az_debug_flag_and_keeps_stdout_clean_when_success(_quiet_conso assert sp_run.call_args.kwargs['text'] is True # stderr debug noise should still be logged at DEBUG. - assert any(call.kwargs.get('level') == logging.DEBUG for call in az.print_plain.call_args_list) + assert any(call.kwargs.get('level') == logging.DEBUG for call in mock_print_plain.call_args_list) def test_run_does_not_add_debug_flag_when_not_debug_enabled(_quiet_console: None) -> None: diff --git a/tests/python/test_infrastructures.py b/tests/python/test_infrastructures.py index 9e1f495..870ecd5 100644 --- a/tests/python/test_infrastructures.py +++ b/tests/python/test_infrastructures.py @@ -805,19 +805,19 @@ def test_infrastructure_with_all_custom_components(mock_utils, mock_policy_fragm def test_infrastructure_missing_required_params(): """Test Infrastructure creation with missing required parameters.""" with pytest.raises(TypeError): - infrastructures.Infrastructure() + infrastructures.Infrastructure() # pylint: disable=no-value-for-parameter with pytest.raises(TypeError): - infrastructures.Infrastructure(infra=INFRASTRUCTURE.SIMPLE_APIM) + infrastructures.Infrastructure(infra=INFRASTRUCTURE.SIMPLE_APIM) # pylint: disable=no-value-for-parameter @pytest.mark.unit def test_concrete_infrastructure_missing_params(): """Test 
concrete infrastructure classes with missing parameters.""" with pytest.raises(TypeError): - infrastructures.SimpleApimInfrastructure() + infrastructures.SimpleApimInfrastructure() # pylint: disable=no-value-for-parameter with pytest.raises(TypeError): - infrastructures.SimpleApimInfrastructure(rg_location=TEST_LOCATION) + infrastructures.SimpleApimInfrastructure(rg_location=TEST_LOCATION) # pylint: disable=no-value-for-parameter # ------------------------------ @@ -1091,7 +1091,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=False, text='Deployment not found') # All other commands succeed - return Output(success=True, json_data=[]) + return Output(success=True, text='[]') monkeypatch.setattr(infrastructures.az, 'run', mock_run) suppress_module_functions(monkeypatch, infrastructures, ['print_info', 'print_message']) @@ -1257,7 +1257,7 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): return Output(success=False, text='Deployment not found') # Default empty lists for resource queries. 
if any(x in command for x in ['cognitiveservices account list', 'az apim list', 'az keyvault list']): - return Output(success=True, json_data=[]) + return Output(success=True, text='[]') return Output(success=True, text='{}') monkeypatch.setattr(infrastructures.az, 'run', mock_run) @@ -1460,13 +1460,11 @@ def mock_run(command, ok_message=None, error_message=None, **kwargs): # Return appropriate mock responses if 'deployment group show' in command: - return Output(success=True, json_data={ - 'properties': {'provisioningState': 'Succeeded'} - }) + return Output(success=True, text='{"properties": {"provisioningState": "Succeeded"}}') # Return empty lists for resource queries to avoid complex mocking if any(x in command for x in ['list -g', 'list']): - return Output(success=True, json_data=[]) + return Output(success=True, text='[]') return Output(success=True, text='{}') @@ -3873,7 +3871,7 @@ def mock_run(command, ok_msg=None, error_msg=None): if 'cognitiveservices account list' in command: return Output(success=False, text='List failed') if 'apim list' in command or 'keyvault list' in command: - return Output(success=True, json_data=[]) + return Output(success=True, text='[]') return Output(success=True, text='{}') monkeypatch.setattr(infrastructures.az, 'run', mock_run) @@ -3977,7 +3975,7 @@ def mock_run(command, ok_msg=None, error_msg=None): if 'deployment group show' in command: return Output(success=True, text='{}') if any(x in command for x in ['cognitiveservices', 'apim', 'keyvault']): - return Output(success=True, json_data=[]) + return Output(success=True, text='[]') return Output(success=True, text='{}') monkeypatch.setattr(infrastructures.az, 'run', mock_run) @@ -4391,8 +4389,10 @@ def test_appgw_apim_pe_deploy_approve_private_link_failure(mock_utils, mock_az): @pytest.mark.unit -def test_appgw_apim_pe_deploy_disable_public_access_failure(mock_utils, mock_az): +def test_appgw_apim_pe_deploy_disable_public_access_failure(mock_utils, mock_az, monkeypatch): 
"""Test AppGwApimPeInfrastructure deploy when disabling public access fails.""" + suppress_module_functions(monkeypatch, infrastructures, ['print_plain', 'print_ok', 'print_error', 'print_info', 'print_command']) + mock_utils.Output.side_effect = Output infra = infrastructures.AppGwApimPeInfrastructure(rg_location='eastus', index=1) # Create mock output with all required properties for AppGW @@ -4404,17 +4404,16 @@ def test_appgw_apim_pe_deploy_disable_public_access_failure(mock_utils, mock_az) 'appgwPublicIpAddress': {'value': '1.2.3.4'} } - # Create the failure output that will be returned by utils.Output() - failure_output = Output(False, 'Failed to disable public access') - mock_utils.Output.return_value = failure_output + # Mock all the infrastructure methods directly on the instance + infra._create_keyvault = Mock(return_value=True) + infra._create_keyvault_certificate = Mock(return_value=True) + infra._approve_private_link_connections = Mock(return_value=True) + infra._verify_apim_connectivity = Mock(return_value=True) + infra._disable_apim_public_access = Mock(return_value=False) - with patch.object(infra, '_create_keyvault', return_value=True): - with patch.object(infra, '_create_keyvault_certificate', return_value=True): - with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): - with patch.object(infra, '_approve_private_link_connections', return_value=True): - with patch.object(infra, '_disable_apim_public_access', return_value=False): - result = infra.deploy_infrastructure() - assert result.success is False and 'public access' in result.text.lower() + with patch.object(infrastructures.Infrastructure, 'deploy_infrastructure', return_value=mock_output): + result = infra.deploy_infrastructure() + assert result.success is False and 'public access' in result.text.lower() @pytest.mark.unit @@ -4604,17 +4603,11 @@ def mock_run(command, ok_msg=None, error_msg=None): if 'deployment group show' in command: return 
Output(success=True, text='{}') if 'cognitiveservices account list' in command: - return Output(success=True, json_data=[ - {'name': f'cog-{i}', 'location': 'eastus'} for i in range(5) - ]) + return Output(success=True, text=json.dumps([{'name': f'cog-{i}', 'location': 'eastus'} for i in range(5)])) if 'apim list' in command: - return Output(success=True, json_data=[ - {'name': f'apim-{i}', 'location': 'westus'} for i in range(3) - ]) + return Output(success=True, text=json.dumps([{'name': f'apim-{i}', 'location': 'westus'} for i in range(3)])) if 'keyvault list' in command: - return Output(success=True, json_data=[ - {'name': f'kv-{i}', 'location': 'northeurope'} for i in range(7) - ]) + return Output(success=True, text=json.dumps([{'name': f'kv-{i}', 'location': 'northeurope'} for i in range(7)])) return Output(success=True, text='{}') def mock_cleanup_parallel(resources, thread_prefix, thread_color): diff --git a/tests/python/test_slowness_detection.py b/tests/python/test_slowness_detection.py new file mode 100644 index 0000000..76f4e89 --- /dev/null +++ b/tests/python/test_slowness_detection.py @@ -0,0 +1,20 @@ +"""Verification tests for test infrastructure slowness detection.""" + +#from __future__ import annotations + + +# @pytest.mark.unit +# def test_slowness_detection_verification(): +# """Intentionally slow test to verify slowness detection feature works. + +# This test adds a deliberate delay to verify that the check_python.ps1 and +# check_python.sh scripts properly detect and highlight slow tests (>0.1s). + +# The test is intentionally commented out as to not pollute the test results. + +# The pytest durations output should show this test taking >0.1s, and the +# check scripts should display a yellow warning message with guidance to review +# slowest durations in the test summary. 
+# """ +# time.sleep(0.15) # Sleep 150ms to exceed 0.1s threshold +# assert True diff --git a/tests/python/test_utils.py b/tests/python/test_utils.py index 520c2e1..01fb7a8 100644 --- a/tests/python/test_utils.py +++ b/tests/python/test_utils.py @@ -1378,7 +1378,7 @@ def test_find_infrastructure_instances_no_results(monkeypatch): monkeypatch.setattr(az, 'run', lambda cmd, *args, **kwargs: utils.Output(False, 'no results')) result = az.find_infrastructure_instances(INFRASTRUCTURE.SIMPLE_APIM) - assert result == [] + assert not result def test_find_infrastructure_instances_with_index(monkeypatch): """Test find_infrastructure_instances with indexed resource groups.""" @@ -1677,10 +1677,10 @@ def test_infrastructure_notebook_helper_allow_update_false(monkeypatch, suppress def test_infrastructure_notebook_helper_missing_args(): """Test InfrastructureNotebookHelper requires all arguments.""" with pytest.raises(TypeError): - utils.InfrastructureNotebookHelper() + utils.InfrastructureNotebookHelper() # pylint: disable=no-value-for-parameter with pytest.raises(TypeError): - utils.InfrastructureNotebookHelper('eastus') + utils.InfrastructureNotebookHelper('eastus') # pylint: disable=no-value-for-parameter def test_does_infrastructure_exist_with_prompt_multiple_retries(monkeypatch, suppress_console):