diff --git a/examples/query_run.py b/examples/query_run.py index 610caa7..96ccb5b 100644 --- a/examples/query_run.py +++ b/examples/query_run.py @@ -1,413 +1,290 @@ #!/usr/bin/env python3 """ -Query Run Management Example +Query Run Individual Function Tests -This example demonstrates all available query run operations in the Python TFE SDK, -including create, read, list, logs, results, cancel, and force cancel operations. +This file provides individual test functions for each query run operation. +You can run specific functions to test individual parts of the API. + +Functions available: +- test_list() - List query runs in a workspace +- test_create() - Create a new query run +- test_read() - Read a specific query run +- test_logs() - Retrieve logs for a query run +- test_cancel() - Cancel a query run +- test_force_cancel() - Force cancel a query run Usage: - python examples/query_run.py - -Requirements: - - TFE_TOKEN environment variable set - - TFE_ADDRESS # Get logs - logs = client.query_runs.logs(query_run_id) - print(f" ✓ Retrieved execution logs ({len(logs.logs)} characters)")ironment variable set (optional, defaults to Terraform Cloud) - - An existing organization in your Terraform Cloud/Enterprise instance - -Query Run Operations Demonstrated: - 1. List query runs with various filters - 2. Create new query runs with different types - 3. Read query run details - 4. Read query run with additional options - 5. Retrieve query run logs - 6. Retrieve query run results - 7. Cancel running query runs - 8. Force cancel stuck query runs + python query_run.py + +Note: Query Runs require Terraform ~>1.14 which includes the 'terraform query' command. + These tests may fail with error status since the feature is not fully available yet. """ import os import time -from datetime import datetime from pytfe import TFEClient, TFEConfig from pytfe.models import ( - QueryRunCancelOptions, QueryRunCreateOptions, - QueryRunForceCancelOptions, QueryRunListOptions, - QueryRunReadOptions, - QueryRunStatus, - QueryRunType, + QueryRunSource, ) -def test_list_query_runs(client, organization_name): - """Test listing query runs with various options.""" - print("=== Testing Query Run List Operations ===") +def get_client_and_workspace(): + """Initialize client and get workspace ID.""" + client = TFEClient(TFEConfig.from_env()) + organization = os.getenv("TFE_ORG", "aayush-test") + workspace_name = "query-test" # Default workspace for testing - # 1. List all query runs - print("\n1. Listing All Query Runs:") - try: - query_runs = client.query_runs.list(organization_name) - print(f" ✓ Found {len(query_runs.items)} query runs") - if query_runs.items: - print(f" ✓ Latest query run: {query_runs.items[0].id}") - print(f" ✓ Status: {query_runs.items[0].status}") - print(f" ✓ Query type: {query_runs.items[0].query_type}") - except Exception as e: - print(f" ✗ Error: {e}") + # Get workspace + workspace = client.workspaces.read(workspace_name, organization=organization) + return client, workspace - # 2. List with pagination - print("\n2. Listing Query Runs with Pagination:") - try: - options = QueryRunListOptions(page_number=1, page_size=5) - query_runs = client.query_runs.list(organization_name, options) - print(f" ✓ Page 1 has {len(query_runs.items)} query runs") - print(f" ✓ Total pages: {query_runs.total_pages}") - print(f" ✓ Total count: {query_runs.total_count}") - except Exception as e: - print(f" ✗ Error: {e}") - # 3. List with filters - print("\n3. 
Listing Query Runs with Filters:") - try: - options = QueryRunListOptions( - query_type=QueryRunType.FILTER, - status=QueryRunStatus.COMPLETED, - page_size=10, - ) - query_runs = client.query_runs.list(organization_name, options) - print(f" ✓ Found {len(query_runs.items)} completed filter query runs") - for qr in query_runs.items[:3]: # Show first 3 - print(f" - {qr.id}: {qr.query[:50]}...") - except Exception as e: - print(f" ✗ Error: {e}") +def test_list(): + """Test 1: List query runs in a workspace.""" + print("=== Test 1: List Query Runs ===") - return query_runs.items[0] if query_runs.items else None + client, workspace = get_client_and_workspace() + try: + # Simple list + query_runs = list(client.query_runs.list(workspace.id)) + print(f"Found {len(query_runs)} query runs in workspace '{workspace.name}'") -def test_create_query_runs(client, organization_name): - """Test creating different types of query runs.""" - print("\n=== Testing Query Run Creation ===") + for i, qr in enumerate(query_runs[:5], 1): + print(f" {i}. {qr.id}") + print(f" Status: {qr.status}") + print(f" Created: {qr.created_at}") + print() - created_query_runs = [] + # List with options + options = QueryRunListOptions(page_size=5) + limited_runs = list(client.query_runs.list(workspace.id, options)) + print(f"Retrieved {len(limited_runs)} query runs (page_size=5)") - # 1. Create a filter query run - print("\n1. Creating Filter Query Run:") - try: - options = QueryRunCreateOptions( - query="SELECT id, status, created_at FROM runs WHERE status = 'completed' ORDER BY created_at DESC", - query_type=QueryRunType.FILTER, - organization_name=organization_name, - timeout_seconds=300, - max_results=100, - ) - query_run = client.query_runs.create(organization_name, options) - created_query_runs.append(query_run) - print(f" ✓ Created filter query run: {query_run.id}") - print(f" ✓ Status: {query_run.status}") - print(f" ✓ Query: {query_run.query}") - except Exception as e: - print(f" ✗ Error: {e}") + return query_runs - # 2. Create a search query run - print("\n2. Creating Search Query Run:") - try: - options = QueryRunCreateOptions( - query="SEARCH workspaces WHERE name CONTAINS 'production'", - query_type=QueryRunType.SEARCH, - organization_name=organization_name, - timeout_seconds=180, - max_results=50, - ) - query_run = client.query_runs.create(organization_name, options) - created_query_runs.append(query_run) - print(f" ✓ Created search query run: {query_run.id}") - print(f" ✓ Status: {query_run.status}") - print(f" ✓ Query type: {query_run.query_type}") except Exception as e: - print(f" ✗ Error: {e}") + print(f"Error: {e}") + return [] + + +def test_create(): + """Test 2: Create a new query run.""" + print("\n=== Test 2: Create Query Run ===") + + client, workspace = get_client_and_workspace() - # 3. Create an analytics query run - print("\n3. 
Creating Analytics Query Run:") try: + # Get the latest configuration version + config_versions = list(client.configuration_versions.list(workspace.id)) + if not config_versions: + print("ERROR: No configuration versions found in workspace") + return None + + config_version = config_versions[0] + print(f"Using configuration version: {config_version.id}") + + # Create query run options = QueryRunCreateOptions( - query="ANALYZE run_durations GROUP BY workspace_id ORDER BY avg_duration DESC", - query_type=QueryRunType.ANALYTICS, - organization_name=organization_name, - timeout_seconds=600, - max_results=200, - filters={"time_range": "last_30_days", "include_failed": False}, + source=QueryRunSource.API, + workspace_id=workspace.id, + configuration_version_id=config_version.id, ) - query_run = client.query_runs.create(organization_name, options) - created_query_runs.append(query_run) - print(f" ✓ Created analytics query run: {query_run.id}") - print(f" ✓ Status: {query_run.status}") - print(f" ✓ Timeout: {query_run.timeout_seconds}s") - print(f" ✓ Max results: {query_run.max_results}") + + query_run = client.query_runs.create(options) + print(f"Created query run: {query_run.id}") + print(f" Status: {query_run.status}") + print(f" Source: {query_run.source}") + print(f" Created: {query_run.created_at}") + + return query_run + except Exception as e: - print(f" ✗ Error: {e}") + print(f"Error: {e}") + return None - return created_query_runs +def test_read(query_run_id=None): + """Test 3: Read a specific query run.""" + print("\n=== Test 3: Read Query Run ===") -def test_read_query_run(client, query_run_id): - """Test reading query run details.""" - print(f"\n=== Testing Query Run Read Operations for {query_run_id} ===") + client, workspace = get_client_and_workspace() - # 1. Basic read - print("\n1. Reading Query Run Details:") try: + # If no query_run_id provided, get the first one from the list + if not query_run_id: + query_runs = list(client.query_runs.list(workspace.id)) + if not query_runs: + print("ERROR: No query runs found to read") + return None + query_run_id = query_runs[0].id + print(f"Using first query run from list: {query_run_id}") + + # Read the query run query_run = client.query_runs.read(query_run_id) - print(f" ✓ Query Run ID: {query_run.id}") - print(f" ✓ Status: {query_run.status}") - print(f" ✓ Query Type: {query_run.query_type}") - print(f" ✓ Created: {query_run.created_at}") - print(f" ✓ Updated: {query_run.updated_at}") - if query_run.results_count: - print(f" ✓ Results Count: {query_run.results_count}") - if query_run.error_message: - print(f" ✗ Error: {query_run.error_message}") - except Exception as e: - print(f" ✗ Error: {e}") - return None + print(f"Read query run: {query_run.id}") + print(f" Status: {query_run.status}") + print(f" Source: {query_run.source}") + print(f" Created: {query_run.created_at}") + + if query_run.status_timestamps: + print(" Status Timestamps:") + if query_run.status_timestamps.queued_at: + print(f" Queued: {query_run.status_timestamps.queued_at}") + if query_run.status_timestamps.running_at: + print(f" Running: {query_run.status_timestamps.running_at}") + if query_run.status_timestamps.finished_at: + print(f" Finished: {query_run.status_timestamps.finished_at}") + if query_run.status_timestamps.errored_at: + print(f" Errored: {query_run.status_timestamps.errored_at}") + + return query_run - # 2. Read with options - print("\n2. 
Reading Query Run with Options:") - try: - options = QueryRunReadOptions(include_results=True, include_logs=True) - query_run = client.query_runs.read_with_options(query_run_id, options) - print(" ✓ Read query run with additional data") - print(f" ✓ Status: {query_run.status}") - if query_run.logs_url: - print(f" ✓ Logs URL available: {query_run.logs_url[:50]}...") - if query_run.results_url: - print(f" ✓ Results URL available: {query_run.results_url[:50]}...") except Exception as e: - print(f" ✗ Error: {e}") + print(f"Error: {e}") + return None - return query_run +def test_logs(query_run_id=None): + """Test 4: Retrieve logs for a query run.""" + print("\n=== Test 4: Get Query Run Logs ===") -def test_query_run_logs(client, query_run_id): - """Test retrieving query run logs.""" - print(f"\n=== Testing Query Run Logs for {query_run_id} ===") + client, workspace = get_client_and_workspace() try: + # If no query_run_id provided, get the first one from the list + if not query_run_id: + query_runs = list(client.query_runs.list(workspace.id)) + if not query_runs: + print("ERROR: No query runs found to get logs") + return None + query_run_id = query_runs[0].id + print(f"Using first query run from list: {query_run_id}") + + # Get logs logs = client.query_runs.logs(query_run_id) - print(f" ✓ Retrieved logs for query run: {logs.query_run_id}") - print(f" ✓ Log level: {logs.log_level}") - if logs.timestamp: - print(f" ✓ Log timestamp: {logs.timestamp}") - - # Show first few lines of logs - log_lines = logs.logs.split("\n")[:5] - print(" ✓ Log preview:") - for line in log_lines: - if line.strip(): - print(f" {line}") - except Exception as e: - print(f" ✗ Error retrieving logs: {e}") + log_content = logs.read().decode("utf-8") + print(f"Retrieved logs for query run: {query_run_id}") + print(f" Log size: {len(log_content)} bytes") + print("\n--- Log Preview (first 500 chars) ---") + print(log_content[:500]) + if len(log_content) > 500: + print(f"\n... ({len(log_content) - 500} more characters)") + print("--- End of Log Preview ---") -def test_query_run_results(client, query_run_id): - """Test retrieving query run results.""" - print(f"\n=== Testing Query Run Results for {query_run_id} ===") + return log_content - try: - results = client.query_runs.results(query_run_id) - print(f" ✓ Retrieved results for query run: {results.query_run_id}") - print(f" ✓ Total results: {results.total_count}") - print(f" ✓ Truncated: {results.truncated}") - - # Show first few results - if results.results: - print(" ✓ Sample results:") - for i, result in enumerate(results.results[:3]): - print(f" {i + 1}. 
{result}") - else: - print(" ℹ No results available") except Exception as e: - print(f" ✗ Error retrieving results: {e}") + print(f"Error: {e}") + print(" Note: Logs may not be available if the query run hasn't started yet") + return None -def test_query_run_cancellation(client, query_run_id): - """Test canceling query runs.""" - print(f"\n=== Testing Query Run Cancellation for {query_run_id} ===") +def test_cancel(query_run_id=None): + """Test 5: Cancel a query run.""" + print("\n=== Test 5: Cancel Query Run ===") - # First check if the query run is in a cancelable state - try: - query_run = client.query_runs.read(query_run_id) - if query_run.status not in [QueryRunStatus.PENDING, QueryRunStatus.RUNNING]: - print( - f" ℹ Query run is {query_run.status}, creating new one for cancellation test" - ) - - # Create a new query run for cancellation test - options = QueryRunCreateOptions( - query="SELECT * FROM runs LIMIT 10000", # Large query to ensure it runs long enough - query_type=QueryRunType.FILTER, - organization_name=query_run.organization_name, - timeout_seconds=300, - ) - query_run = client.query_runs.create(query_run.organization_name, options) - query_run_id = query_run.id - print(f" ✓ Created new query run for cancellation: {query_run_id}") - except Exception as e: - print(f" ✗ Error checking query run status: {e}") - return + client, workspace = get_client_and_workspace() - # 1. Test regular cancel - print("\n1. Testing Regular Cancellation:") - try: - cancel_options = QueryRunCancelOptions( - reason="User requested cancellation for testing" - ) - canceled_query_run = client.query_runs.cancel(query_run_id, cancel_options) - print(f" ✓ Canceled query run: {canceled_query_run.id}") - print(f" ✓ New status: {canceled_query_run.status}") - except Exception as e: - print(f" ✗ Error canceling query run: {e}") - - # If regular cancel fails, try force cancel - print("\n2. Testing Force Cancellation:") - try: - force_cancel_options = QueryRunForceCancelOptions( - reason="Force cancel after regular cancel failed" - ) - force_canceled_query_run = client.query_runs.force_cancel( - query_run_id, force_cancel_options - ) - print(f" ✓ Force canceled query run: {force_canceled_query_run.id}") - print(f" ✓ New status: {force_canceled_query_run.status}") - except Exception as e: - print(f" ✗ Error force canceling query run: {e}") - - -def test_query_run_workflow(client, organization_name): - """Test a complete query run workflow.""" - print("\n=== Testing Complete Query Run Workflow ===") - - # 1. Create a query run - print("\n1. Creating Query Run:") - try: - options = QueryRunCreateOptions( - query="SELECT id, name, status FROM workspaces ORDER BY created_at DESC LIMIT 10", - query_type=QueryRunType.FILTER, - organization_name=organization_name, - timeout_seconds=120, - max_results=50, - ) - query_run = client.query_runs.create(organization_name, options) - print(f" ✓ Created: {query_run.id}") - query_run_id = query_run.id - except Exception as e: - print(f" ✗ Error creating query run: {e}") - return - - # 2. Monitor execution - print("\n2. 
Monitoring Execution:") - max_attempts = 30 - attempt = 0 - - while attempt < max_attempts: - try: - query_run = client.query_runs.read(query_run_id) - print(f" Attempt {attempt + 1}: Status = {query_run.status}") - - if query_run.status in [ - QueryRunStatus.COMPLETED, - QueryRunStatus.ERRORED, - QueryRunStatus.CANCELED, - ]: - break - - time.sleep(2) # Wait 2 seconds before checking again - attempt += 1 - except Exception as e: - print(f" ✗ Error monitoring query run: {e}") - break - - # 3. Get final results - print("\n3. Getting Final Results:") try: - if query_run.status == QueryRunStatus.COMPLETED: - results = client.query_runs.results(query_run_id) - print(" ✓ Query completed successfully") - print(f" ✓ Total results: {results.total_count}") - print(f" ✓ Truncated: {results.truncated}") - - # Get logs - logs = client.query_runs.logs(query_run_id) - print(f" ✓ Retrieved execution logs ({len(logs.logs)} characters)") - else: - print(f" ✗ Query run finished with status: {query_run.status}") - if query_run.error_message: - print(f" ✗ Error message: {query_run.error_message}") - except Exception as e: - print(f" ✗ Error getting final results: {e}") + # If no query_run_id provided, create a new one + if not query_run_id: + print("Creating a new query run to cancel...") + new_run = test_create() + if not new_run: + print("ERROR: Could not create query run to cancel") + return False + query_run_id = new_run.id + time.sleep(1) # Give it a moment to start + + # Cancel the query run + client.query_runs.cancel(query_run_id) + print(f"Cancel requested for query run: {query_run_id}") + + # Verify cancellation + time.sleep(2) + query_run = client.query_runs.read(query_run_id) + print(f" Status after cancel: {query_run.status}") - return query_run_id + return True + except Exception as e: + print(f"Error: {e}") + print(" Note: Query run may not be in a cancelable state") + return False -def main(): - """Main function to demonstrate query run operations.""" - # Get configuration from environment - token = os.environ.get("TFE_TOKEN") - org = os.environ.get("TFE_ORG") - address = os.environ.get("TFE_ADDRESS", "https://app.terraform.io") - if not token: - print("Error: TFE_TOKEN environment variable is required") - return 1 +def test_force_cancel(query_run_id=None): + """Test 6: Force cancel a query run.""" + print("\n=== Test 6: Force Cancel Query Run ===") - if not org: - print("Error: TFE_ORG environment variable is required") - return 1 + client, workspace = get_client_and_workspace() - # Initialize client - print("=== Terraform Enterprise Query Run SDK Example ===") - print(f"Address: {address}") - print(f"Organization: {org}") - print(f"Timestamp: {datetime.now()}") + try: + # If no query_run_id provided, create a new one + if not query_run_id: + print("Creating a new query run to force cancel...") + new_run = test_create() + if not new_run: + print("ERROR: Could not create query run to force cancel") + return False + query_run_id = new_run.id + time.sleep(1) # Give it a moment to start + + # Force cancel the query run + client.query_runs.force_cancel(query_run_id) + print(f"Force cancel requested for query run: {query_run_id}") + + # Verify force cancellation + time.sleep(2) + query_run = client.query_runs.read(query_run_id) + print(f" Status after force cancel: {query_run.status}") - config = TFEConfig(address=address, token=token) - client = TFEClient(config) + return True - try: - # 1. 
List existing query runs - existing_query_run = test_list_query_runs(client, org) + except Exception as e: + print(f"Error: {e}") + print(" Note: Query run may not be in a force-cancelable state") + return False - # 2. Create new query runs - created_query_runs = test_create_query_runs(client, org) - # 3. Test read operations - if existing_query_run: - test_read_query_run(client, existing_query_run.id) +def main(): + """Run all tests in sequence.""" + print("=" * 80) + print("QUERY RUN FUNCTION TESTS") + print("=" * 80) + print("Testing all Query Run API operations") + print() + print("NOTE: Query Runs require Terraform 1.10+ with 'terraform query' command.") + print(" Most query runs will error since this feature is not yet available.") + print("=" * 80) - # Only test logs and results if query run is completed - if existing_query_run.status == QueryRunStatus.COMPLETED: - test_query_run_logs(client, existing_query_run.id) - test_query_run_results(client, existing_query_run.id) + # Test 1: List query runs + query_runs = test_list() - # 4. Test cancellation (with a new query run if needed) - if created_query_runs: - test_query_run_cancellation(client, created_query_runs[0].id) + # Test 2: Create a query run + new_query_run = test_create() - # 5. Test complete workflow - test_query_run_workflow(client, org) + # Test 3: Read a query run + if query_runs: + test_read(query_runs[0].id) + elif new_query_run: + test_read(new_query_run.id) - print("\n" + "=" * 80) - print("Query Run operations completed successfully!") - print("=" * 80) + # Test 4: Get logs (use first query run from list) + if query_runs: + test_logs(query_runs[0].id) - except Exception as e: - print(f"\nUnexpected error: {e}") - return 1 + # Test 5: Cancel a query run (creates new one) + test_cancel() - return 0 + # Test 6: Force cancel a query run (creates new one) + test_force_cancel() if __name__ == "__main__": - exit(main()) + main() diff --git a/src/pytfe/models/__init__.py b/src/pytfe/models/__init__.py index f3eb33b..6851577 100644 --- a/src/pytfe/models/__init__.py +++ b/src/pytfe/models/__init__.py @@ -148,16 +148,18 @@ # ── Query Runs ──────────────────────────────────────────────────────────────── from .query_run import ( QueryRun, + QueryRunActions, QueryRunCancelOptions, QueryRunCreateOptions, QueryRunForceCancelOptions, + QueryRunIncludeOpt, QueryRunList, QueryRunListOptions, - QueryRunLogs, QueryRunReadOptions, - QueryRunResults, + QueryRunSource, QueryRunStatus, - QueryRunType, + QueryRunStatusTimestamps, + QueryRunVariable, ) # ── Registry Modules / Providers ────────────────────────────────────────────── @@ -432,16 +434,18 @@ "RegistryProviderReadOptions", # Query runs "QueryRun", + "QueryRunActions", "QueryRunCancelOptions", "QueryRunCreateOptions", "QueryRunForceCancelOptions", + "QueryRunIncludeOpt", "QueryRunList", "QueryRunListOptions", - "QueryRunLogs", "QueryRunReadOptions", - "QueryRunResults", + "QueryRunSource", "QueryRunStatus", - "QueryRunType", + "QueryRunStatusTimestamps", + "QueryRunVariable", # Core (from old types.py, now split) "Entitlements", "ExecutionMode", diff --git a/src/pytfe/models/query_run.py b/src/pytfe/models/query_run.py index 3670830..f13a080 100644 --- a/src/pytfe/models/query_run.py +++ b/src/pytfe/models/query_run.py @@ -2,7 +2,6 @@ from datetime import datetime from enum import Enum -from typing import Any from pydantic import BaseModel, ConfigDict, Field @@ -11,18 +10,66 @@ class QueryRunStatus(str, Enum): """QueryRunStatus represents the status of a query run operation.""" 
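For reference, a minimal end-to-end sketch of the reworked Query Runs API — create a run against a workspace, poll until it reaches one of the terminal statuses defined just below, then stream its logs. It assumes TFE_TOKEN is set in the environment and uses placeholder organization and workspace names; every call mirrors the patterns used in examples/query_run.py above rather than anything beyond this diff.

```python
# Minimal usage sketch for the reworked Query Runs API; the org and workspace
# names below are placeholders, and TFE_TOKEN is read from the environment.
import time

from pytfe import TFEClient, TFEConfig
from pytfe.models import QueryRunCreateOptions, QueryRunSource, QueryRunStatus

client = TFEClient(TFEConfig.from_env())
workspace = client.workspaces.read("query-test", organization="my-org")

# Run the query against the workspace's most recent configuration version.
config_version = next(iter(client.configuration_versions.list(workspace.id)))

query_run = client.query_runs.create(
    QueryRunCreateOptions(
        source=QueryRunSource.API,
        workspace_id=workspace.id,
        configuration_version_id=config_version.id,
    )
)

# Poll until the run reaches a terminal status (finished, errored, or canceled).
terminal = {QueryRunStatus.FINISHED, QueryRunStatus.ERRORED, QueryRunStatus.CANCELED}
for _ in range(30):
    if query_run.status in terminal:
        break
    time.sleep(2)
    query_run = client.query_runs.read(query_run.id)

# logs() returns a binary stream backed by the run's log-read-url.
if query_run.status == QueryRunStatus.FINISHED:
    print(client.query_runs.logs(query_run.id).read().decode("utf-8"))
else:
    print(f"Query run ended with status: {query_run.status}")
```
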
PENDING = "pending" + QUEUED = "queued" RUNNING = "running" - COMPLETED = "completed" + FINISHED = "finished" ERRORED = "errored" CANCELED = "canceled" -class QueryRunType(str, Enum): - """QueryRunType represents different types of query runs.""" +class QueryRunSource(str, Enum): + """QueryRunSource represents the source of a query run.""" - FILTER = "filter" - SEARCH = "search" - ANALYTICS = "analytics" + API = "tfe-api" + + +class QueryRunActions(BaseModel): + """Actions available on a query run.""" + + model_config = ConfigDict(populate_by_name=True) + + is_cancelable: bool = Field( + ..., alias="is-cancelable", description="Whether the query run can be canceled" + ) + is_force_cancelable: bool = Field( + ..., + alias="is-force-cancelable", + description="Whether the query run can be force canceled", + ) + + +class QueryRunStatusTimestamps(BaseModel): + """Timestamps for each status of a query run.""" + + model_config = ConfigDict(populate_by_name=True) + + pending_at: datetime | None = Field( + None, alias="pending-at", description="When the query run was created" + ) + queued_at: datetime | None = Field( + None, alias="queued-at", description="When the query run was queued" + ) + running_at: datetime | None = Field( + None, alias="running-at", description="When the query run started running" + ) + finished_at: datetime | None = Field( + None, + alias="finished-at", + description="When the query run finished successfully", + ) + errored_at: datetime | None = Field( + None, alias="errored-at", description="When the query run encountered an error" + ) + canceled_at: datetime | None = Field( + None, alias="canceled-at", description="When the query run was canceled" + ) + + +class QueryRunVariable(BaseModel): + """A variable for a query run.""" + + key: str = Field(..., description="Variable key") + value: str = Field(..., description="Variable value") class QueryRun(BaseModel): @@ -31,51 +78,46 @@ class QueryRun(BaseModel): model_config = ConfigDict(populate_by_name=True) id: str = Field(..., description="The unique identifier for this query run") - type: str = Field(default="query-runs", description="The type of this resource") - query: str = Field(..., description="The query string used for this run") - query_type: QueryRunType = Field( - ..., alias="query-type", description="The type of query being executed" + type: str = Field(default="queries", description="The type of this resource") + actions: QueryRunActions | None = Field( + None, description="Actions available on this query run" ) - status: QueryRunStatus = Field( - ..., description="The current status of the query run" - ) - results_count: int | None = Field( - None, alias="results-count", description="The number of results returned" + canceled_at: datetime | None = Field( + None, alias="canceled-at", description="When the query run was canceled" ) created_at: datetime = Field( ..., alias="created-at", description="The time this query run was created" ) - updated_at: datetime = Field( - ..., alias="updated-at", description="The time this query run was last updated" - ) - started_at: datetime | None = Field( - None, alias="started-at", description="The time this query run was started" + updated_at: datetime | None = Field( + None, alias="updated-at", description="The time this query run was last updated" ) - finished_at: datetime | None = Field( - None, alias="finished-at", description="The time this query run was finished" + source: QueryRunSource | str = Field(..., description="The source of the query run") + status: 
QueryRunStatus = Field( + ..., description="The current status of the query run" ) - error_message: str | None = Field( - None, alias="error-message", description="Error message if the query run failed" + status_timestamps: QueryRunStatusTimestamps | None = Field( + None, + alias="status-timestamps", + description="Timestamps for each status of the query run", ) - logs_url: str | None = Field( - None, alias="logs-url", description="URL to retrieve the query run logs" + variables: list[QueryRunVariable] | None = Field( + None, description="Run-specific variable values" ) - results_url: str | None = Field( - None, alias="results-url", description="URL to retrieve the query run results" + log_read_url: str | None = Field( + None, alias="log-read-url", description="URL to retrieve the query run logs" ) + # Relationships workspace_id: str | None = Field( - None, - alias="workspace-id", - description="The workspace ID if query is workspace-scoped", + None, description="The workspace ID associated with this query run" ) - organization_name: str | None = Field( - None, alias="organization-name", description="The organization name" + configuration_version_id: str | None = Field( + None, description="The configuration version ID used for this query run" ) - timeout_seconds: int | None = Field( - None, alias="timeout-seconds", description="Query timeout in seconds" + created_by_id: str | None = Field( + None, description="The user ID who created this query run" ) - max_results: int | None = Field( - None, alias="max-results", description="Maximum number of results to return" + canceled_by_id: str | None = Field( + None, description="The user ID who canceled this query run" ) @@ -84,34 +126,29 @@ class QueryRunCreateOptions(BaseModel): model_config = ConfigDict(populate_by_name=True) - query: str = Field(..., description="The query string to execute") - query_type: QueryRunType = Field( - ..., alias="query-type", description="The type of query being executed" + source: QueryRunSource | str = Field(..., description="The source of the query run") + variables: list[QueryRunVariable] | None = Field( + None, description="Run-specific variable values" ) - workspace_id: str | None = Field( - None, + workspace_id: str = Field( + ..., alias="workspace-id", - description="The workspace ID if query is workspace-scoped", - ) - organization_name: str | None = Field( - None, alias="organization-name", description="The organization name" - ) - timeout_seconds: int | None = Field( - None, - alias="timeout-seconds", - description="Query timeout in seconds", - gt=0, - le=3600, + description="The workspace ID to run the query against", ) - max_results: int | None = Field( + configuration_version_id: str | None = Field( None, - alias="max-results", - description="Maximum number of results to return", - gt=0, - le=10000, + alias="configuration-version-id", + description="The configuration version ID to use for the query", ) - filters: dict[str, Any] | None = Field( - None, description="Additional filters to apply to the query" + + +class QueryRunIncludeOpt(str, Enum): + """Options for including related resources in query run requests.""" + + CREATED_BY = "created_by" + CONFIGURATION_VERSION = "configuration_version" + CONFIGURATION_VERSION_INGRESS_ATTRIBUTES = ( + "configuration_version.ingress_attributes" ) @@ -126,19 +163,8 @@ class QueryRunListOptions(BaseModel): page_size: int | None = Field( None, alias="page[size]", description="Number of items per page", ge=1, le=100 ) - query_type: QueryRunType | None = Field( - 
None, alias="filter[query-type]", description="Filter by query type" - ) - status: QueryRunStatus | None = Field( - None, alias="filter[status]", description="Filter by status" - ) - workspace_id: str | None = Field( - None, alias="filter[workspace-id]", description="Filter by workspace ID" - ) - organization_name: str | None = Field( - None, - alias="filter[organization-name]", - description="Filter by organization name", + include: list[QueryRunIncludeOpt] | None = Field( + None, description="List of related resources to include" ) @@ -147,11 +173,8 @@ class QueryRunReadOptions(BaseModel): model_config = ConfigDict(populate_by_name=True) - include_results: bool | None = Field( - None, alias="include[results]", description="Include query results in response" - ) - include_logs: bool | None = Field( - None, alias="include[logs]", description="Include query logs in response" + include: list[QueryRunIncludeOpt] | None = Field( + None, description="List of related resources to include" ) @@ -160,7 +183,9 @@ class QueryRunCancelOptions(BaseModel): model_config = ConfigDict(populate_by_name=True) - reason: str | None = Field(None, description="Reason for canceling the query run") + comment: str | None = Field( + None, description="Optional comment about why the query run was canceled" + ) class QueryRunForceCancelOptions(BaseModel): @@ -168,8 +193,8 @@ class QueryRunForceCancelOptions(BaseModel): model_config = ConfigDict(populate_by_name=True) - reason: str | None = Field( - None, description="Reason for force canceling the query run" + comment: str | None = Field( + None, description="Optional comment about why the query run was force canceled" ) @@ -183,32 +208,6 @@ class QueryRunList(BaseModel): ) current_page: int | None = Field(None, description="Current page number") total_pages: int | None = Field(None, description="Total number of pages") - prev_page: str | None = Field(None, description="URL of the previous page") - next_page: str | None = Field(None, description="URL of the next page") + prev_page: int | str | None = Field(None, description="Previous page number or URL") + next_page: int | str | None = Field(None, description="Next page number or URL") total_count: int | None = Field(None, description="Total number of items") - - -class QueryRunResults(BaseModel): - """Represents the results of a query run.""" - - model_config = ConfigDict(populate_by_name=True) - - query_run_id: str = Field(..., description="The ID of the query run") - results: list[dict[str, Any]] = Field( - default_factory=list, description="The query results" - ) - total_count: int = Field(..., description="Total number of results") - truncated: bool = Field( - False, description="Whether the results were truncated due to limits" - ) - - -class QueryRunLogs(BaseModel): - """Represents the logs of a query run.""" - - model_config = ConfigDict(populate_by_name=True) - - query_run_id: str = Field(..., description="The ID of the query run") - logs: str = Field(..., description="The query run logs") - log_level: str | None = Field(None, description="The log level") - timestamp: datetime | None = Field(None, description="When the logs were generated") diff --git a/src/pytfe/resources/query_run.py b/src/pytfe/resources/query_run.py index 1540c70..fce1c1d 100644 --- a/src/pytfe/resources/query_run.py +++ b/src/pytfe/resources/query_run.py @@ -1,21 +1,20 @@ from __future__ import annotations +import io +from collections.abc import Iterator from typing import Any from ..errors import ( - InvalidOrgError, 
InvalidQueryRunIDError, + InvalidWorkspaceIDError, ) from ..models.query_run import ( QueryRun, QueryRunCancelOptions, QueryRunCreateOptions, QueryRunForceCancelOptions, - QueryRunList, QueryRunListOptions, - QueryRunLogs, QueryRunReadOptions, - QueryRunResults, ) from ..utils import valid_string_id from ._base import _Service @@ -25,57 +24,69 @@ class QueryRuns(_Service): """Query Runs API for Terraform Enterprise.""" def list( - self, organization: str, options: QueryRunListOptions | None = None - ) -> QueryRunList: - """List query runs for the given organization.""" - if not valid_string_id(organization): - raise InvalidOrgError() - - params = ( - options.model_dump(by_alias=True, exclude_none=True) if options else None - ) + self, workspace_id: str, options: QueryRunListOptions | None = None + ) -> Iterator[QueryRun]: + """Iterate through all query runs for the given workspace. + + This method automatically handles pagination and yields QueryRun objects one at a time. + + Args: + workspace_id: The ID of the workspace + options: Optional list options (page_size, include, etc.) + + Yields: + QueryRun objects one at a time + + Example: + for query_run in client.query_runs.list(workspace_id): + print(f"Query Run: {query_run.id} - Status: {query_run.status}") + """ + if not valid_string_id(workspace_id): + raise InvalidWorkspaceIDError() + + params: dict[str, Any] = {} + if options: + params = options.model_dump(by_alias=True, exclude_none=True) + # Convert include list to comma-separated string + if "include" in params and params["include"] and options.include: + params["include"] = ",".join([i.value for i in options.include]) + + path = f"/api/v2/workspaces/{workspace_id}/queries" + for item in self._list(path, params=params): + attrs = item.get("attributes", {}) + attrs["id"] = item.get("id") + yield QueryRun.model_validate(attrs) + + def create(self, options: QueryRunCreateOptions) -> QueryRun: + """Create a new query run.""" + attrs = options.model_dump(by_alias=True, exclude_none=True) - r = self.t.request( - "GET", - f"/api/v2/organizations/{organization}/query-runs", - params=params, - ) + # Build relationships + relationships: dict[str, Any] = {} - jd = r.json() - items = [] - meta = jd.get("meta", {}) - pagination = meta.get("pagination", {}) - - for d in jd.get("data", []): - attrs = d.get("attributes", {}) - attrs["id"] = d.get("id") - items.append(QueryRun.model_validate(attrs)) - - return QueryRunList( - items=items, - current_page=pagination.get("current-page"), - total_pages=pagination.get("total-pages"), - prev_page=pagination.get("prev-page"), - next_page=pagination.get("next-page"), - total_count=pagination.get("total-count"), - ) + if workspace_id := attrs.pop("workspace-id", None): + relationships["workspace"] = { + "data": {"type": "workspaces", "id": workspace_id} + } - def create(self, organization: str, options: QueryRunCreateOptions) -> QueryRun: - """Create a new query run for the given organization.""" - if not valid_string_id(organization): - raise InvalidOrgError() + if config_version_id := attrs.pop("configuration-version-id", None): + relationships["configuration-version"] = { + "data": {"type": "configuration-versions", "id": config_version_id} + } - attrs = options.model_dump(by_alias=True, exclude_none=True) body: dict[str, Any] = { "data": { + "type": "queries", "attributes": attrs, - "type": "query-runs", } } + if relationships: + body["data"]["relationships"] = relationships + r = self.t.request( "POST", - 
f"/api/v2/organizations/{organization}/query-runs", + "/api/v2/queries", json_body=body, ) @@ -91,7 +102,7 @@ def read(self, query_run_id: str) -> QueryRun: if not valid_string_id(query_run_id): raise InvalidQueryRunIDError() - r = self.t.request("GET", f"/api/v2/query-runs/{query_run_id}") + r = self.t.request("GET", f"/api/v2/queries/{query_run_id}") jd = r.json() data = jd.get("data", {}) @@ -108,8 +119,11 @@ def read_with_options( raise InvalidQueryRunIDError() params = options.model_dump(by_alias=True, exclude_none=True) + # Convert include list to comma-separated string + if "include" in params and params["include"] and options.include: + params["include"] = ",".join([i.value for i in options.include]) - r = self.t.request("GET", f"/api/v2/query-runs/{query_run_id}", params=params) + r = self.t.request("GET", f"/api/v2/queries/{query_run_id}", params=params) jd = r.json() data = jd.get("data", {}) @@ -118,99 +132,66 @@ def read_with_options( return QueryRun.model_validate(attrs) - def logs(self, query_run_id: str) -> QueryRunLogs: - """Retrieve the logs for a query run.""" - if not valid_string_id(query_run_id): - raise InvalidQueryRunIDError() + def logs(self, query_run_id: str) -> io.IOBase: + """Retrieve the logs for a query run. - r = self.t.request("GET", f"/api/v2/query-runs/{query_run_id}/logs") - - # Handle both JSON and plain text responses - content_type = r.headers.get("content-type", "").lower() - - if "application/json" in content_type: - jd = r.json() - return QueryRunLogs.model_validate(jd.get("data", {})) - else: - # Plain text logs - return QueryRunLogs( - query_run_id=query_run_id, - logs=r.text, - log_level="info", - timestamp=None, - ) - - def results(self, query_run_id: str) -> QueryRunResults: - """Retrieve the results for a query run.""" + Returns an IO stream that can be read to get the log content. + """ if not valid_string_id(query_run_id): raise InvalidQueryRunIDError() - r = self.t.request("GET", f"/api/v2/query-runs/{query_run_id}/results") + # First get the query run to retrieve the log read URL + query_run = self.read(query_run_id) - jd = r.json() - data = jd.get("data", {}) + if not query_run.log_read_url: + raise ValueError(f"Query run {query_run_id} does not have a log URL") - return QueryRunResults( - query_run_id=query_run_id, - results=data.get("results", []), - total_count=data.get("total_count", 0), - truncated=data.get("truncated", False), - ) + # Fetch the logs from the URL (absolute URLs are handled by _build_url) + r = self.t.request("GET", query_run.log_read_url) + + # Return the content as a BytesIO stream + return io.BytesIO(r.content) def cancel( self, query_run_id: str, options: QueryRunCancelOptions | None = None - ) -> QueryRun: - """Cancel a query run.""" + ) -> None: + """Cancel a query run. + + Returns 202 on success with empty body. 
+ """ if not valid_string_id(query_run_id): raise InvalidQueryRunIDError() - attrs = options.model_dump(by_alias=True, exclude_none=True) if options else {} - - body: dict[str, Any] = { - "data": { - "attributes": attrs, - "type": "query-runs", - } - } + body: dict[str, Any] | None = None + if options: + attrs = options.model_dump(by_alias=True, exclude_none=True) + if attrs: + body = {"data": {"attributes": attrs}} - r = self.t.request( + self.t.request( "POST", - f"/api/v2/query-runs/{query_run_id}/actions/cancel", + f"/api/v2/queries/{query_run_id}/actions/cancel", json_body=body, ) - jd = r.json() - data = jd.get("data", {}) - attrs = data.get("attributes", {}) - attrs["id"] = data.get("id") - - return QueryRun.model_validate(attrs) - def force_cancel( self, query_run_id: str, options: QueryRunForceCancelOptions | None = None - ) -> QueryRun: - """Force cancel a query run.""" + ) -> None: + """Force cancel a query run. + + Returns 202 on success with empty body. + """ if not valid_string_id(query_run_id): raise InvalidQueryRunIDError() - attrs = options.model_dump(by_alias=True, exclude_none=True) if options else {} - - body: dict[str, Any] = { - "data": { - "attributes": attrs, - "type": "query-runs", - } - } + body: dict[str, Any] | None = None + if options: + attrs = options.model_dump(by_alias=True, exclude_none=True) + if attrs: + body = {"data": {"attributes": attrs}} - r = self.t.request( + self.t.request( "POST", - f"/api/v2/query-runs/{query_run_id}/actions/force-cancel", + f"/api/v2/queries/{query_run_id}/actions/force-cancel", json_body=body, ) - - jd = r.json() - data = jd.get("data", {}) - attrs = data.get("attributes", {}) - attrs["id"] = data.get("id") - - return QueryRun.model_validate(attrs) diff --git a/tests/units/test_query_run.py b/tests/units/test_query_run.py index 8808090..ce87db0 100644 --- a/tests/units/test_query_run.py +++ b/tests/units/test_query_run.py @@ -1,564 +1,593 @@ -from datetime import datetime -from unittest.mock import MagicMock, Mock +""" +Comprehensive unit tests for query run operations in the Python TFE SDK. + +This test suite covers all query run methods including: +1. list() - List query runs for a workspace with pagination +2. create() - Create new query runs +3. read() - Read query run details +4. read_with_options() - Read with include options +5. logs() - Retrieve query run logs +6. cancel() - Cancel a query run +7. 
force_cancel() - Force cancel a query run + +Usage: + pytest tests/units/test_query_run.py -v +""" + +from unittest.mock import Mock, patch import pytest -from pytfe import TFEClient, TFEConfig -from pytfe.errors import InvalidOrgError, InvalidQueryRunIDError -from pytfe.models.query_run import ( +from pytfe.errors import InvalidQueryRunIDError, InvalidWorkspaceIDError +from pytfe.models import ( QueryRun, QueryRunCancelOptions, QueryRunCreateOptions, QueryRunForceCancelOptions, - QueryRunList, + QueryRunIncludeOpt, QueryRunListOptions, - QueryRunLogs, QueryRunReadOptions, - QueryRunResults, + QueryRunSource, QueryRunStatus, - QueryRunType, + QueryRunStatusTimestamps, + QueryRunVariable, ) +from pytfe.resources.query_run import QueryRuns + +# ============================================================================ +# Fixtures +# ============================================================================ + + +@pytest.fixture +def mock_transport(): + """Create a mock HTTPTransport.""" + return Mock() + + +@pytest.fixture +def query_runs_service(mock_transport): + """Create a QueryRuns service with mocked transport.""" + return QueryRuns(mock_transport) + + +@pytest.fixture +def sample_query_run_data(): + """Sample query run data from API.""" + return { + "id": "qr-123abc456def", + "type": "queries", + "attributes": { + "source": "tfe-api", + "status": "finished", + "created-at": "2024-01-15T10:00:00Z", + "updated-at": "2024-01-15T10:05:00Z", + "canceled-at": None, + "log-read-url": "https://app.terraform.io/api/v2/queries/qr-123abc456def/logs", + "status-timestamps": { + "queued-at": "2024-01-15T10:00:00Z", + "running-at": "2024-01-15T10:01:00Z", + "finished-at": "2024-01-15T10:05:00Z", + }, + "variables": [ + {"key": "environment", "value": "production"}, + {"key": "region", "value": "us-east-1"}, + ], + "actions": { + "is-cancelable": True, + "is-force-cancelable": False, + }, + }, + "relationships": { + "workspace": {"data": {"id": "ws-abc123", "type": "workspaces"}}, + "configuration-version": { + "data": {"id": "cv-def456", "type": "configuration-versions"} + }, + "created-by": {"data": {"id": "user-123", "type": "users"}}, + }, + } + + +@pytest.fixture +def sample_query_run_list_response(sample_query_run_data): + """Sample query run list response.""" + return { + "data": [ + sample_query_run_data, + { + "id": "qr-789ghi012jkl", + "type": "queries", + "attributes": { + "source": "tfe-api", + "status": "running", + "created-at": "2024-01-15T11:00:00Z", + "updated-at": "2024-01-15T11:02:00Z", + "canceled-at": None, + "log-read-url": None, + "status-timestamps": { + "queued-at": "2024-01-15T11:00:00Z", + "running-at": "2024-01-15T11:01:00Z", + }, + "variables": [], + "actions": { + "is-cancelable": True, + "is-force-cancelable": False, + }, + }, + }, + ], + "meta": { + "pagination": { + "current-page": 1, + "page-size": 20, + "total-pages": 1, + "total-count": 2, + } + }, + "links": {"next": None}, + } -class TestQueryRunModels: - """Test query run models and validation.""" +# ============================================================================ +# List Operations Tests +# ============================================================================ - def test_query_run_model_basic(self): - """Test basic QueryRun model creation.""" - query_run = QueryRun( - id="qr-test123", - query="SELECT * FROM runs WHERE status = 'completed'", - query_type=QueryRunType.FILTER, - status=QueryRunStatus.COMPLETED, - created_at=datetime.now(), - updated_at=datetime.now(), - ) - assert 
query_run.id == "qr-test123" - assert query_run.query == "SELECT * FROM runs WHERE status = 'completed'" - assert query_run.query_type == QueryRunType.FILTER - assert query_run.status == QueryRunStatus.COMPLETED - - def test_query_run_status_enum(self): - """Test QueryRunStatus enum values.""" - assert QueryRunStatus.PENDING == "pending" - assert QueryRunStatus.RUNNING == "running" - assert QueryRunStatus.COMPLETED == "completed" - assert QueryRunStatus.ERRORED == "errored" - assert QueryRunStatus.CANCELED == "canceled" - - def test_query_run_type_enum(self): - """Test QueryRunType enum values.""" - assert QueryRunType.FILTER == "filter" - assert QueryRunType.SEARCH == "search" - assert QueryRunType.ANALYTICS == "analytics" - - def test_query_run_create_options(self): - """Test QueryRunCreateOptions model.""" - options = QueryRunCreateOptions( - query="SELECT * FROM workspaces", - query_type=QueryRunType.SEARCH, - organization_name="test-org", - timeout_seconds=300, - max_results=1000, - ) - assert options.query == "SELECT * FROM workspaces" - assert options.query_type == QueryRunType.SEARCH - assert options.organization_name == "test-org" - assert options.timeout_seconds == 300 - assert options.max_results == 1000 - - def test_query_run_list_options(self): - """Test QueryRunListOptions model.""" - options = QueryRunListOptions( - page_number=2, - page_size=50, - query_type=QueryRunType.FILTER, - status=QueryRunStatus.COMPLETED, - organization_name="test-org", - ) - assert options.page_number == 2 - assert options.page_size == 50 - assert options.query_type == QueryRunType.FILTER - assert options.status == QueryRunStatus.COMPLETED - assert options.organization_name == "test-org" - - -class TestQueryRunOperations: - """Test query run operations.""" - - @pytest.fixture - def client(self): - """Create a test client.""" - config = TFEConfig(address="https://test.terraform.io", token="test-token") - return TFEClient(config) - - @pytest.fixture - def mock_response(self): - """Create a mock response.""" - mock = Mock() - mock.json.return_value = { - "data": [ - { - "id": "qr-test123", - "type": "query-runs", - "attributes": { - "query": "SELECT * FROM runs", - "query-type": "filter", - "status": "completed", - "results-count": 42, - "created-at": "2023-01-01T00:00:00Z", - "updated-at": "2023-01-01T00:05:00Z", - "started-at": "2023-01-01T00:01:00Z", - "finished-at": "2023-01-01T00:05:00Z", - "organization-name": "test-org", - }, - } - ], - "meta": { - "pagination": { - "current-page": 1, - "total-pages": 1, - "prev-page": None, - "next-page": None, - "total-count": 1, - } - }, - } - return mock - def test_list_query_runs(self, client, mock_response): - """Test listing query runs.""" - client._transport.request = MagicMock(return_value=mock_response) +class TestQueryRunsList: + """Test suite for query run list operations.""" - result = client.query_runs.list("test-org") + def test_list_basic( + self, query_runs_service, mock_transport, sample_query_run_list_response + ): + """Test basic query run listing.""" + mock_response = Mock() + mock_response.json.return_value = sample_query_run_list_response + mock_transport.request.return_value = mock_response - assert isinstance(result, QueryRunList) - assert len(result.items) == 1 - assert result.items[0].id == "qr-test123" - assert result.items[0].query == "SELECT * FROM runs" - assert result.current_page == 1 - assert result.total_count == 1 + workspace_id = "ws-abc123" + query_runs = list(query_runs_service.list(workspace_id)) - 
client._transport.request.assert_called_once_with( - "GET", "/api/v2/organizations/test-org/query-runs", params=None + # Verify the request + mock_transport.request.assert_called_with( + "GET", + f"/api/v2/workspaces/{workspace_id}/queries", + params={"page[number]": 1, "page[size]": 100}, ) - def test_list_query_runs_with_options(self, client, mock_response): - """Test listing query runs with options.""" - client._transport.request = MagicMock(return_value=mock_response) + # Verify the results + assert len(query_runs) == 2 + + # Check first query run + qr1 = query_runs[0] + assert qr1.id == "qr-123abc456def" + assert qr1.status == QueryRunStatus.FINISHED + assert qr1.source == QueryRunSource.API + assert qr1.log_read_url is not None + assert len(qr1.variables) == 2 + assert qr1.variables[0].key == "environment" + assert qr1.variables[0].value == "production" + + # Check second query run + qr2 = query_runs[1] + assert qr2.id == "qr-789ghi012jkl" + assert qr2.status == QueryRunStatus.RUNNING + assert qr2.log_read_url is None + assert len(qr2.variables) == 0 + + def test_list_with_options( + self, query_runs_service, mock_transport, sample_query_run_list_response + ): + """Test list with options.""" + mock_response = Mock() + mock_response.json.return_value = sample_query_run_list_response + mock_transport.request.return_value = mock_response + workspace_id = "ws-abc123" options = QueryRunListOptions( - page_number=2, - page_size=25, - query_type=QueryRunType.FILTER, - status=QueryRunStatus.COMPLETED, + page_number=1, + page_size=10, + include=[ + QueryRunIncludeOpt.CREATED_BY, + QueryRunIncludeOpt.CONFIGURATION_VERSION, + ], ) - result = client.query_runs.list("test-org", options) - assert isinstance(result, QueryRunList) - client._transport.request.assert_called_once_with( - "GET", - "/api/v2/organizations/test-org/query-runs", - params={ - "page[number]": 2, - "page[size]": 25, - "filter[query-type]": "filter", - "filter[status]": "completed", - }, - ) + query_runs = list(query_runs_service.list(workspace_id, options)) + + # Verify the request includes options + call_args = mock_transport.request.call_args + assert call_args[0][0] == "GET" + assert call_args[0][1] == f"/api/v2/workspaces/{workspace_id}/queries" + params = call_args[1]["params"] + assert params["page[number]"] == 1 + assert params["page[size]"] == 10 + assert params["include"] == "created_by,configuration_version" + + assert len(query_runs) == 2 + + def test_list_invalid_workspace_id(self, query_runs_service): + """Test list with invalid workspace ID.""" + with pytest.raises(InvalidWorkspaceIDError): + list(query_runs_service.list("")) + + with pytest.raises(InvalidWorkspaceIDError): + list(query_runs_service.list(None)) + + +# ============================================================================ +# Create Operations Tests +# ============================================================================ - def test_create_query_run(self, client): - """Test creating a query run.""" + +class TestQueryRunsCreate: + """Test suite for query run create operations.""" + + def test_create_basic( + self, query_runs_service, mock_transport, sample_query_run_data + ): + """Test basic query run creation.""" mock_response = Mock() - mock_response.json.return_value = { - "data": { - "id": "qr-new123", - "type": "query-runs", - "attributes": { - "query": "SELECT * FROM workspaces", - "query-type": "search", - "status": "pending", - "created-at": "2023-01-01T00:00:00Z", - "updated-at": "2023-01-01T00:00:00Z", - "organization-name": 
"test-org", - }, - } - } - client._transport.request = MagicMock(return_value=mock_response) + mock_response.json.return_value = {"data": sample_query_run_data} + mock_transport.request.return_value = mock_response options = QueryRunCreateOptions( - query="SELECT * FROM workspaces", - query_type=QueryRunType.SEARCH, - organization_name="test-org", - timeout_seconds=300, + source=QueryRunSource.API, + workspace_id="ws-abc123", + configuration_version_id="cv-def456", ) - result = client.query_runs.create("test-org", options) - assert isinstance(result, QueryRun) - assert result.id == "qr-new123" - assert result.query == "SELECT * FROM workspaces" - assert result.status == QueryRunStatus.PENDING + result = query_runs_service.create(options) - client._transport.request.assert_called_once_with( - "POST", - "/api/v2/organizations/test-org/query-runs", - json_body={ - "data": { - "attributes": { - "query": "SELECT * FROM workspaces", - "query-type": "search", - "organization-name": "test-org", - "timeout-seconds": 300, - }, - "type": "query-runs", - } - }, + # Verify the request + call_args = mock_transport.request.call_args + assert call_args[0][0] == "POST" + assert call_args[0][1] == "/api/v2/queries" + + json_body = call_args[1]["json_body"] + assert json_body["data"]["type"] == "queries" + assert json_body["data"]["attributes"]["source"] == "tfe-api" + assert ( + json_body["data"]["relationships"]["workspace"]["data"]["id"] == "ws-abc123" + ) + assert ( + json_body["data"]["relationships"]["configuration-version"]["data"]["id"] + == "cv-def456" ) - def test_read_query_run(self, client): - """Test reading a query run.""" + # Verify the result + assert isinstance(result, QueryRun) + assert result.id == "qr-123abc456def" + assert result.status == QueryRunStatus.FINISHED + assert result.source == QueryRunSource.API + + def test_create_with_variables( + self, query_runs_service, mock_transport, sample_query_run_data + ): + """Test query run creation with variables.""" mock_response = Mock() - mock_response.json.return_value = { - "data": { - "id": "qr-test123", - "type": "query-runs", - "attributes": { - "query": "SELECT * FROM runs", - "query-type": "filter", - "status": "completed", - "results-count": 42, - "created-at": "2023-01-01T00:00:00Z", - "updated-at": "2023-01-01T00:05:00Z", - }, - } - } - client._transport.request = MagicMock(return_value=mock_response) + mock_response.json.return_value = {"data": sample_query_run_data} + mock_transport.request.return_value = mock_response - result = client.query_runs.read("qr-test123") - - assert isinstance(result, QueryRun) - assert result.id == "qr-test123" - assert result.status == QueryRunStatus.COMPLETED - assert result.results_count == 42 + variables = [ + QueryRunVariable(key="environment", value="production"), + QueryRunVariable(key="region", value="us-east-1"), + ] - client._transport.request.assert_called_once_with( - "GET", "/api/v2/query-runs/qr-test123" + options = QueryRunCreateOptions( + source=QueryRunSource.API, + workspace_id="ws-abc123", + configuration_version_id="cv-def456", + variables=variables, ) - def test_read_query_run_with_options(self, client): - """Test reading a query run with options.""" + result = query_runs_service.create(options) + + # Verify variables in request + call_args = mock_transport.request.call_args + json_body = call_args[1]["json_body"] + assert "variables" in json_body["data"]["attributes"] + assert len(json_body["data"]["attributes"]["variables"]) == 2 + + # Verify result + assert result.id == 
"qr-123abc456def" + assert len(result.variables) == 2 + + +# ============================================================================ +# Read Operations Tests +# ============================================================================ + + +class TestQueryRunsRead: + """Test suite for query run read operations.""" + + def test_read_success( + self, query_runs_service, mock_transport, sample_query_run_data + ): + """Test successful query run read.""" mock_response = Mock() - mock_response.json.return_value = { - "data": { - "id": "qr-test123", - "type": "query-runs", - "attributes": { - "query": "SELECT * FROM runs", - "query-type": "filter", - "status": "completed", - "created-at": "2023-01-01T00:00:00Z", - "updated-at": "2023-01-01T00:05:00Z", - }, - } - } - client._transport.request = MagicMock(return_value=mock_response) + mock_response.json.return_value = {"data": sample_query_run_data} + mock_transport.request.return_value = mock_response + + result = query_runs_service.read("qr-123abc456def") - options = QueryRunReadOptions(include_results=True, include_logs=True) - result = client.query_runs.read_with_options("qr-test123", options) + # Verify the request + mock_transport.request.assert_called_once_with( + "GET", "/api/v2/queries/qr-123abc456def" + ) + # Verify the result assert isinstance(result, QueryRun) - assert result.id == "qr-test123" + assert result.id == "qr-123abc456def" + assert result.status == QueryRunStatus.FINISHED + assert result.source == QueryRunSource.API + assert result.log_read_url is not None - client._transport.request.assert_called_once_with( - "GET", - "/api/v2/query-runs/qr-test123", - params={"include[results]": True, "include[logs]": True}, - ) + def test_read_invalid_id(self, query_runs_service): + """Test read with invalid query run ID.""" + with pytest.raises(InvalidQueryRunIDError): + query_runs_service.read("") - def test_query_run_logs(self, client): - """Test retrieving query run logs.""" + with pytest.raises(InvalidQueryRunIDError): + query_runs_service.read(None) + + def test_read_with_options_success( + self, query_runs_service, mock_transport, sample_query_run_data + ): + """Test read with options.""" mock_response = Mock() - mock_response.headers = {"content-type": "text/plain"} - mock_response.text = ( - "Starting query execution...\nQuery completed successfully." 
+        mock_response.json.return_value = {"data": sample_query_run_data}
+        mock_transport.request.return_value = mock_response
+
+        options = QueryRunReadOptions(
+            include=[
+                QueryRunIncludeOpt.CREATED_BY,
+                QueryRunIncludeOpt.CONFIGURATION_VERSION,
+            ]
         )
-        client._transport.request = MagicMock(return_value=mock_response)

-        result = client.query_runs.logs("qr-test123")
+        result = query_runs_service.read_with_options("qr-123abc456def", options)

-        assert isinstance(result, QueryRunLogs)
-        assert result.query_run_id == "qr-test123"
-        assert "Starting query execution" in result.logs
-        assert result.log_level == "info"
+        # Verify the request includes options
+        call_args = mock_transport.request.call_args
+        assert call_args[0][0] == "GET"
+        assert call_args[0][1] == "/api/v2/queries/qr-123abc456def"
+        params = call_args[1]["params"]
+        assert params["include"] == "created_by,configuration_version"

-        client._transport.request.assert_called_once_with(
-            "GET", "/api/v2/query-runs/qr-test123/logs"
-        )
+        # Verify the result
+        assert result.id == "qr-123abc456def"

-    def test_query_run_results(self, client):
-        """Test retrieving query run results."""
-        mock_response = Mock()
-        mock_response.json.return_value = {
-            "data": {
-                "results": [
-                    {"id": "run-1", "status": "completed"},
-                    {"id": "run-2", "status": "pending"},
-                ],
-                "total_count": 2,
-                "truncated": False,
-            }
-        }
-        client._transport.request = MagicMock(return_value=mock_response)

-        result = client.query_runs.results("qr-test123")
+
+# ============================================================================
+# Logs Operations Tests
+# ============================================================================

-        assert isinstance(result, QueryRunResults)
-        assert result.query_run_id == "qr-test123"
-        assert len(result.results) == 2
-        assert result.total_count == 2
-        assert not result.truncated

-        client._transport.request.assert_called_once_with(
-            "GET", "/api/v2/query-runs/qr-test123/results"
+class TestQueryRunsLogs:
+    """Test suite for query run logs operations."""
+
+    def test_logs_success(self, query_runs_service, mock_transport):
+        """Test successful logs retrieval."""
+        # Mock the read method to return a query run with log URL
+        mock_query_run = Mock()
+        mock_query_run.log_read_url = (
+            "https://app.terraform.io/api/v2/queries/qr-123/logs"
         )

-    def test_cancel_query_run(self, client):
-        """Test canceling a query run."""
-        mock_response = Mock()
-        mock_response.json.return_value = {
-            "data": {
-                "id": "qr-test123",
-                "type": "query-runs",
-                "attributes": {
-                    "query": "SELECT * FROM runs",
-                    "query-type": "filter",
-                    "status": "canceled",
-                    "created-at": "2023-01-01T00:00:00Z",
-                    "updated-at": "2023-01-01T00:02:00Z",
-                },
-            }
-        }
-        client._transport.request = MagicMock(return_value=mock_response)
+        # Mock the logs content
+        mock_logs_response = Mock()
+        mock_logs_response.content = b"Query run logs content\nLine 2\nLine 3"

-        options = QueryRunCancelOptions(reason="User requested cancellation")
-        result = client.query_runs.cancel("qr-test123", options)
+        with patch.object(query_runs_service, "read", return_value=mock_query_run):
+            mock_transport.request.return_value = mock_logs_response

-        assert isinstance(result, QueryRun)
-        assert result.id == "qr-test123"
-        assert result.status == QueryRunStatus.CANCELED
+            result = query_runs_service.logs("qr-123abc456def")
+
+            # Verify read was called
+            query_runs_service.read.assert_called_once_with("qr-123abc456def")
+
+            # Verify logs request was made
+            mock_transport.request.assert_called_once_with(
+                "GET", "https://app.terraform.io/api/v2/queries/qr-123/logs"
+            )
+
+            # Verify the result is an IO stream
+            assert result.read() == b"Query run logs content\nLine 2\nLine 3"
+
+    def test_logs_no_url_error(self, query_runs_service):
+        """Test logs method when query run has no log URL."""
+        mock_query_run = Mock()
+        mock_query_run.log_read_url = None
+
+        with patch.object(query_runs_service, "read", return_value=mock_query_run):
+            with pytest.raises(ValueError) as exc:
+                query_runs_service.logs("qr-123abc456def")
+
+            assert "does not have a log URL" in str(exc.value)

-        client._transport.request.assert_called_once_with(
+    def test_logs_invalid_id(self, query_runs_service):
+        """Test logs with invalid query run ID."""
+        with pytest.raises(InvalidQueryRunIDError):
+            query_runs_service.logs("")
+
+
+# ============================================================================
+# Cancel Operations Tests
+# ============================================================================
+
+
+class TestQueryRunsCancel:
+    """Test suite for query run cancel operations."""
+
+    def test_cancel_success(self, query_runs_service, mock_transport):
+        """Test successful query run cancellation."""
+        mock_response = Mock()
+        mock_transport.request.return_value = mock_response
+
+        query_runs_service.cancel("qr-123abc456def")
+
+        # Verify the request
+        mock_transport.request.assert_called_once_with(
             "POST",
-            "/api/v2/query-runs/qr-test123/actions/cancel",
-            json_body={
-                "data": {
-                    "attributes": {"reason": "User requested cancellation"},
-                    "type": "query-runs",
-                }
-            },
+            "/api/v2/queries/qr-123abc456def/actions/cancel",
+            json_body=None,
         )

-    def test_force_cancel_query_run(self, client):
-        """Test force canceling a query run."""
+    def test_cancel_with_comment(self, query_runs_service, mock_transport):
+        """Test cancellation with comment."""
         mock_response = Mock()
-        mock_response.json.return_value = {
-            "data": {
-                "id": "qr-test123",
-                "type": "query-runs",
-                "attributes": {
-                    "query": "SELECT * FROM runs",
-                    "query-type": "filter",
-                    "status": "canceled",
-                    "created-at": "2023-01-01T00:00:00Z",
-                    "updated-at": "2023-01-01T00:02:00Z",
-                },
-            }
-        }
-        client._transport.request = MagicMock(return_value=mock_response)
+        mock_transport.request.return_value = mock_response

-        options = QueryRunForceCancelOptions(reason="Force cancel due to timeout")
-        result = client.query_runs.force_cancel("qr-test123", options)
+        options = QueryRunCancelOptions(comment="Canceling due to configuration error")

-        assert isinstance(result, QueryRun)
-        assert result.id == "qr-test123"
-        assert result.status == QueryRunStatus.CANCELED
+        query_runs_service.cancel("qr-123abc456def", options)

-        client._transport.request.assert_called_once_with(
-            "POST",
-            "/api/v2/query-runs/qr-test123/actions/force-cancel",
-            json_body={
-                "data": {
-                    "attributes": {"reason": "Force cancel due to timeout"},
-                    "type": "query-runs",
-                }
-            },
+        # Verify the request includes comment
+        call_args = mock_transport.request.call_args
+        assert call_args[0][0] == "POST"
+        assert call_args[0][1] == "/api/v2/queries/qr-123abc456def/actions/cancel"
+        json_body = call_args[1]["json_body"]
+        assert (
+            json_body["data"]["attributes"]["comment"]
+            == "Canceling due to configuration error"
         )

+    def test_cancel_invalid_id(self, query_runs_service):
+        """Test cancel with invalid query run ID."""
+        with pytest.raises(InvalidQueryRunIDError):
+            query_runs_service.cancel("")

-class TestQueryRunErrorHandling:
-    """Test query run error handling."""

-    @pytest.fixture
-    def client(self):
-        """Create a test client."""
-        config = TFEConfig(address="https://test.terraform.io", token="test-token")
-        return TFEClient(config)
+# ============================================================================
+# Force Cancel Operations Tests
+# ============================================================================

-    def test_invalid_organization_error(self, client):
-        """Test invalid organization error."""
-        with pytest.raises(InvalidOrgError):
-            client.query_runs.list("")

-        with pytest.raises(InvalidOrgError):
-            client.query_runs.list(None)
+class TestQueryRunsForceCancel:
+    """Test suite for query run force cancel operations."""

-    def test_invalid_query_run_id_error(self, client):
-        """Test invalid query run ID error."""
-        with pytest.raises(InvalidQueryRunIDError):
-            client.query_runs.read("")
+    def test_force_cancel_success(self, query_runs_service, mock_transport):
+        """Test successful force cancellation."""
+        mock_response = Mock()
+        mock_transport.request.return_value = mock_response

-        with pytest.raises(InvalidQueryRunIDError):
-            client.query_runs.read(None)
+        query_runs_service.force_cancel("qr-123abc456def")

-        with pytest.raises(InvalidQueryRunIDError):
-            client.query_runs.logs("")
+        # Verify the request
+        mock_transport.request.assert_called_once_with(
+            "POST",
+            "/api/v2/queries/qr-123abc456def/actions/force-cancel",
+            json_body=None,
+        )

-        with pytest.raises(InvalidQueryRunIDError):
-            client.query_runs.results("")
+    def test_force_cancel_with_comment(self, query_runs_service, mock_transport):
+        """Test force cancellation with comment."""
+        mock_response = Mock()
+        mock_transport.request.return_value = mock_response

-        with pytest.raises(InvalidQueryRunIDError):
-            client.query_runs.cancel("")
+        options = QueryRunForceCancelOptions(comment="Force canceling stuck query run")

-        with pytest.raises(InvalidQueryRunIDError):
-            client.query_runs.force_cancel("")
+        query_runs_service.force_cancel("qr-123abc456def", options)

-    def test_create_query_run_validation_errors(self, client):
-        """Test create query run validation errors."""
-        with pytest.raises(InvalidOrgError):
-            options = QueryRunCreateOptions(
-                query="SELECT * FROM runs", query_type=QueryRunType.FILTER
-            )
-            client.query_runs.create("", options)
+        # Verify the request includes comment
+        call_args = mock_transport.request.call_args
+        assert call_args[0][0] == "POST"
+        assert call_args[0][1] == "/api/v2/queries/qr-123abc456def/actions/force-cancel"
+        json_body = call_args[1]["json_body"]
+        assert (
+            json_body["data"]["attributes"]["comment"]
+            == "Force canceling stuck query run"
+        )

+    def test_force_cancel_invalid_id(self, query_runs_service):
+        """Test force cancel with invalid query run ID."""
+        with pytest.raises(InvalidQueryRunIDError):
+            query_runs_service.force_cancel("")

-class TestQueryRunIntegration:
-    """Test query run integration scenarios."""

-    @pytest.fixture
-    def client(self):
-        """Create a test client with mocked transport."""
-        from unittest.mock import MagicMock, patch
+# ============================================================================
+# Unit Tests - Model Validation
+# ============================================================================

-        # Mock the HTTPTransport to prevent any network calls during initialization
-        with patch("pytfe.client.HTTPTransport") as mock_transport_class:
-            mock_transport_instance = MagicMock()
-            mock_transport_class.return_value = mock_transport_instance
-            config = TFEConfig(address="https://test.terraform.io", token="test-token")
-            client = TFEClient(config)
-            return client

+class TestQueryRunCreateOptions:
+    """Unit tests for QueryRunCreateOptions model."""

-    def test_full_query_run_workflow(self, client):
-        """Test a complete query run workflow simulation."""
-        # Use the already mocked transport from the fixture
-        mock_transport = client._transport
+    def test_create_with_required_fields(self):
+        """Test creating options with required fields only."""
+        options = QueryRunCreateOptions(
+            source=QueryRunSource.API,
+            workspace_id="ws-123",
+        )

-        # 1. Create query run
-        create_response = Mock()
-        create_response.json.return_value = {
-            "data": {
-                "id": "qr-workflow123",
-                "type": "query-runs",
-                "attributes": {
-                    "query": "SELECT * FROM runs WHERE status = 'completed'",
-                    "query-type": "filter",
-                    "status": "pending",
-                    "created-at": "2023-01-01T00:00:00Z",
-                    "updated-at": "2023-01-01T00:00:00Z",
-                    "organization-name": "test-org",
-                },
-            }
-        }
-
-        # 2. Read query run (running state)
-        read_response = Mock()
-        read_response.json.return_value = {
-            "data": {
-                "id": "qr-workflow123",
-                "type": "query-runs",
-                "attributes": {
-                    "query": "SELECT * FROM runs WHERE status = 'completed'",
-                    "query-type": "filter",
-                    "status": "running",
-                    "created-at": "2023-01-01T00:00:00Z",
-                    "updated-at": "2023-01-01T00:01:00Z",
-                    "started-at": "2023-01-01T00:01:00Z",
-                },
-            }
-        }
-
-        # 3. Read query run (completed state)
-        completed_response = Mock()
-        completed_response.json.return_value = {
-            "data": {
-                "id": "qr-workflow123",
-                "type": "query-runs",
-                "attributes": {
-                    "query": "SELECT * FROM runs WHERE status = 'completed'",
-                    "query-type": "filter",
-                    "status": "completed",
-                    "results-count": 15,
-                    "created-at": "2023-01-01T00:00:00Z",
-                    "updated-at": "2023-01-01T00:05:00Z",
-                    "started-at": "2023-01-01T00:01:00Z",
-                    "finished-at": "2023-01-01T00:05:00Z",
-                },
-            }
-        }
-
-        # 4. Get results
-        results_response = Mock()
-        results_response.json.return_value = {
-            "data": {
-                "results": [
-                    {"id": f"run-{i}", "status": "completed"} for i in range(15)
-                ],
-                "total_count": 15,
-                "truncated": False,
-            }
-        }
+        assert options.source == QueryRunSource.API
+        assert options.workspace_id == "ws-123"
+        assert options.configuration_version_id is None
+        assert options.variables is None

-        mock_transport.request.side_effect = [
-            create_response,
-            read_response,
-            completed_response,
-            results_response,
+    def test_create_with_all_fields(self):
+        """Test creating options with all fields."""
+        variables = [
+            QueryRunVariable(key="var1", value="value1"),
+            QueryRunVariable(key="var2", value="value2"),
         ]

-        # Execute workflow
         options = QueryRunCreateOptions(
-            query="SELECT * FROM runs WHERE status = 'completed'",
-            query_type=QueryRunType.FILTER,
-            organization_name="test-org",
+            source=QueryRunSource.API,
+            workspace_id="ws-123",
+            configuration_version_id="cv-456",
+            variables=variables,
         )

-        # 1. Create
-        query_run = client.query_runs.create("test-org", options)
-        assert query_run.status == QueryRunStatus.PENDING
+        assert options.source == QueryRunSource.API
+        assert options.workspace_id == "ws-123"
+        assert options.configuration_version_id == "cv-456"
+        assert len(options.variables) == 2
+        assert options.variables[0].key == "var1"
+
+
+class TestQueryRunModel:
+    """Unit tests for QueryRun model."""
+
+    def test_status_enum_values(self):
+        """Test all status enum values."""
+        assert QueryRunStatus.PENDING.value == "pending"
+        assert QueryRunStatus.QUEUED.value == "queued"
+        assert QueryRunStatus.RUNNING.value == "running"
+        assert QueryRunStatus.FINISHED.value == "finished"
+        assert QueryRunStatus.ERRORED.value == "errored"
+        assert QueryRunStatus.CANCELED.value == "canceled"
+
+    def test_source_enum_value(self):
+        """Test source enum value."""
+        assert QueryRunSource.API.value == "tfe-api"
+
+
+# ============================================================================
+# Test Utilities
+# ============================================================================
+
+
+def test_query_run_variable():
+    """Test QueryRunVariable model."""
+    var = QueryRunVariable(key="test_key", value="test_value")

-        # 2. Check status (running)
-        query_run = client.query_runs.read(query_run.id)
-        assert query_run.status == QueryRunStatus.RUNNING
+    assert var.key == "test_key"
+    assert var.value == "test_value"

-        # 3. Check status (completed)
-        query_run = client.query_runs.read(query_run.id)
-        assert query_run.status == QueryRunStatus.COMPLETED
-        assert query_run.results_count == 15

-        # 4. Get results
-        results = client.query_runs.results(query_run.id)
-        assert len(results.results) == 15
-        assert not results.truncated
+def test_query_run_status_timestamps():
+    """Test QueryRunStatusTimestamps model."""
+    timestamps = QueryRunStatusTimestamps(
+        queued_at="2024-01-15T10:00:00Z",
+        running_at="2024-01-15T10:05:00Z",
+        errored_at="2024-01-15T10:10:00Z",
+    )

-        # Verify all calls were made
-        assert mock_transport.request.call_count == 4
+    # Timestamps are datetime objects
+    assert timestamps.queued_at is not None
+    assert timestamps.running_at is not None
+    assert timestamps.errored_at is not None
+    assert timestamps.finished_at is None
+    assert timestamps.canceled_at is None
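+
+
+# ============================================================================
+# Running These Tests
+# ============================================================================
+#
+# The suites above use only mocked transports (the query_runs_service,
+# mock_transport, and sample_query_run_data fixtures), so no TFE_TOKEN or
+# network access is required. A minimal invocation sketch, assuming this file
+# is saved as tests/test_query_runs.py (the path is illustrative, not taken
+# from the diff):
+#
+#     pytest tests/test_query_runs.py -v
+#
+# A single suite can be selected by pytest node id, e.g. the cancel tests:
+#
+#     pytest tests/test_query_runs.py::TestQueryRunsCancel -v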