|
| 1 | +#!/usr/bin/env python3 |
| 2 | +""" |
| 3 | +Release Intent Parser |
| 4 | +
|
| 5 | +Parses .github/tag_and_release/intent.yaml and validates against JSON schema. |
| 6 | +Merges defaults with workflow_dispatch inputs and outputs both human-readable |
| 7 | +JSON and GitHub Action outputs. |
| 8 | +
|
| 9 | +This tool follows PEP 484/585 typing standards and implements robust error |
| 10 | +handling with schema validation. |
| 11 | +""" |
| 12 | + |
import json
import os
import sys
import uuid
from pathlib import Path
from typing import Any, Dict, Optional

import jsonschema
import jsonschema.exceptions
import yaml
| 22 | + |
| 23 | +# Type definitions following PEP 585 |
| 24 | +ReleaseIntent = Dict[str, Any] |
| 25 | +SchemaType = Dict[str, Any] |
| 26 | + |
| 27 | + |
class ReleaseIntentError(Exception):
    """Raised when loading, merging, or validating the release intent fails."""
| 30 | + |
| 31 | + |
def load_schema() -> SchemaType:
    """Load the JSON schema used to validate the release intent.

    Returns:
        The parsed schema as a dictionary.

    Raises:
        ReleaseIntentError: If the schema file is missing, unreadable, or
            contains invalid JSON.
    """
    schema_path = Path(".github/tag_and_release/schema.json")

    if not schema_path.exists():
        raise ReleaseIntentError(f"Schema file not found: {schema_path}")

    try:
        raw = schema_path.read_text(encoding="utf-8")
    except OSError as e:
        raise ReleaseIntentError(f"Failed to load schema: {e}") from e
    try:
        return json.loads(raw)
    except json.JSONDecodeError as e:
        raise ReleaseIntentError(f"Failed to load schema: {e}") from e
| 44 | + |
| 45 | + |
def load_intent_file() -> Optional[ReleaseIntent]:
    """Read the release intent YAML file, if one is present.

    Returns:
        The parsed YAML contents, or None when the file does not exist.

    Raises:
        ReleaseIntentError: If the file exists but cannot be read or parsed.
    """
    intent_path = Path(".github/tag_and_release/intent.yaml")

    if intent_path.exists():
        try:
            with intent_path.open("r", encoding="utf-8") as f:
                return yaml.safe_load(f)
        except (yaml.YAMLError, OSError) as e:
            raise ReleaseIntentError(f"Failed to load intent file: {e}") from e
    return None
| 58 | + |
| 59 | + |
def get_workflow_dispatch_inputs() -> ReleaseIntent:
    """Collect workflow_dispatch inputs from INPUT_* environment variables.

    Missing variables come back as empty strings; surrounding whitespace is
    stripped from every value.
    """
    names = ("level", "python", "docker", "docs", "notes")
    return {name: os.getenv(f"INPUT_{name.upper()}", "").strip() for name in names}
| 69 | + |
| 70 | + |
def get_defaults() -> ReleaseIntent:
    """Build a fresh dictionary of default release-intent values.

    A new, independently mutable dict is returned on each call, so callers
    may modify the result without affecting later calls.
    """
    artifacts = {"python": "auto", "docker": "auto", "docs": "auto"}
    return {
        "release": True,
        "level": "auto",
        "artifacts": artifacts,
        "notes": "",
    }
| 83 | + |
| 84 | + |
def merge_intent_data(
    defaults: ReleaseIntent, file_data: Optional[ReleaseIntent], workflow_inputs: ReleaseIntent
) -> ReleaseIntent:
    """
    Merge defaults, file data, and workflow inputs in priority order.

    Priority: workflow_inputs > file_data > defaults

    Args:
        defaults: Baseline values (see get_defaults()).
        file_data: Parsed intent.yaml contents, or None if the file is absent.
        workflow_inputs: workflow_dispatch inputs; empty strings are ignored.

    Returns:
        A new merged intent dict. Neither input mapping is mutated.
    """
    # Copy defaults one level deep so we never mutate the caller's dicts.
    merged = defaults.copy()
    merged["artifacts"] = defaults["artifacts"].copy()

    # Apply file data if available (overrides defaults).
    if file_data:
        for key, value in file_data.items():
            if key == "artifacts":
                # Merge the nested artifacts dict with defaults; a non-dict
                # "artifacts" value is ignored here and left for schema
                # validation to reject.
                if isinstance(value, dict):
                    merged["artifacts"].update(value)
            else:
                merged[key] = value

    # Apply workflow dispatch inputs (highest priority).
    # Only override when the input is a non-empty string.
    if workflow_inputs["level"]:
        merged["level"] = workflow_inputs["level"]
    if workflow_inputs["python"]:
        merged["artifacts"]["python"] = workflow_inputs["python"]
    if workflow_inputs["docker"]:
        merged["artifacts"]["docker"] = workflow_inputs["docker"]
    if workflow_inputs["docs"]:
        docs = merged["artifacts"]["docs"]
        if isinstance(docs, dict):
            # Complex docs config from the file: only override the mode, and
            # only for the explicit values "skip"/"force". Build a NEW dict
            # rather than assigning into the existing one -- the shallow
            # artifacts merge above shares the nested dict with file_data, so
            # in-place assignment would mutate the caller's input.
            if workflow_inputs["docs"] in ("skip", "force"):
                merged["artifacts"]["docs"] = {**docs, "mode": workflow_inputs["docs"]}
            # Otherwise preserve the complex configuration from the file.
        else:
            # Simple string config: replace it outright.
            merged["artifacts"]["docs"] = workflow_inputs["docs"]
    if workflow_inputs["notes"]:
        merged["notes"] = workflow_inputs["notes"]

    return merged
| 130 | + |
| 131 | + |
def parse_docs_config(docs_artifact: Any) -> tuple[str, str, str]:
    """
    Parse a docs artifact value into (mode, sections_json, strategy).

    Args:
        docs_artifact: Either a legacy string value or a dict carrying
            "mode", "sections", and "strategy" keys.

    Returns:
        Tuple of (mode, sections as a JSON string, strategy).
    """
    default_sections = '["docs", "dev"]'

    if isinstance(docs_artifact, dict):
        # New object format: pull each field, falling back to defaults.
        return (
            docs_artifact.get("mode", "auto"),
            json.dumps(docs_artifact.get("sections", ["docs", "dev"])),
            docs_artifact.get("strategy", "all"),
        )
    if isinstance(docs_artifact, str):
        # Legacy string format: "skip" disables all sections; any other
        # value (including "auto") is treated as a mode with the default
        # section list.
        if docs_artifact == "skip":
            return "skip", "[]", "all"
        return docs_artifact, default_sections, "all"
    # Unrecognized type: fall back to defaults.
    return "auto", default_sections, "all"
| 166 | + |
| 167 | + |
def validate_intent(intent: ReleaseIntent, schema: SchemaType) -> None:
    """Validate the merged release intent against the JSON schema.

    Raises:
        ReleaseIntentError: If the intent violates the schema, or if the
            schema itself is malformed.
    """
    try:
        jsonschema.Draft202012Validator(schema).validate(intent)
    except jsonschema.ValidationError as e:
        raise ReleaseIntentError(f"Intent validation failed: {e.message}") from e
    except (jsonschema.SchemaError, jsonschema.exceptions.UnknownType) as e:
        raise ReleaseIntentError(f"Schema validation failed: {e}") from e
| 177 | + |
| 178 | + |
def write_github_outputs(intent: ReleaseIntent) -> None:
    """Write GitHub Action outputs to the file named by $GITHUB_OUTPUT.

    A no-op when GITHUB_OUTPUT is unset (e.g. when running locally).

    Raises:
        ReleaseIntentError: If the output file cannot be written.
    """
    github_output = os.getenv("GITHUB_OUTPUT")
    if not github_output:
        # Running locally, skip GitHub output
        return

    # Parse docs configuration into its component outputs
    docs_mode, docs_sections, docs_strategy = parse_docs_config(intent["artifacts"]["docs"])

    lines = [
        f"do_release={'true' if intent['release'] else 'false'}",
        f"level={intent['level']}",
        f"python={intent['artifacts']['python']}",
        f"docker={intent['artifacts']['docker']}",
    ]
    # Legacy docs output for backward compatibility
    if isinstance(intent["artifacts"]["docs"], str):
        lines.append(f"docs={intent['artifacts']['docs']}")
    else:
        lines.append(f"docs={docs_mode}")
    # New docs outputs
    lines.append(f"docs_mode={docs_mode}")
    lines.append(f"docs_sections={docs_sections}")
    lines.append(f"docs_strategy={docs_strategy}")

    notes = str(intent["notes"])
    if "\n" in notes:
        # Multi-line values must use GitHub's heredoc-style delimiter syntax;
        # a bare "notes=<value>" line with embedded newlines corrupts the
        # $GITHUB_OUTPUT file. The random delimiter avoids collisions with
        # note content.
        delimiter = f"notes_{uuid.uuid4().hex}"
        lines.extend([f"notes<<{delimiter}", notes, delimiter])
    else:
        lines.append(f"notes={notes}")

    try:
        with open(github_output, "a", encoding="utf-8") as f:
            f.write("\n".join(lines) + "\n")
    except OSError as e:
        raise ReleaseIntentError(f"Failed to write GitHub outputs: {e}") from e
| 207 | + |
| 208 | + |
def main() -> int:
    """
    Entry point for the release intent parser.

    Returns:
        0 on success (including when release is skipped), 1 on error.
    """
    try:
        schema = load_schema()
        file_data = load_intent_file()  # None when intent.yaml is absent
        workflow_inputs = get_workflow_dispatch_inputs()

        # Merge everything (workflow inputs > file data > defaults) and
        # validate the result before emitting it.
        merged_intent = merge_intent_data(get_defaults(), file_data, workflow_inputs)
        validate_intent(merged_intent, schema)

        # Human-readable JSON on stdout, then machine-readable GitHub outputs.
        print(json.dumps(merged_intent, indent=2, sort_keys=True))
        write_github_outputs(merged_intent)
    except ReleaseIntentError as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1
    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        return 1
    return 0
| 248 | + |
| 249 | + |
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell.
    raise SystemExit(main())
0 commit comments