#!/usr/bin/env python3
"""
CodeBuild Deployment Script

Deploys and tests IDP stacks in an AWS CodeBuild environment. Configuration
is read from the IDP_ACCOUNT_ID, AWS_DEFAULT_REGION, and IDP_ADMIN_EMAIL
environment variables, each of which has a fallback default.
"""

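# Typical invocation (assumed; in practice this is run by the CodeBuild
# buildspec, and the script name below is a placeholder):
#   IDP_ADMIN_EMAIL=you@example.com python3 codebuild_deploy.py
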
import json
import os
import re
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

# Configuration for the patterns to deploy. Each entry defines:
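#   name:            human-readable label used as the log prefix
#   id:              pattern identifier passed to `idp-cli deploy --pattern`
#   suffix:          short tag appended to the stack name and batch id
#   sample_file:     document from samples/ used for the inference test
#   verify_string:   text the extracted content is expected to contain
#   result_location: relative path of the result JSON in the downloaded results
#   content_path:    dotted path to the text field inside the result JSON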
DEPLOY_PATTERNS = [
    {
        "name": "Pattern 1 - BDA",
        "id": "pattern-1",
        "suffix": "p1",
        "sample_file": "lending_package.pdf",
        "verify_string": "ANYTOWN, USA 12345",
        "result_location": "pages/0/result.json",
        "content_path": "pages.0.representation.markdown",
    },
    {
        "name": "Pattern 2 - OCR + Bedrock",
        "id": "pattern-2",
        "suffix": "p2",
        "sample_file": "lending_package.pdf",
        "verify_string": "ANYTOWN, USA 12345",
        "result_location": "pages/1/result.json",
        "content_path": "text",
    },
    # {"name": "Pattern 3 - UDOP + Bedrock", "id": "pattern-3", "suffix": "p3", "sample_file": "rvl_cdip_package.pdf", "verify_string": "WESTERN DARK FIRED TOBACCO GROWERS", "result_location": "pages/1/result.json", "content_path": "text"},
]


def run_command(cmd, check=True):
    """Run a shell command, echoing its output; exit on failure unless check=False."""
    print(f"Running: {cmd}")
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.stdout:
        print(result.stdout)
    if result.stderr:
        print(result.stderr, file=sys.stderr)
    if check and result.returncode != 0:
        print(f"Command failed with exit code {result.returncode}")
        sys.exit(1)
    return result


def get_env_var(name, default=None):
    """Get an environment variable, exiting if it is unset and no default is given."""
    value = os.environ.get(name, default)
    if value is None:
        print(f"Error: Environment variable {name} is required")
        sys.exit(1)
    return value


def generate_stack_prefix():
    """Generate a unique stack prefix, e.g. idp-0612-1430 for June 12 at 14:30."""
    timestamp = datetime.now().strftime("%m%d-%H%M")  # Short format: MMDD-HHMM
    return f"idp-{timestamp}"


def publish_templates():
    """Run publish.sh to build and upload templates to S3."""
    print("📦 Publishing templates to S3...")

    # Get AWS account ID and region
    account_id = get_env_var("IDP_ACCOUNT_ID", "020432867916")
    region = get_env_var("AWS_DEFAULT_REGION", "us-east-1")

    # Generate bucket name and prefix
    bucket_basename = f"idp-sdlc-sourcecode-{account_id}-{region}"
    prefix = f"codebuild-{datetime.now().strftime('%Y%m%d-%H%M%S')}"

    # Run publish.sh
    cmd = f"./publish.sh {bucket_basename} {prefix} {region}"
    result = run_command(cmd)

    # Extract the template URL from the output - match S3 URLs only
    template_url_pattern = r"https://s3\..*?idp-main\.yaml"
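    # Expected to match something like (assumed URL shape):
    #   https://s3.<region>.amazonaws.com/<bucket>/<prefix>/idp-main.yaml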

    # Remove line breaks that might split the URL in terminal output
    clean_stdout = result.stdout.replace("\n", "").replace("\r", "")
    template_url_match = re.search(template_url_pattern, clean_stdout)

    if template_url_match:
        template_url = template_url_match.group(0)
        print(f"✅ Template published: {template_url}")
        return template_url
    else:
        print("❌ Failed to extract template URL from publish output")
        sys.exit(1)


def deploy_and_test_pattern(stack_prefix, pattern_config, admin_email, template_url):
    """Deploy and test a specific IDP pattern."""
    pattern_name = pattern_config["name"]
    pattern_id = pattern_config["id"]
    pattern_suffix = pattern_config["suffix"]
    sample_file = pattern_config["sample_file"]
    verify_string = pattern_config["verify_string"]
    result_location = pattern_config["result_location"]
    content_path = pattern_config["content_path"]

    stack_name = f"{stack_prefix}-{pattern_suffix}"
    batch_id = f"test-{pattern_suffix}"

    print(f"[{pattern_name}] Starting deployment: {stack_name}")

    try:
        # Step 1: Deploy using the template URL
        print(f"[{pattern_name}] Step 1: Deploying stack...")
        cmd = f"idp-cli deploy --stack-name {stack_name} --template-url {template_url} --pattern {pattern_id} --admin-email {admin_email} --wait"
        run_command(cmd)
        print(f"[{pattern_name}] ✅ Deployment completed")

        # Step 2: Verify stack status
        print(f"[{pattern_name}] Step 2: Verifying stack status...")
        cmd = f"aws cloudformation describe-stacks --stack-name {stack_name} --query 'Stacks[0].StackStatus' --output text"
        result = run_command(cmd)

        # Require a full success status; a bare "COMPLETE" substring check
        # would also match ROLLBACK_COMPLETE
        status = result.stdout.strip()
        if status not in ("CREATE_COMPLETE", "UPDATE_COMPLETE"):
            print(f"[{pattern_name}] ❌ Stack status: {status}")
            return {
                "stack_name": stack_name,
                "pattern_name": pattern_name,
                "success": False,
            }

        print(f"[{pattern_name}] ✅ Stack is healthy")

        # Step 3: Run the inference test
        print(f"[{pattern_name}] Step 3: Running inference test with {sample_file}...")
        cmd = f"idp-cli run-inference --stack-name {stack_name} --dir samples --file-pattern {sample_file} --batch-id {batch_id} --monitor"
        run_command(cmd)
        print(f"[{pattern_name}] ✅ Inference completed")

        # Step 4: Download the results
        print(f"[{pattern_name}] Step 4: Downloading results...")
        results_dir = f"/tmp/results-{pattern_suffix}"

        cmd = f"idp-cli download-results --stack-name {stack_name} --batch-id {batch_id} --output-dir {results_dir}"
        run_command(cmd)

        # Step 5: Verify the result content
        print(f"[{pattern_name}] Step 5: Verifying result content...")

        # Find the result file at the specified location
        cmd = f"find {results_dir} -path '*/{result_location}' | head -1"
        result = run_command(cmd)
        result_file = result.stdout.strip()

        if not result_file:
            print(f"[{pattern_name}] ❌ No result file found at {result_location}")
            return {
                "stack_name": stack_name,
                "pattern_name": pattern_name,
                "success": False,
            }

        # Verify the result file contains the expected content
        try:
            with open(result_file, "r") as f:
                result_json = json.load(f)

            # Extract the text content using the specified path
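            # The dotted path is walked one segment at a time, e.g.
            # "pages.0.representation.markdown": numeric segments index into
            # lists, all other segments into dict keys.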
            text_content = result_json
            for key in content_path.split("."):
                if key.isdigit():
                    text_content = text_content[int(key)]
                else:
                    text_content = text_content[key]

            # Verify the expected string appears in the content
            if verify_string not in text_content:
                print(
                    f"[{pattern_name}] ❌ Text content does not contain expected string: '{verify_string}'"
                )
                print(
                    f"[{pattern_name}] Actual text starts with: '{text_content[:100]}...'"
                )
                return {
                    "stack_name": stack_name,
                    "pattern_name": pattern_name,
                    "success": False,
                }

            print(
                f"[{pattern_name}] ✅ Found expected verification string: '{verify_string}'"
            )
            return {
                "stack_name": stack_name,
                "pattern_name": pattern_name,
                "success": True,
            }

        except Exception as e:
            print(f"[{pattern_name}] ❌ Failed to validate result content: {e}")
            return {
                "stack_name": stack_name,
                "pattern_name": pattern_name,
                "success": False,
            }

    except Exception as e:
        print(f"[{pattern_name}] ❌ Testing failed: {e}")
        return {
            "stack_name": stack_name,
            "pattern_name": pattern_name,
            "success": False,
        }


def cleanup_stack(stack_name, pattern_name):
    """Clean up a deployed stack."""
    print(f"[{pattern_name}] Cleaning up: {stack_name}")
    try:
        run_command(f"idp-cli delete --stack-name {stack_name} --force", check=False)
        print(f"[{pattern_name}] ✅ Cleanup completed")
    except Exception as e:
        print(f"[{pattern_name}] ⚠️ Cleanup failed: {e}")


def main():
    """Main execution function."""
    print("Starting CodeBuild deployment process...")

    admin_email = get_env_var("IDP_ADMIN_EMAIL", "[email protected]")
    stack_prefix = generate_stack_prefix()

    print(f"Stack Prefix: {stack_prefix}")
    print(f"Admin Email: {admin_email}")
    print(f"Patterns to deploy: {[p['name'] for p in DEPLOY_PATTERNS]}")

    # Step 1: Publish templates to S3
    template_url = publish_templates()

    deployed_stacks = []
    all_success = True

    # Step 2: Deploy and test all patterns concurrently
    print("🚀 Starting concurrent deployment of all patterns...")
    with ThreadPoolExecutor(max_workers=len(DEPLOY_PATTERNS)) as executor:
        # Submit all deployment tasks
        future_to_pattern = {
            executor.submit(
                deploy_and_test_pattern,
                stack_prefix,
                pattern_config,
                admin_email,
                template_url,
            ): pattern_config
            for pattern_config in DEPLOY_PATTERNS
        }

        # Collect results as they complete
        for future in as_completed(future_to_pattern):
            pattern_config = future_to_pattern[future]
            try:
                result = future.result()
                deployed_stacks.append(result)
                if not result["success"]:
                    all_success = False
                    print(f"[{pattern_config['name']}] ❌ Failed")
                else:
                    print(f"[{pattern_config['name']}] ✅ Success")
            except Exception as e:
                print(f"[{pattern_config['name']}] ❌ Exception: {e}")
                all_success = False

    # Step 3: Clean up all stacks concurrently
    print("🧹 Starting concurrent cleanup of all stacks...")
    if deployed_stacks:  # ThreadPoolExecutor requires max_workers >= 1
        with ThreadPoolExecutor(max_workers=len(deployed_stacks)) as executor:
            cleanup_futures = [
                executor.submit(
                    cleanup_stack, result["stack_name"], result["pattern_name"]
                )
                for result in deployed_stacks
            ]

            # Wait for all cleanups to complete
            for future in as_completed(cleanup_futures):
                future.result()

    if all_success:
        print("🎉 All pattern deployments completed successfully!")
        sys.exit(0)
    else:
        print("💥 Some deployments failed!")
        sys.exit(1)


if __name__ == "__main__":
    main()