"""
Lambda function to deploy the RealKIE-FCC-Verified dataset from HuggingFace
to the TestSetBucket during stack deployment.
"""

import json
import os
import logging
import boto3
from datetime import datetime
from typing import Dict, Any
import cfnresponse

# HuggingFace datasets library - will fail fast if not available
from datasets import load_dataset
from huggingface_hub import hf_hub_download

# Configure logging
logger = logging.getLogger()
logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO'))

# AWS clients
s3_client = boto3.client('s3')
dynamodb = boto3.resource('dynamodb')

# Environment variables
TESTSET_BUCKET = os.environ.get('TESTSET_BUCKET')
TRACKING_TABLE = os.environ.get('TRACKING_TABLE')

# Constants
DATASET_NAME = 'RealKIE-FCC-Verified'
DATASET_PREFIX = 'realkie-fcc-verified/'
TEST_SET_ID = 'realkie-fcc-verified'

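# NOTE (added commentary, not part of the original file): this function runs as a
# CloudFormation custom resource handler, so the incoming event is expected to look
# roughly like:
#   {
#     "RequestType": "Create" | "Update" | "Delete",
#     "ResourceProperties": {"DatasetVersion": "1.0", "DatasetDescription": "..."},
#     ...
#   }
# cfnresponse.send() must be called on every code path (success or failure); otherwise
# CloudFormation waits for a response until the stack operation times out.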

def handler(event, context):
    """
    Main Lambda handler for deploying the FCC dataset.
    """
    logger.info(f"Event: {json.dumps(event)}")

    try:
        request_type = event['RequestType']

        if request_type == 'Delete':
            # On stack deletion we could clean up the uploaded objects,
            # but for now the dataset is left in place.
            logger.info("Delete request - keeping dataset in place")
            cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
            return

        # Extract properties
        properties = event['ResourceProperties']
        dataset_version = properties.get('DatasetVersion', '1.0')
        dataset_description = properties.get('DatasetDescription', '')

        logger.info(f"Processing dataset version: {dataset_version}")

        # Check if dataset already exists with this version
        if check_existing_version(dataset_version):
            logger.info(f"Dataset version {dataset_version} already deployed, skipping")
            cfnresponse.send(event, context, cfnresponse.SUCCESS, {
                'Message': f'Dataset version {dataset_version} already exists'
            })
            return

        # Download and deploy the dataset
        result = deploy_dataset(dataset_version, dataset_description)

        logger.info(f"Dataset deployment completed: {result}")
        cfnresponse.send(event, context, cfnresponse.SUCCESS, result)

    except Exception as e:
        logger.error(f"Error deploying dataset: {str(e)}", exc_info=True)
        cfnresponse.send(event, context, cfnresponse.FAILED, {},
                         reason=f"Error deploying dataset: {str(e)}")

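# Idempotency note (added commentary): CloudFormation re-invokes this custom resource on
# stack updates, so check_existing_version() is used to skip the slow HuggingFace download
# when the requested dataset version is already recorded in DynamoDB and present in S3.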

def check_existing_version(version: str) -> bool:
    """
    Check if the dataset with the specified version already exists.
    """
    try:
        table = dynamodb.Table(TRACKING_TABLE)
        response = table.get_item(
            Key={
                'PK': f'testset#{TEST_SET_ID}',
                'SK': 'metadata'
            }
        )

        if 'Item' in response:
            existing_version = response['Item'].get('datasetVersion', '')
            logger.info(f"Found existing dataset version: {existing_version}")

            # Check if version matches and files exist
            if existing_version == version:
                # Verify at least some files exist in S3
                try:
                    response = s3_client.list_objects_v2(
                        Bucket=TESTSET_BUCKET,
                        Prefix=f'{DATASET_PREFIX}input/',
                        MaxKeys=1
                    )
                    if response.get('KeyCount', 0) > 0:
                        logger.info("Files exist in S3, skipping deployment")
                        return True
                except Exception as e:
                    logger.warning(f"Error checking S3 files: {e}")

        return False

    except Exception as e:
        logger.warning(f"Error checking existing version: {e}")
        return False


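# Layout written to the test set bucket (added reference comment, derived from the
# object keys used below):
#   s3://<TESTSET_BUCKET>/realkie-fcc-verified/input/<document_id>                             - source PDF
#   s3://<TESTSET_BUCKET>/realkie-fcc-verified/baseline/<document_id>/sections/1/result.json   - ground truth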
def deploy_dataset(version: str, description: str) -> Dict[str, Any]:
    """
    Deploy the dataset by downloading PDFs and ground truth from HuggingFace
    and uploading to S3.
    """
    try:
        logger.info("Downloading dataset from HuggingFace: amazon-agi/RealKIE-FCC-Verified")

        # Download the dataset metadata (for ground truth)
        dataset = load_dataset("amazon-agi/RealKIE-FCC-Verified", split='test')

        logger.info(f"Dataset loaded with {len(dataset)} documents")

        # Process and upload each document
        file_count = 0
        skipped_count = 0

        for idx, item in enumerate(dataset):
            try:
                document_id = item.get('id', f'doc_{idx}')

                # Get ground truth from json_response field
                json_response = item.get('json_response', {})
                if not json_response:
                    logger.warning(f"Skipping {document_id}: no json_response")
                    skipped_count += 1
                    continue

                logger.info(f"Processing {document_id}")

                # Download PDF file from HuggingFace repository
                # PDFs are stored in the /pdfs directory of the dataset repo
                try:
                    pdf_path = hf_hub_download(
                        repo_id="amazon-agi/RealKIE-FCC-Verified",
                        filename=f"pdfs/{document_id}",
                        repo_type="dataset"
                    )

                    # Read the downloaded PDF
                    with open(pdf_path, 'rb') as f:
                        pdf_bytes = f.read()

                    logger.info(f"Downloaded PDF for {document_id} ({len(pdf_bytes):,} bytes)")

                    # Upload PDF to input folder
                    pdf_key = f'{DATASET_PREFIX}input/{document_id}'
                    s3_client.put_object(
                        Bucket=TESTSET_BUCKET,
                        Key=pdf_key,
                        Body=pdf_bytes,
                        ContentType='application/pdf'
                    )

                except Exception as e:
                    logger.error(f"Error downloading/uploading PDF for {document_id}: {e}")
                    skipped_count += 1
                    continue

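                # Assumption (added note): the baseline key layout below
                # (baseline/<document_id>/sections/1/result.json) is presumed to mirror
                # the result files the processing pipeline writes for real runs, so the
                # two can be compared directly during evaluation.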
                # Upload ground truth baseline; json_response is already in the
                # expected result format
                result_json = {"inference_result": json_response}
                result_key = f'{DATASET_PREFIX}baseline/{document_id}/sections/1/result.json'
                s3_client.put_object(
                    Bucket=TESTSET_BUCKET,
                    Key=result_key,
                    Body=json.dumps(result_json, indent=2),
                    ContentType='application/json'
                )

                file_count += 1

                if file_count % 10 == 0:
                    logger.info(f"Processed {file_count}/{len(dataset)} documents...")

            except Exception as e:
                logger.error(f"Error processing document {idx} ({document_id}): {e}")
                skipped_count += 1
                continue

        logger.info(f"Successfully deployed {file_count} documents (skipped {skipped_count})")

        # Create test set record in DynamoDB
        create_testset_record(version, description, file_count)

        return {
            'DatasetVersion': version,
            'FileCount': file_count,
            'SkippedCount': skipped_count,
            'Message': f'Successfully deployed {file_count} documents with PDFs and baseline files'
        }

    except Exception as e:
        logger.error(f"Error deploying dataset: {e}", exc_info=True)
        raise


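# Note (added commentary): the item written below uses the same PK/SK pair that
# check_existing_version() reads, so re-deploys of the same DatasetVersion are skipped.
# ExpiresAfter is assumed to be the table's DynamoDB TTL attribute (epoch seconds).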
def create_testset_record(version: str, description: str, file_count: int):
    """
    Create or update the test set record in DynamoDB.
    """
    table = dynamodb.Table(TRACKING_TABLE)
    timestamp = datetime.utcnow().isoformat() + 'Z'

    item = {
        'PK': f'testset#{TEST_SET_ID}',
        'SK': 'metadata',
        'testSetId': TEST_SET_ID,
        'name': DATASET_NAME,
        'description': description,
        'bucketType': 'testset',
        'bucketName': TESTSET_BUCKET,
        'inputPrefix': f'{DATASET_PREFIX}input/',
        'baselinePrefix': f'{DATASET_PREFIX}baseline/',
        'fileCount': file_count,
        'status': 'COMPLETED',
        'datasetVersion': version,
        'createdAt': timestamp,
        'updatedAt': timestamp,
        'source': 'huggingface:amazon-agi/RealKIE-FCC-Verified',
        'ExpiresAfter': int(datetime.utcnow().timestamp() + (365 * 24 * 60 * 60))  # 1 year TTL
    }

    table.put_item(Item=item)
    logger.info(f"Created test set record in DynamoDB: {TEST_SET_ID}")