|
1 | 1 | import json
|
2 | 2 | import time
|
3 | 3 | import logging
|
| 4 | +from datetime import datetime, timezone, timedelta |
4 | 5 | from typing import Any, List, Dict, Optional
|
5 | 6 |
|
6 | 7 | import httpx
|
@@ -157,12 +158,64 @@ async def execute_snowflake_query(sql: str, snowflake_token: Optional[str] = Non
|
157 | 158 | track_snowflake_query(start_time, success)
|
158 | 159 |
|
159 | 160 |
|
def parse_snowflake_timestamp(timestamp_str: str) -> str:
    """Parse a Snowflake timestamp value and convert it to an ISO-8601 string.

    Snowflake's SQL API returns TIMESTAMP_TZ values in the form
    "<epoch_seconds>.<fraction> <offset>", e.g. "1753767533.658000000 1440",
    where <offset> is the UTC offset in minutes *plus 1440* so it is always
    non-negative (1440 = UTC, 960 = UTC-8).  Values with no offset component
    are treated as plain UTC epoch timestamps.

    Returns the input unchanged when it is not a parseable timestamp string.
    """
    if not timestamp_str or not isinstance(timestamp_str, str):
        return timestamp_str

    try:
        # Handle format like "1753767533.658000000 1440"
        parts = timestamp_str.strip().split()
        if len(parts) >= 2:
            # Float handles the fractional-seconds part of the epoch.
            epoch_seconds = float(parts[0])
            # Decode Snowflake's "+1440" offset encoding back to real minutes.
            offset_minutes = int(parts[1]) - 1440
            tz = timezone(timedelta(minutes=offset_minutes))
            # Build the datetime directly in the target timezone: this keeps
            # the absolute instant correct.  (Adding the offset to a UTC
            # datetime while keeping tzinfo=UTC would shift the instant
            # itself — by a full day for the common "1440" = UTC case.)
            return datetime.fromtimestamp(epoch_seconds, tz=tz).isoformat()
        else:
            # No offset component: parse as a simple UTC epoch timestamp.
            epoch_seconds = float(timestamp_str)
            return datetime.fromtimestamp(epoch_seconds, tz=timezone.utc).isoformat()

    except (ValueError, TypeError) as e:
        # Best-effort: leave unparseable values untouched for the caller.
        logger.debug(f"Could not parse timestamp '{timestamp_str}': {e}")
        return timestamp_str
| 194 | + |
| 195 | + |
def format_snowflake_row(row_data: List[Any], columns: List[str]) -> Dict[str, Any]:
    """Convert Snowflake row data to dictionary using column names"""
    # A row that does not line up with the column list yields an empty dict.
    if len(row_data) != len(columns):
        return {}

    # Date/time columns that should be parsed
    timestamp_columns = {
        'CREATED', 'UPDATED', 'DUEDATE', 'RESOLUTIONDATE',
        'ARCHIVEDDATE', '_FIVETRAN_SYNCED'
    }

    # Pair each column name with its value; timestamp columns holding a
    # truthy value are normalized via parse_snowflake_timestamp, everything
    # else passes through untouched.
    return {
        name: (parse_snowflake_timestamp(str(value))
               if name.upper() in timestamp_columns and value
               else value)
        for name, value in zip(columns, row_data)
    }
166 | 219 |
|
167 | 220 |
|
168 | 221 | async def get_issue_labels(issue_ids: List[str], snowflake_token: Optional[str] = None) -> Dict[str, List[str]]:
|
|
0 commit comments