Skip to content

Commit 3d212a2

Browse files
committed
Enhance video generation logging and testing:
- Add detailed logging for video job processing in api_server.py
- Implement synchronous and asynchronous video generation tests in test_endpoints.py
- Introduce a debug response analyzer for inspecting JSON structures
- Create comprehensive debug test for async video generation
- Enable debug logging configuration for troubleshooting
1 parent 26e0cff commit 3d212a2

File tree

10 files changed

+493
-119
lines changed

10 files changed

+493
-119
lines changed

.github/workflows/publish.yml

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
name: Publish metaai-sdk to PyPI

# Publish on version tags only (e.g. v2.3.0).
on:
  push:
    tags:
      - "v*"

# Trusted publishing: OIDC id-token is required by gh-action-pypi-publish.
permissions:
  contents: read
  id-token: write

jobs:
  publish:
    runs-on: ubuntu-latest

    steps:
      # Full history so setuptools_scm can derive the version from the tag.
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Build
        run: |
          pip install build setuptools_scm
          python -m build

      - name: Publish to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1

.github/workflows/python-publish.yml

Lines changed: 0 additions & 35 deletions
This file was deleted.

analyze_debug_responses.py

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
"""
2+
Analyze debug response files to understand the raw JSON structure.
3+
This helps us see EXACTLY what Meta AI is returning.
4+
"""
5+
6+
import json
7+
import os
8+
import re
9+
from pathlib import Path
10+
11+
DEBUG_DIR = Path("debug_responses")
12+
13+
def analyze_response_file(filepath):
    """Analyze a single debug response file and print what it contains.

    Reports, for one captured Meta AI response:
      * every dict key whose name mentions ``video`` or ``url`` (with its
        dotted path into the JSON structure), when the payload parses;
      * raw ``.mp4`` URLs found anywhere in the text;
      * ``/v/...`` and ``/t6/...`` video identifier paths.

    The raw-text regex scans run even when the payload is not valid JSON
    (fix: they previously sat inside the JSON-parse branch and were skipped
    for non-JSON payloads, although they only need the raw text).

    Args:
        filepath: ``pathlib.Path`` to one ``response_*.txt`` debug file.
    """
    print(f"\n{'='*80}")
    print(f"FILE: {filepath.name}")
    print(f"{'='*80}")

    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            content = f.read()

        # Try to parse as JSON
        try:
            data = json.loads(content)
            print(f"✓ Valid JSON ({len(content)} characters)")

            # Walk the structure collecting (path, type, sample) for every
            # key that looks video- or URL-related.
            video_keys = []

            def find_video_keys(obj, path=""):
                if isinstance(obj, dict):
                    for key, value in obj.items():
                        current_path = f"{path}.{key}" if path else key
                        if 'video' in key.lower() or 'url' in key.lower():
                            video_keys.append((current_path, type(value).__name__, str(value)[:100]))
                        find_video_keys(value, current_path)
                elif isinstance(obj, list):
                    for idx, item in enumerate(obj):
                        find_video_keys(item, f"{path}[{idx}]")

            find_video_keys(data)

            if video_keys:
                print(f"\nFound {len(video_keys)} video-related keys:")
                for path, vtype, sample in video_keys:
                    print(f"  • {path} ({vtype})")
                    if vtype == 'str' and ('http' in sample or '.mp4' in sample):
                        print(f"    URL: {sample}")

        except json.JSONDecodeError as e:
            print(f"✗ Invalid JSON: {e}")
            print(f"\nFirst 500 characters:")
            print(content[:500])

        # Raw-text scans operate on `content` directly, so they run
        # regardless of whether the JSON parse above succeeded.
        urls = re.findall(r'https?://[^\s\'"]+\.mp4[^\s\'"]*', content)
        if urls:
            print(f"\nFound {len(urls)} .mp4 URLs via regex:")
            for idx, url in enumerate(urls[:5], 1):  # Show first 5
                print(f"  {idx}. {url[:80]}...")

        # Look for video identifiers.
        video_ids = re.findall(r'/v/[a-zA-Z0-9_-]+|/t6/[a-zA-Z0-9_-]+', content)
        if video_ids:
            print(f"\nFound {len(video_ids)} video identifier paths:")
            # Fix: dedupe the full match list (not just the first five) and
            # sort for deterministic output, then show at most five.
            for vid_id in sorted(set(video_ids))[:5]:
                print(f"  • {vid_id}")

    except Exception as e:
        # Best-effort tool: report unreadable files instead of crashing
        # the whole analysis run.
        print(f"✗ Error reading file: {e}")
71+
72+
def main():
    """Entry point: scan DEBUG_DIR and analyze every captured response file."""
    banner = "=" * 80
    print("\n" + banner)
    print("DEBUG RESPONSE ANALYZER")
    print(banner)

    # Guard clauses: bail out early when there is nothing to analyze.
    if not DEBUG_DIR.exists():
        print(f"\n✗ Directory not found: {DEBUG_DIR}")
        return

    files = sorted(DEBUG_DIR.glob("response_*.txt"))
    if not files:
        print(f"\n✗ No response files found in {DEBUG_DIR}")
        return

    print(f"\nFound {len(files)} debug response files")

    for path in files:
        analyze_response_file(path)

    print("\n" + banner)
    print("ANALYSIS COMPLETE")
    print(banner)
    print("\nThis shows the EXACT structure of Meta AI responses.")
    print("Use this to understand what fields contain video URLs.\n")


if __name__ == "__main__":
    main()

debug_async_video_test.py

Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,164 @@
1+
"""
2+
Comprehensive debug test for async video generation with full tracing.
3+
This script will show EXACTLY what's happening at each step.
4+
"""
5+
6+
import requests
7+
import json
8+
import time
9+
import logging
10+
11+
# Trace every step of the debug test at DEBUG level.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - [%(levelname)s] %(message)s',
)
logger = logging.getLogger(__name__)

# Base URL of the locally running API server under test.
API_URL = "http://localhost:8000"
# Local image used as the video-generation input.
# NOTE(review): hard-coded, machine-specific path — adjust per environment.
IMAGE_PATH = r"C:\Users\spike\Downloads\meta-ai-api-main\ChatGPT Image Jan 14, 2026, 06_59_02 PM.png"
21+
22+
def test_async_video_generation():
    """Test async video generation with comprehensive debugging.

    End-to-end flow against the local API server:
      1. POST the fixture image to ``/upload``;
      2. submit an async video job to ``/video/async`` with the returned
         media id and attachment metadata;
      3. poll ``/video/jobs/{job_id}`` until the job succeeds, fails, or
         the poll budget runs out, logging everything observed.

    Relies on module-level ``API_URL``, ``IMAGE_PATH`` and ``logger``.
    Returns ``None``; all findings are emitted via logging/print.
    """

    print("\n" + "=" * 80)
    print("COMPREHENSIVE DEBUG TEST - ASYNC VIDEO GENERATION")
    print("=" * 80)

    # Step 1: Upload image
    logger.info("STEP 1: Uploading image...")
    with open(IMAGE_PATH, 'rb') as f:
        upload_response = requests.post(f"{API_URL}/upload", files={'file': f})

    if upload_response.status_code != 200:
        logger.error(f"Upload failed: {upload_response.text}")
        return

    # Server returns the media handle plus metadata we must echo back
    # when submitting the video job.
    upload_result = upload_response.json()
    media_id = upload_result['media_id']
    file_size = upload_result['file_size']
    mime_type = upload_result['mime_type']

    logger.info(f"✓ Upload successful")
    logger.info(f"  Media ID: {media_id}")
    logger.info(f"  File Size: {file_size}")
    logger.info(f"  MIME Type: {mime_type}")

    # Step 2: Submit async video job
    logger.info("\nSTEP 2: Submitting async video job...")

    payload = {
        "prompt": "generate a video with cinematic zoom effect",
        "media_ids": [media_id],
        "attachment_metadata": {
            "file_size": file_size,
            "mime_type": mime_type
        },
        # Server-side polling knobs for the generation backend.
        "wait_before_poll": 10,
        "max_attempts": 30,
        "wait_seconds": 5,
        "verbose": False
    }

    logger.debug(f"Payload: {json.dumps(payload, indent=2)}")

    job_response = requests.post(
        f"{API_URL}/video/async",
        headers={"Content-Type": "application/json"},
        data=json.dumps(payload)
    )

    if job_response.status_code != 200:
        logger.error(f"Job submission failed: {job_response.text}")
        return

    job_result = job_response.json()
    job_id = job_result['job_id']
    initial_status = job_result['status']

    logger.info(f"✓ Job submitted")
    logger.info(f"  Job ID: {job_id}")
    logger.info(f"  Initial Status: {initial_status}")

    # Step 3: Poll job status
    logger.info("\nSTEP 3: Polling job status...")

    # Client-side poll budget: up to 20 polls, 3s apart (~60s total).
    max_polls = 20
    poll_interval = 3

    for poll_num in range(1, max_polls + 1):
        time.sleep(poll_interval)

        logger.info(f"\n--- POLL #{poll_num}/{max_polls} ---")

        status_response = requests.get(f"{API_URL}/video/jobs/{job_id}")

        if status_response.status_code != 200:
            logger.error(f"Failed to get job status: {status_response.text}")
            break

        job_status = status_response.json()
        current_status = job_status['status']

        logger.info(f"Status: {current_status}")
        logger.info(f"Created: {job_status['created_at']}")
        logger.info(f"Updated: {job_status['updated_at']}")

        if current_status == 'succeeded':
            logger.info("\n✓✓✓ JOB SUCCEEDED ✓✓✓")
            result = job_status.get('result', {})

            logger.info("\nResult Details:")
            logger.info(f"  Success Flag: {result.get('success')}")
            logger.info(f"  Conversation ID: {result.get('conversation_id')}")
            logger.info(f"  Prompt: {result.get('prompt')}")
            logger.info(f"  Timestamp: {result.get('timestamp')}")

            video_urls = result.get('video_urls', [])
            logger.info(f"\n  Video URLs Count: {len(video_urls)}")

            if video_urls:
                # Dump URL shape diagnostics so extraction regressions
                # are visible directly in the log.
                for idx, url in enumerate(video_urls, 1):
                    logger.info(f"\n  VIDEO {idx}:")
                    logger.info(f"    Full URL: {url}")
                    logger.info(f"    Length: {len(url)} characters")
                    logger.info(f"    Has .mp4: {'.mp4' in url}")
                    logger.info(f"    Has fbcdn: {'fbcdn' in url}")
                    logger.info(f"    Has /v/: {'/v/' in url}")
                    logger.info(f"    Has /t6/: {'/t6/' in url}")
            else:
                logger.warning("\n  ⚠ NO VIDEO URLs FOUND!")
                logger.warning("  This means URL extraction failed even though generation succeeded")

            logger.debug(f"\nFull Job Status JSON:\n{json.dumps(job_status, indent=2)}")
            break

        elif current_status == 'failed':
            logger.error("\n✗✗✗ JOB FAILED ✗✗✗")
            error = job_status.get('error', 'Unknown error')
            logger.error(f"Error: {error}")
            logger.debug(f"\nFull Job Status JSON:\n{json.dumps(job_status, indent=2)}")
            break

        elif current_status in ['pending', 'running']:
            logger.info(f"⏳ Job still {current_status}... waiting {poll_interval}s")
        else:
            logger.warning(f"Unknown status: {current_status}")
    else:
        # for/else: only reached when the loop exhausted max_polls
        # without breaking (i.e. no terminal status observed).
        logger.warning(f"\n⚠ Reached max polls ({max_polls}). Job may still be processing.")
        logger.info(f"Check manually: GET {API_URL}/video/jobs/{job_id}")

    print("\n" + "=" * 80)
    print("DEBUG TEST COMPLETE")
    print("=" * 80)
    print("\nCheck meta_ai_debug.log for server-side logs")
    print()


if __name__ == "__main__":
    try:
        test_async_video_generation()
    except KeyboardInterrupt:
        logger.info("\n\nTest interrupted by user")
    except Exception as e:
        logger.error(f"\n\nTest failed with exception: {e}", exc_info=True)

enable_debug_logging.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
"""
2+
Enable comprehensive debug logging for Meta AI API troubleshooting.
3+
Run this before starting the server to see detailed logs.
4+
"""
5+
6+
import logging
7+
import sys
8+
9+
# Configure root logger
10+
logging.basicConfig(
11+
level=logging.DEBUG,
12+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
13+
handlers=[
14+
logging.StreamHandler(sys.stdout),
15+
logging.FileHandler('meta_ai_debug.log', mode='w')
16+
]
17+
)
18+
19+
# Set specific loggers to DEBUG
20+
logging.getLogger('metaai_api').setLevel(logging.DEBUG)
21+
logging.getLogger('metaai_api.video_generation').setLevel(logging.DEBUG)
22+
logging.getLogger('metaai_api.api_server').setLevel(logging.DEBUG)
23+
logging.getLogger('uvicorn').setLevel(logging.INFO)
24+
25+
print("=" * 70)
26+
print("DEBUG LOGGING ENABLED")
27+
print("=" * 70)
28+
print("Log output:")
29+
print(" - Console: stdout (colored)")
30+
print(" - File: meta_ai_debug.log")
31+
print("=" * 70)
32+
print()

pyproject.toml

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
[build-system]
2-
requires = ["setuptools>=61", "wheel"]
2+
requires = ["setuptools>=61", "setuptools_scm"]
33
build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "metaai-sdk"
7-
version = "2.2.1"
7+
dynamic = ["version"]
88
description = "Feature-rich Python SDK for Meta AI - Chat, Image & Video Generation powered by Llama 3"
99
authors = [{name = "Ashiq Hussain Mir", email = "imseldrith@gmail.com"}]
1010
readme = "README.md"
@@ -48,4 +48,7 @@ dev = [
4848
"pytest",
4949
"black",
5050
"flake8",
51-
]
51+
]
52+
[tool.setuptools_scm]
# Fix: "release" is not a valid setuptools_scm version_scheme (valid values
# include "guess-next-dev", "post-release", "release-branch-semver", ...);
# an unknown scheme makes version resolution — and hence the build — fail.
# "post-release" keeps tagged builds at the plain tag version.
version_scheme = "post-release"
local_scheme = "no-local-version"

setup.cfg

Lines changed: 0 additions & 2 deletions
This file was deleted.

0 commit comments

Comments
 (0)