
Commit 72fa443

fix: Complete sync wrapper coverage and Amazon Search dataset fix
1 parent ea894a8 commit 72fa443

6 files changed, +380 −3 lines

.gitignore
Lines changed: 1 addition & 1 deletion

@@ -261,4 +261,4 @@ Thumbs.db
 # Project specific
 *.log
 .cache/
-
+probe

src/brightdata/api/base.py
Lines changed: 7 additions & 1 deletion

@@ -38,11 +38,17 @@ def _execute_sync(self, *args: Any, **kwargs: Any) -> Any:
         Execute API operation synchronously.
 
         Wraps async method using asyncio.run() for sync compatibility.
+        Properly manages engine context.
         """
         try:
             asyncio.get_running_loop()
             raise RuntimeError(
                 "Cannot call sync method from async context. Use async method instead."
             )
         except RuntimeError:
-            return asyncio.run(self._execute_async(*args, **kwargs))
+
+            async def _run():
+                async with self.engine:
+                    return await self._execute_async(*args, **kwargs)
+
+            return asyncio.run(_run())
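
In effect, every sync call now opens the engine's async context, awaits the real implementation, and closes the context again before returning. A minimal, self-contained sketch of that pattern (runnable as-is; Engine and DemoAPI are illustrative stand-ins, not the SDK's classes):

import asyncio
from typing import Any


class Engine:
    """Stand-in for the SDK engine: an async context manager around a session."""

    async def __aenter__(self) -> "Engine":
        print("engine opened")
        return self

    async def __aexit__(self, *exc_info: Any) -> None:
        print("engine closed")


class DemoAPI:
    """Illustrative only - mirrors the _execute_sync wrapper shown in the hunk above."""

    def __init__(self) -> None:
        self.engine = Engine()

    async def _execute_async(self, x: int) -> int:
        await asyncio.sleep(0)  # pretend network I/O
        return x * 2

    def execute(self, x: int) -> int:
        try:
            asyncio.get_running_loop()
            raise RuntimeError("Cannot call sync method from async context.")
        except RuntimeError:
            async def _run() -> int:
                async with self.engine:
                    return await self._execute_async(x)

            return asyncio.run(_run())


print(DemoAPI().execute(21))  # engine opened / engine closed / 42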

src/brightdata/api/scrape_service.py
Lines changed: 6 additions & 1 deletion

@@ -214,4 +214,9 @@ async def url_async(
 
     def url(self, *args, **kwargs) -> Union[ScrapeResult, List[ScrapeResult]]:
         """Scrape URL(s) synchronously."""
-        return asyncio.run(self.url_async(*args, **kwargs))
+
+        async def _run():
+            async with self._client.engine:
+                return await self.url_async(*args, **kwargs)
+
+        return asyncio.run(_run())

src/brightdata/api/search_service.py
Lines changed: 6 additions & 0 deletions

@@ -116,9 +116,11 @@ def google(
         ... location="United States"
         ... )
         """
+
         async def _run():
             async with self._client.engine:
                 return await self.google_async(query, **kwargs)
+
         return asyncio.run(_run())
 
     async def bing_async(
@@ -151,9 +153,11 @@ async def bing_async(
 
     def bing(self, query: Union[str, List[str]], **kwargs):
         """Search Bing synchronously."""
+
        async def _run():
            async with self._client.engine:
                return await self.bing_async(query, **kwargs)
+
        return asyncio.run(_run())
 
    async def yandex_async(
@@ -186,9 +190,11 @@ async def yandex_async(
 
    def yandex(self, query: Union[str, List[str]], **kwargs):
        """Search Yandex synchronously."""
+
        async def _run():
            async with self._client.engine:
                return await self.yandex_async(query, **kwargs)
+
        return asyncio.run(_run())
 
    @property
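
All three search wrappers already used the engine-managed runner; this file only gains the blank lines the formatter expects. For completeness, a small runnable sketch of the constraint every sync wrapper shares: asyncio.run() cannot start while another event loop is already running, which is why async callers must use the *_async variants (the function names here are stand-ins, not the SDK API):

import asyncio


def sync_search() -> str:
    """Stand-in for a sync wrapper such as google(), bing(), or yandex()."""
    async def _run() -> str:
        return "results"

    return asyncio.run(_run())


async def inside_async_code() -> None:
    try:
        sync_search()  # calling a sync wrapper from async code
    except RuntimeError as err:
        print(f"RuntimeError: {err}")


print(sync_search())              # fine from ordinary sync code -> "results"
asyncio.run(inside_async_code())  # the same call fails inside a running loop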

tests/run_all.py
Lines changed: 185 additions & 0 deletions

@@ -0,0 +1,185 @@
#!/usr/bin/env python3
"""
Comprehensive test runner - validates EVERYTHING
Saves all outputs to probe/ directory for inspection
"""

import subprocess
import json
from pathlib import Path
from datetime import datetime

# Create probe directory structure matching tests/ structure
PROBE_DIR = Path("probe")
PROBE_DIR.mkdir(exist_ok=True)
(PROBE_DIR / "unit").mkdir(exist_ok=True)
(PROBE_DIR / "e2e").mkdir(exist_ok=True)
(PROBE_DIR / "integration").mkdir(exist_ok=True)
(PROBE_DIR / "enes").mkdir(exist_ok=True)
(PROBE_DIR / "root").mkdir(exist_ok=True)

# Test suites to run (matches tests/ directory structure)
test_suites = {
    "root_readme": "tests/readme.py",  # Root level test
    "unit": "tests/unit/",
    "e2e": "tests/e2e/",
    "integration": "tests/integration/",
    "enes": "tests/enes/",
}

# Linting checks
lint_checks = {
    "black": ["black", "--check", "src", "tests"],
    "ruff": ["ruff", "check", "src/", "tests/"],
}

timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
results = {"timestamp": timestamp, "test_suites": {}, "lint_checks": {}, "summary": {}}

print("=" * 80)
print("COMPREHENSIVE SDK VALIDATION")
print("=" * 80)
print(f"Timestamp: {timestamp}")
print(f"Output directory: {PROBE_DIR.absolute()}")
print("=" * 80)

# Run linting checks
print("\n📋 STEP 1: LINTING CHECKS")
print("-" * 80)

for check_name, command in lint_checks.items():
    print(f"\n{check_name.upper()}:")
    result = subprocess.run(command, capture_output=True, text=True, timeout=60)

    output_file = PROBE_DIR / f"{check_name}_{timestamp}.txt"
    output_file.write_text(result.stdout + "\n\n" + result.stderr)

    passed = result.returncode == 0
    results["lint_checks"][check_name] = {
        "passed": passed,
        "output_file": str(output_file),
        "return_code": result.returncode,
    }

    if passed:
        print(" ✅ PASSED")
    else:
        print(f" ❌ FAILED (exit code {result.returncode})")
    print(f" 📁 Output saved to: {output_file.name}")

# Run test suites
print("\n📋 STEP 2: TEST SUITES")
print("-" * 80)

total_passed = 0
total_failed = 0

for suite_name, test_path in test_suites.items():
    print(f"\n{suite_name.upper()} TESTS:")

    result = subprocess.run(
        ["python", "-m", "pytest", test_path, "-v", "--tb=short"],
        capture_output=True,
        text=True,
        timeout=300,  # Increased timeout for readme tests
    )

    # Save to proper subdirectory
    if suite_name == "root_readme":
        output_file = PROBE_DIR / "root" / f"readme_{timestamp}.txt"
    else:
        output_file = PROBE_DIR / suite_name / f"all_{timestamp}.txt"

    output_file.write_text(result.stdout + "\n\n" + result.stderr)

    # Parse results
    output = result.stdout + result.stderr

    # Extract pass/fail counts
    import re

    match = re.search(r"(\d+) passed", output)
    passed = int(match.group(1)) if match else 0

    match = re.search(r"(\d+) failed", output)
    failed = int(match.group(1)) if match else 0

    match = re.search(r"(\d+) skipped", output)
    skipped = int(match.group(1)) if match else 0

    total_passed += passed
    total_failed += failed

    results["test_suites"][suite_name] = {
        "passed": passed,
        "failed": failed,
        "skipped": skipped,
        "output_file": str(output_file),
        "return_code": result.returncode,
    }

    status = "✅ PASSED" if failed == 0 else f"❌ {failed} FAILED"
    print(f" {status} - {passed} passed, {failed} failed, {skipped} skipped")
    print(f" 📁 Output saved to: {output_file.relative_to(Path.cwd())}")

    # Also run individual test files for detailed inspection
    if suite_name in ["unit", "e2e", "integration"]:
        test_files = Path(test_path).glob("test_*.py")
        for test_file in test_files:
            individual_result = subprocess.run(
                ["python", "-m", "pytest", str(test_file), "-v", "--tb=short"],
                capture_output=True,
                text=True,
                timeout=60,
            )

            # Save individual test outputs
            individual_output = PROBE_DIR / suite_name / f"{test_file.stem}_{timestamp}.txt"
            individual_output.write_text(
                individual_result.stdout + "\n\n" + individual_result.stderr
            )

# Save summary
summary_file = PROBE_DIR / f"summary_{timestamp}.json"
results["summary"] = {
    "total_tests_passed": total_passed,
    "total_tests_failed": total_failed,
    "all_linting_passed": all(v["passed"] for v in results["lint_checks"].values()),
    "all_tests_passed": total_failed == 0,
    "overall_status": (
        "PASS"
        if (total_failed == 0 and all(v["passed"] for v in results["lint_checks"].values()))
        else "FAIL"
    ),
}

summary_file.write_text(json.dumps(results, indent=2))

# Final summary
print("\n" + "=" * 80)
print("FINAL VALIDATION SUMMARY")
print("=" * 80)

print("\n📊 TEST RESULTS:")
for suite, data in results["test_suites"].items():
    print(f" {suite:15} {data['passed']:4} passed, {data['failed']:4} failed")

print(f"\n TOTAL: {total_passed:4} passed, {total_failed:4} failed")

print("\n🔍 LINTING:")
for check, data in results["lint_checks"].items():
    status = "✅ PASS" if data["passed"] else "❌ FAIL"
    print(f" {check:15} {status}")

print(f"\n📁 All outputs saved to: {PROBE_DIR.absolute()}")
print(f"📄 Summary: {summary_file.name}")

print("\n" + "=" * 80)
if results["summary"]["overall_status"] == "PASS":
    print("🎉 ALL VALIDATIONS PASSED - SDK IS 100% WORKING")
else:
    print("⚠️ SOME VALIDATIONS FAILED - CHECK PROBE OUTPUTS")
print("=" * 80)

# Exit with appropriate code
exit(0 if results["summary"]["overall_status"] == "PASS" else 1)
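
Each run leaves a timestamped summary_*.json in probe/. A quick sketch for inspecting the most recent run afterwards (assumes the runner above has been executed from the repository root, e.g. python tests/run_all.py):

import json
from pathlib import Path

# %Y%m%d_%H%M%S timestamps sort lexically, so the last path is the newest run.
summaries = sorted(Path("probe").glob("summary_*.json"))
if not summaries:
    print("no summary files found - run tests/run_all.py first")
else:
    latest = json.loads(summaries[-1].read_text())
    print("overall:", latest["summary"]["overall_status"])
    for suite, data in latest["test_suites"].items():
        print(f"  {suite}: {data['passed']} passed, {data['failed']} failed"
              f" -> {data['output_file']}")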
