Skip to content

Commit 0b7f980

Browse files
authored
fix: wrap evaluate command with if __name__ == "__main__" (#644)
Signed-off-by: Marta Stepniewska-Dziubinska <martas@nvidia.com>
1 parent 9af7f85 commit 0b7f980

File tree

6 files changed: +28 additions, −41 deletions

.github/config/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
sphinx
1+
sphinx==8.2.3
22
sphinx-autobuild # For live doc serving while editing docs
33
sphinx-autodoc2 # For documenting Python API
44
autodoc_pydantic # For rendering pydantic dataclasses

docs/get-started/_snippets/core_basic.py

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,6 @@
1919
# Prerequisites: Set your API key
2020
# export NGC_API_KEY="nvapi-..."
2121

22-
import os
23-
2422
# [snippet-start]
2523
from nemo_evaluator.api.api_dataclasses import (
2624
ApiEndpoint,
@@ -49,14 +47,9 @@
4947
)
5048
)
5149

52-
# Run evaluation
53-
result = evaluate(eval_cfg=eval_config, target_cfg=target_config)
54-
print(f"Evaluation completed: {result}")
55-
# [snippet-end]
5650

5751
if __name__ == "__main__":
58-
# Note: This requires a valid API key to actually run
59-
api_key = os.getenv("NGC_API_KEY")
60-
if not api_key:
61-
print("Set NGC_API_KEY environment variable to run this example")
62-
print("export NGC_API_KEY='your-key-here'")
52+
# Run evaluation
53+
result = evaluate(eval_cfg=eval_config, target_cfg=target_config)
54+
print(f"Evaluation completed: {result}")
55+
# [snippet-end]

docs/get-started/_snippets/core_full_example.py

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,9 @@
5252
)
5353
)
5454

55-
# Run evaluation
56-
try:
55+
if __name__ == "__main__":
56+
# Run evaluation
5757
result = evaluate(eval_cfg=eval_config, target_cfg=target_config)
5858
print(f"Evaluation completed. Results saved to: {eval_config.output_dir}")
59-
except Exception as e:
60-
print(f"Evaluation failed: {e}")
61-
# [snippet-end]
6259

63-
if __name__ == "__main__":
64-
print(
65-
"Replace 'nvapi-your-key-here' with your actual NGC API key to run this example"
66-
)
60+
# [snippet-end]

docs/get-started/_snippets/core_multi_benchmark.py

Lines changed: 13 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -39,21 +39,19 @@
3939
)
4040
)
4141

42-
# Run multiple benchmarks
43-
benchmarks = ["mmlu_pro", "humaneval", "mgsm"]
44-
results = {}
4542

46-
for benchmark in benchmarks:
47-
config = EvaluationConfig(
48-
type=benchmark,
49-
output_dir=f"./results/{benchmark}",
50-
params=ConfigParams(limit_samples=10),
51-
)
43+
if __name__ == "__main__":
44+
# Run multiple benchmarks
45+
benchmarks = ["mmlu_pro", "humaneval", "mgsm"]
46+
results = {}
5247

53-
result = evaluate(eval_cfg=config, target_cfg=target_config)
54-
results[benchmark] = result
55-
# [snippet-end]
48+
for benchmark in benchmarks:
49+
config = EvaluationConfig(
50+
type=benchmark,
51+
output_dir=f"./results/{benchmark}",
52+
params=ConfigParams(limit_samples=10),
53+
)
5654

57-
if __name__ == "__main__":
58-
print("Multi-benchmark evaluation example")
59-
print("Replace 'your_api_key_here' with your actual API key to run")
55+
result = evaluate(eval_cfg=config, target_cfg=target_config)
56+
results[benchmark] = result
57+
# [snippet-end]

docs/get-started/_snippets/nemo_fw_basic.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,8 @@
4141
),
4242
)
4343

44-
# Run evaluation
45-
results = evaluate(target_cfg=target, eval_cfg=config)
46-
print(results)
44+
if __name__ == "__main__":
45+
# Run evaluation
46+
results = evaluate(target_cfg=target, eval_cfg=config)
47+
print(results)
4748
# [snippet-end]

docs/get-started/quickstart/core.md

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -166,8 +166,9 @@ config = EvaluationConfig(
166166
)
167167
)
168168

169-
result = evaluate(eval_cfg=config, target_cfg=target)
170-
print(f"Evaluation completed: {result}")
169+
if __name__ == "__main__":
170+
result = evaluate(eval_cfg=config, target_cfg=target)
171+
print(f"Evaluation completed: {result}")
171172
```
172173

173174
**Available Interceptors:**

Commit comments: 0