Skip to content

Commit 74bebb1

Browse files
committed
expose reasoning tokens count
1 parent e46142f commit 74bebb1

File tree

7 files changed

+686
-533
lines changed

7 files changed

+686
-533
lines changed

optillm/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
extract_optillm_approach = module.extract_optillm_approach
2828
get_config = module.get_config
2929
load_plugins = module.load_plugins
30+
count_reasoning_tokens = module.count_reasoning_tokens
3031

3132
# Export execution functions
3233
execute_single_approach = module.execute_single_approach
@@ -48,6 +49,7 @@
4849
'extract_optillm_approach',
4950
'get_config',
5051
'load_plugins',
52+
'count_reasoning_tokens',
5153
'execute_single_approach',
5254
'execute_combined_approaches',
5355
'execute_parallel_approaches',

tests/test.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,8 @@
3030
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
3131
logger = logging.getLogger(__name__)
3232

33-
# OpenAI API configuration
34-
API_KEY = os.environ.get("OPENAI_API_KEY")
33+
# API configuration - default to local inference for testing
34+
API_KEY = os.environ.get("OPENAI_API_KEY", "optillm")
3535

3636
# Mock OpenAI client for testing purposes
3737
class MockOpenAIClient:
@@ -150,14 +150,23 @@ def main():
150150
args.test_cases = os.path.join(script_dir, "test_cases.json")
151151

152152
# If using local inference mode, override model to a local model
153-
if os.environ.get("OPTILLM_API_KEY") == "optillm" and args.model == "gpt-4o-mini":
153+
if API_KEY == "optillm" and args.model == "gpt-4o-mini":
154154
args.model = "Qwen/Qwen2.5-0.5B-Instruct"
155155
logger.info(f"Using local model: {args.model}")
156+
157+
# Set environment variable for local inference
158+
if API_KEY == "optillm":
159+
os.environ["OPTILLM_API_KEY"] = "optillm"
156160

157161
test_cases = load_test_cases(args.test_cases)
158162

163+
# Use local inference by default for testing
159164
if args.base_url:
160165
client = OpenAI(api_key=API_KEY, base_url=args.base_url)
166+
elif API_KEY == "optillm":
167+
# Use local inference endpoint
168+
client = OpenAI(api_key=API_KEY, base_url="http://localhost:8000/v1")
169+
logger.info("Using local inference endpoint: http://localhost:8000/v1")
161170
else:
162171
client = OpenAI(api_key=API_KEY)
163172
# client = LiteLLMWrapper()

tests/test_approaches.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
Tests the basic structure of approaches without requiring actual model inference
55
"""
66

7-
import pytest
87
import sys
98
import os
109
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

tests/test_plugins.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,8 +79,8 @@ def test_majority_voting_plugin():
7979
import optillm.plugins.majority_voting_plugin as plugin
8080
assert hasattr(plugin, 'run')
8181
assert hasattr(plugin, 'SLUG')
82-
assert hasattr(plugin, 'extract_answer')
83-
assert hasattr(plugin, 'normalize_answer')
82+
assert hasattr(plugin, 'extract_final_answer')
83+
assert hasattr(plugin, 'normalize_response')
8484
assert plugin.SLUG == "majority_voting"
8585

8686

0 commit comments

Comments
 (0)