|
13 | 13 | LLM incrementally better at solving problems by learning from its experiences. |
14 | 14 | """ |
15 | 15 |
|
16 | | -import os |
17 | | -import sys |
18 | | -import importlib.util |
19 | 16 | from typing import Tuple |
| 17 | +from optillm.plugins.spl import run_spl |
20 | 18 |
|
# Plugin identifier: the slug optillm uses to route approach requests
# (e.g. "spl-model" prefixes) to this plugin.
SLUG = "spl"
|
def run(system_prompt: str, initial_query: str, client, model: str, request_config: dict = None) -> Tuple[str, int]:
    """
    Entry point for the System Prompt Learning (SPL) plugin.

    This is a thin wrapper: all of the actual work is delegated to
    ``run_spl`` from the ``optillm.plugins.spl`` package, which is
    imported at module level.

    Args:
        system_prompt: The system prompt passed to the LLM.
        initial_query: The user's query.
        client: The LLM client used to issue completions.
        model: The model identifier.
        request_config: Optional request configuration. May include
            ``{'spl_learning': True}`` to enable learning mode.

    Returns:
        Tuple[str, int]: The LLM response text and the token count.
    """
    # Delegate directly; run_spl owns the full SPL workflow.
    return run_spl(system_prompt, initial_query, client, model, request_config)
0 commit comments