 #!/usr/bin/env /workspace/tmp_windsurf/py310/bin/python3
 
 from openai import OpenAI
+from anthropic import Anthropic
 import argparse
+import os
+from dotenv import load_dotenv
+from pathlib import Path
 
-def create_llm_client():
-    client = OpenAI(
-        base_url="http://192.168.180.137:8006/v1",
-        api_key="not-needed"  # API key might not be needed for local deployment
-    )
-    return client
+# Load environment variables from .env.local
+env_path = Path('.') / '.env.local'
+load_dotenv(dotenv_path=env_path)
 
-def query_llm(prompt, client=None, model="Qwen/Qwen2.5-32B-Instruct-AWQ"):
+def create_llm_client(provider="openai"):
+    if provider == "openai":
+        api_key = os.getenv('OPENAI_API_KEY')
+        if not api_key:
+            raise ValueError("OPENAI_API_KEY not found in environment variables")
+        return OpenAI(
+            api_key=api_key
+        )
+    elif provider == "anthropic":
+        api_key = os.getenv('ANTHROPIC_API_KEY')
+        if not api_key:
+            raise ValueError("ANTHROPIC_API_KEY not found in environment variables")
+        return Anthropic(
+            api_key=api_key
+        )
+    elif provider == "local":
+        return OpenAI(
+            base_url="http://192.168.180.137:8006/v1",
+            api_key="not-needed"  # a local deployment may not need an API key
+        )
+    else:
+        raise ValueError(f"Unsupported provider: {provider}")
+
+def query_llm(prompt, client=None, model=None, provider="openai"):
     if client is None:
-        client = create_llm_client()
+        client = create_llm_client(provider)
 
     try:
-        response = client.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "user", "content": prompt}
-            ],
-            temperature=0.7,
-        )
-        return response.choices[0].message.content
+        # Pick a per-provider default model when none is given
+        if model is None:
+            if provider == "openai":
+                model = "gpt-3.5-turbo"
+            elif provider == "anthropic":
+                model = "claude-3-sonnet-20240229"
+            elif provider == "local":
+                model = "Qwen/Qwen2.5-32B-Instruct-AWQ"
+
+        if provider == "openai" or provider == "local":
+            response = client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "user", "content": prompt}
+                ],
+                temperature=0.7,
+            )
+            return response.choices[0].message.content
+        elif provider == "anthropic":
+            response = client.messages.create(
+                model=model,
+                max_tokens=1000,
+                messages=[
+                    {"role": "user", "content": prompt}
+                ]
+            )
+            return response.content[0].text
     except Exception as e:
         print(f"Error querying LLM: {e}")
-        print("Note: If you haven't configured a local LLM server, this error is expected and can be ignored.")
-        print("The LLM functionality is optional and won't affect other features.")
         return None
 
 def main():
     parser = argparse.ArgumentParser(description='Query an LLM with a prompt')
     parser.add_argument('--prompt', type=str, help='The prompt to send to the LLM', required=True)
-    parser.add_argument('--model', type=str, default="Qwen/Qwen2.5-32B-Instruct-AWQ",
-                        help='The model to use (default: Qwen/Qwen2.5-32B-Instruct-AWQ)')
+    parser.add_argument('--provider', type=str, choices=['openai', 'anthropic', 'local'],
+                        default="openai", help='The API provider to use')
+    parser.add_argument('--model', type=str,
+                        help='The model to use (default depends on provider)')
     args = parser.parse_args()
 
-    client = create_llm_client()
-    response = query_llm(args.prompt, client, model=args.model)
+    # Pick a default model when --model is not given; 'local' falls back
+    # to the default set inside query_llm
+    if not args.model:
+        if args.provider == "openai":
+            args.model = "gpt-3.5-turbo"
+        elif args.provider == "anthropic":
+            args.model = "claude-3-5-sonnet-20241022"
+
+    client = create_llm_client(args.provider)
+    response = query_llm(args.prompt, client, model=args.model, provider=args.provider)
     if response:
         print(response)
     else:
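Below is a minimal usage sketch of the resulting multi-provider interface. It assumes the script above is importable as llm_api (a hypothetical module name) and that a .env.local file in the working directory defines OPENAI_API_KEY and/or ANTHROPIC_API_KEY, as the code expects.

# Usage sketch; "llm_api" is a hypothetical module name for the script above.
# .env.local is expected to contain lines such as:
#   OPENAI_API_KEY=sk-...
#   ANTHROPIC_API_KEY=sk-ant-...
from llm_api import create_llm_client, query_llm

# Create a client for one provider explicitly and reuse it across calls.
client = create_llm_client(provider="anthropic")
reply = query_llm("Summarize this repository in one sentence.",
                  client=client, provider="anthropic")
if reply:
    print(reply)

# Or let query_llm build the client and pick the provider's default model.
print(query_llm("Hello!", provider="openai"))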