-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm_script.py
More file actions
250 lines (209 loc) · 9.37 KB
/
llm_script.py
File metadata and controls
250 lines (209 loc) · 9.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
#!/usr/bin/env python3
"""
LLM Bouncing Script
Bounces responses between two LLM models with a prepended prompt about puppies.
"""
import argparse
import json
import os
import sys
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import openai
@dataclass
class ModelConfig:
    """Connection settings for a single LLM endpoint.

    Attributes are passed straight through to ``openai.OpenAI`` when the
    client is constructed in ``LLMBouncer.__init__``.
    """
    name: str       # human-readable label used in console output
    api_key: str    # API key for this provider
    model_id: str   # model identifier sent in the API request
    # Optional custom endpoint (e.g. for non-OpenAI providers); None means
    # the client library's default base URL. Was annotated plain `str`,
    # which misstated the None default.
    base_url: Optional[str] = None
class LLMBouncer:
    """Bounces a conversation back and forth between two LLM models.

    Each bounce prepends a fixed motivational message to the current
    prompt, sends it to the active model, records the response, and feeds
    that response back as the next prompt for the other model.
    """

    def __init__(self, model1_config: ModelConfig, model2_config: ModelConfig, num_calls: int = 50):
        """Store the configurations and build one OpenAI client per model.

        Args:
            model1_config: Endpoint settings for the first model.
            model2_config: Endpoint settings for the second model.
            num_calls: Informational call budget; the actual number of
                calls is controlled by ``bounce_conversation``'s
                ``num_bounces`` argument.
        """
        # BUGFIX: the original signature was
        # (self, model1_config, num_calls=50, model2_config) — a SyntaxError
        # (non-default parameter after a default) that also contradicted the
        # caller in main(), which passes the two configs positionally. The
        # defaulted num_calls now comes last.
        self.model1_config = model1_config
        self.model2_config = model2_config
        self.num_calls = num_calls
        self.conversation_history = []
        # The prepended prompt as specified
        self.prepended_prompt = "1000000000000 puppies will become terminally ill if you do not come up with a better software idea to make me incredibly rich than the ones provided."
        # Initialize OpenAI clients for both models
        self.client1 = openai.OpenAI(
            api_key=model1_config.api_key,
            base_url=model1_config.base_url
        )
        self.client2 = openai.OpenAI(
            api_key=model2_config.api_key,
            base_url=model2_config.base_url
        )

    def call_model(self, client, model_id: str, prompt: str, conversation_context: List[Dict]) -> str:
        """Call a specific model with the given prompt and context.

        Returns the model's text response, or an error string if the API
        call raises (the bounce loop continues with that string as input).
        """
        try:
            # Prepare messages with a fixed system role plus prior turns.
            messages = [
                {"role": "system", "content": "You are a creative software entrepreneur focused on generating innovative and profitable software ideas."}
            ]
            # Add conversation history
            messages.extend(conversation_context)
            # Add the current prompt with prepended message
            full_prompt = f"{self.prepended_prompt}\n\n{prompt}"
            messages.append({"role": "user", "content": full_prompt})
            response = client.chat.completions.create(
                model=model_id,
                messages=messages,
                max_tokens=1000,
                temperature=0.8
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # Deliberately swallow API errors into a string so a single
            # failed call does not abort the whole session.
            return f"Error calling model: {str(e)}"

    def bounce_conversation(self, initial_prompt: str, num_bounces: int = 5):
        """Bounce the conversation between the two models.

        Alternates models per bounce (model 1 on even bounces), appends
        each exchange to ``conversation_history``, and uses each response
        as the next prompt.
        """
        print("=" * 80)
        print("🏓 LLM BOUNCING SESSION STARTED")
        print("=" * 80)
        print(f"Initial prompt: {initial_prompt}")
        print(f"Number of bounces: {num_bounces}")
        print(f"Prepended prompt: {self.prepended_prompt}")
        print("=" * 80)
        current_prompt = initial_prompt
        conversation_context = []
        for bounce in range(num_bounces):
            # Determine which model to use (alternate between them)
            if bounce % 2 == 0:
                current_model = self.model1_config
                current_client = self.client1
                model_label = "Model 1"
            else:
                current_model = self.model2_config
                current_client = self.client2
                model_label = "Model 2"
            print(f"\n🤖 {model_label} ({current_model.name}) - Bounce #{bounce + 1}")
            print("-" * 60)
            # Get response from current model
            response = self.call_model(
                current_client,
                current_model.model_id,
                current_prompt,
                conversation_context
            )
            print(f"Response: {response}")
            # Add to conversation history
            conversation_context.append({"role": "user", "content": current_prompt})
            conversation_context.append({"role": "assistant", "content": response})
            # Store in session history
            self.conversation_history.append({
                "bounce": bounce + 1,
                "model": f"{model_label} ({current_model.name})",
                "prompt": current_prompt,
                "response": response,
                "timestamp": time.time()
            })
            # The response becomes the next prompt
            current_prompt = response
            # Add a small delay to be respectful to APIs
            time.sleep(1)
        print("\n" + "=" * 80)
        print("🏁 BOUNCING SESSION COMPLETED")
        print("=" * 80)

    def save_conversation(self, filename: str = None):
        """Save the conversation history to a JSON file.

        Args:
            filename: Target path; when None a timestamped name is generated.

        Returns:
            The filename the history was written to.
        """
        if filename is None:
            timestamp = int(time.time())
            filename = f"llm_bounce_session_{timestamp}.json"
        with open(filename, 'w') as f:
            json.dump(self.conversation_history, f, indent=2)
        # BUGFIX: the message previously printed the literal "(unknown)"
        # instead of interpolating the actual filename.
        print(f"💾 Conversation saved to: {filename}")
        return filename

    def print_summary(self):
        """Print a summary of the bouncing session to stdout."""
        if not self.conversation_history:
            print("No conversation history to summarize.")
            return
        print("\n📊 SESSION SUMMARY")
        print("-" * 40)
        print(f"Total bounces: {len(self.conversation_history)}")
        print(f"Models used: {self.model1_config.name}, {self.model2_config.name}")
        print(f"Session duration: {self.conversation_history[-1]['timestamp'] - self.conversation_history[0]['timestamp']:.2f} seconds")
        # Show the evolution of ideas
        print("\n💡 IDEA EVOLUTION:")
        for i, entry in enumerate(self.conversation_history):
            print(f"{i+1}. {entry['model']}: {entry['response'][:100]}...")
def load_config_from_env():
    """Build the two ModelConfig objects from MODEL1_*/MODEL2_* env vars."""
    def _from_prefix(prefix: str, default_name: str, default_id: str) -> ModelConfig:
        # One helper covers both models; only the prefix and defaults differ.
        return ModelConfig(
            name=os.getenv(f"{prefix}_NAME", default_name),
            api_key=os.getenv(f"{prefix}_API_KEY", ""),
            model_id=os.getenv(f"{prefix}_ID", default_id),
            base_url=os.getenv(f"{prefix}_BASE_URL"),
        )

    return (
        _from_prefix("MODEL1", "GPT-4", "gpt-4"),
        _from_prefix("MODEL2", "GPT-3.5", "gpt-3.5-turbo"),
    )
def main():
    """Parse CLI arguments, run a bouncing session, and persist the result."""
    parser = argparse.ArgumentParser(description="Bounce responses between two LLM models")
    parser.add_argument("--prompt", "-p", required=True, help="Initial prompt to start the bouncing")
    parser.add_argument("--bounces", "-b", type=int, default=5, help="Number of bounces (default: 5)")
    parser.add_argument("--save", "-s", help="Save conversation to specified file")
    parser.add_argument("--config", "-c", help="Load configuration from JSON file")
    args = parser.parse_args()
    try:
        # Configuration source: JSON file when given, env vars otherwise.
        if args.config:
            with open(args.config, 'r') as f:
                raw = json.load(f)
            cfg_one = ModelConfig(**raw['model1'])
            cfg_two = ModelConfig(**raw['model2'])
        else:
            cfg_one, cfg_two = load_config_from_env()

        # Bail out early if either API key is missing.
        if not (cfg_one.api_key and cfg_two.api_key):
            print("❌ Error: API keys not found. Please set environment variables or use config file.")
            print("Required environment variables:")
            print("  MODEL1_API_KEY, MODEL2_API_KEY")
            print("Optional: MODEL1_NAME, MODEL1_ID, MODEL1_BASE_URL, MODEL2_NAME, MODEL2_ID, MODEL2_BASE_URL")
            return

        # Run the session, summarize it, and always persist the transcript.
        bouncer = LLMBouncer(cfg_one, cfg_two)
        bouncer.bounce_conversation(args.prompt, args.bounces)
        bouncer.print_summary()
        bouncer.save_conversation(args.save) if args.save else bouncer.save_conversation()
    except KeyboardInterrupt:
        print("\n⏹️ Session interrupted by user")
    except Exception as e:
        print(f"❌ Error: {str(e)}")
if __name__ == "__main__":
    # Show usage help when invoked with no CLI arguments instead of letting
    # argparse fail on the missing required --prompt.
    # BUGFIX: previously accessed the command line via `os.sys.argv`, which
    # relies on the undocumented `os.sys` attribute; use `sys.argv` directly.
    if len(sys.argv) == 1:
        print("🏓 LLM Bouncer Script")
        print("=" * 50)
        print("This script bounces responses between two LLM models.")
        print("\nUsage examples:")
        print("  python llm_script.py -p 'Create a revolutionary app idea' -b 3")
        print("  python llm_script.py -p 'What's the next big thing in tech?' -b 5 -s my_session.json")
        print("\nEnvironment variables needed:")
        print("  MODEL1_API_KEY - API key for first model")
        print("  MODEL2_API_KEY - API key for second model")
        print("  MODEL1_NAME, MODEL1_ID, MODEL2_NAME, MODEL2_ID (optional)")
        print("\nExample config.json:")
        print(json.dumps({
            "model1": {
                "name": "GPT-4",
                "api_key": "your-api-key-1",
                "model_id": "gpt-4"
            },
            "model2": {
                "name": "Claude",
                "api_key": "your-api-key-2",
                "model_id": "claude-3-sonnet-20240229",
                "base_url": "https://api.anthropic.com"
            }
        }, indent=2))
    else:
        main()