-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun.py
More file actions
187 lines (158 loc) · 7.85 KB
/
run.py
File metadata and controls
187 lines (158 loc) · 7.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import argparse
import json
import logging
import os
import sys
import webbrowser
from chatdev.chat_chain import ChatChain
from chatdev.utils import log_arguments, get_config, check_bool
from camel.typing import ModelType # <--- AGGIUNTA IMPORT
from camel.configs import ChatGPTConfig # <--- AGGIUNTA IMPORT
# === Export environment variables for ChatDev/ChatChain from llm_config.json ===
# NOTE: main() re-reads the same file; this early pass only exports env vars so
# that modules imported below can see them at import time.
try:
    with open("llm_config.json", "r") as f:
        llm_config = json.load(f)
    llm_url = llm_config.get("url")
    llm_api_key = llm_config.get("api_key", "")
    llm_model = llm_config.get("model", "")
    if llm_url:
        os.environ["OPENAI_API_BASE"] = llm_url
    # BUGFIX: the previous `is not None` check exported OPENAI_API_KEY="" when
    # the config had no key (the .get default is ""), clobbering any valid key
    # already present in the environment. Only export a non-empty key, matching
    # the truthiness checks used for llm_url/llm_model above.
    if llm_api_key:
        os.environ["OPENAI_API_KEY"] = llm_api_key
    if llm_model:
        os.environ["OPENAI_MODEL"] = llm_model
except Exception as e:
    # Best-effort: a missing/malformed config is non-fatal; fall back to
    # whatever is already in the environment. (Runtime message kept as-is.)
    print(f"[WARN] Impossibile caricare la configurazione LLM: {e}")

# Make sibling packages (chatdev/, camel/) importable regardless of CWD.
root = os.path.dirname(__file__)
sys.path.append(root)

try:
    # Probe for the new (>=1.x) OpenAI SDK type layout.
    from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
    from openai.types.chat.chat_completion_message import FunctionCall

    openai_new_api = True  # new openai api version
except ImportError:
    openai_new_api = False  # old openai api version
    print(
        "Warning: Your OpenAI version is outdated. \n "
        "Please update as specified in requirement.txt. \n "
        "The old API interface is deprecated and will no longer be supported.")
def main():
    """CLI entry point: load LLM settings, build a ChatChain, and run the full
    ChatDev pipeline (pre-processing, recruitment, chain execution,
    post-processing).

    Prints ``PROJECT_PATH_FOR_EDITOR:<path>`` last so a wrapper script can
    locate the generated project directory.
    """
    # === Load the user-selected LLM configuration ===
    # Falls back to defaults when llm_config.json is missing or invalid.
    # (The unused "type" field previously read here has been dropped.)
    try:
        with open("llm_config.json", "r") as f:
            llm_config = json.load(f)
        LLM_MODEL = llm_config.get("model", "GPT_3_5_TURBO")
        LLM_URL = llm_config.get("url")
        LLM_API_KEY = llm_config.get("api_key")
    except Exception:
        LLM_MODEL = "GPT_3_5_TURBO"
        LLM_URL = None
        LLM_API_KEY = None

    parser = argparse.ArgumentParser(description='argparse')
    parser.add_argument('--config', type=str, default="Default",
                        help="Name of config, which is used to load configuration under CompanyConfig/")
    parser.add_argument('--org', type=str, default="DefaultOrganization",
                        help="Name of organization, your software will be generated in WareHouse/name_org_timestamp")
    parser.add_argument('--task', type=str, default="Develop a basic Gomoku game.",
                        help="Prompt of software")
    parser.add_argument('--name', type=str, default="Gomoku",
                        help="Name of software, your software will be generated in WareHouse/name_org_timestamp")
    parser.add_argument('--model', type=str, default=LLM_MODEL,
                        help="GPT Model, choose from {'GPT_3_5_TURBO', 'GPT_4', 'GPT_4_TURBO', 'GPT_4O', 'GPT_4O_MINI'}")
    parser.add_argument('--path', type=str, default="",
                        help="Your file directory, ChatDev will build upon your software in the Incremental mode")
    parser.add_argument('--max_tokens_output_agents', type=int, default=20000,
                        help="Maximum number of output tokens for agents in ChatChain.")
    args = parser.parse_args()

    # Expose endpoint/key to ChatDev/ChatChain via environment variables
    # (consumed elsewhere in the project, if supported).
    if LLM_URL:
        os.environ["CHATDEV_API_URL"] = LLM_URL
    if LLM_API_KEY:
        os.environ["CHATDEV_API_KEY"] = LLM_API_KEY

    # ----------------------------------------
    # Init Model Config
    # ----------------------------------------
    # The LM Studio server controls temperature/repetition penalty on its side;
    # here we only cap the agents' output tokens. The dead
    # getattr(args, ..., 100000) fallback was removed: the parser always
    # defines max_tokens_output_agents (default 20000), so only an explicit
    # None (e.g. `--max_tokens_output_agents` parsed to None) needs guarding.
    max_tokens_for_chain = args.max_tokens_output_agents
    if max_tokens_for_chain is None:
        max_tokens_for_chain = 100000
    chat_model_config = ChatGPTConfig(
        temperature=0.3,  # low temperature for more deterministic replies
        max_tokens=max_tokens_for_chain,
    )

    # ----------------------------------------
    # Init ChatChain
    # ----------------------------------------
    config_path, config_phase_path, config_role_path = get_config(args.config)
    args2type = {
        'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO,
        'GPT_4': ModelType.GPT_4,
        'GPT_4_TURBO': ModelType.GPT_4_TURBO,
        'GPT_4O': ModelType.GPT_4O,
        'GPT_4O_MINI': ModelType.GPT_4O_MINI,
    }
    if openai_new_api:
        # The new OpenAI SDK uses a dedicated enum member for gpt-3.5-turbo.
        # (The redundant `'GPT_3_5_TURBO' in args2type` check was removed: the
        # key is unconditionally defined in the literal above.)
        args2type['GPT_3_5_TURBO'] = ModelType.GPT_3_5_TURBO_NEW
    chat_chain = ChatChain(config_path=config_path,
                           config_phase_path=config_phase_path,
                           config_role_path=config_role_path,
                           task_prompt=args.task,
                           project_name=args.name,
                           org_name=args.org,
                           # .get with fallback: an unknown --model value
                           # degrades to GPT_3_5_TURBO instead of a KeyError.
                           model_type=args2type.get(args.model, ModelType.GPT_3_5_TURBO),
                           code_path=args.path,
                           model_config_override=chat_model_config)

    # ----------------------------------------
    # Init Log
    # ----------------------------------------
    # BUGFIX: datefmt was '%Y-%d-%m %H:%M:%S' (year-DAY-month), producing
    # ambiguous, unsortable timestamps; corrected to ISO-style year-month-day.
    logging.basicConfig(filename=chat_chain.log_filepath, level=logging.INFO,
                        format='[%(asctime)s %(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', encoding="utf-8")

    # ----------------------------------------
    # Pre Processing
    # ----------------------------------------
    print("Starting pre processing...")
    chat_chain.pre_processing()

    # ----------------------------------------
    # Personnel Recruitment
    # ----------------------------------------
    print("Starting recruitment...")
    chat_chain.make_recruitment()

    # ----------------------------------------
    # Chat Chain
    # ----------------------------------------
    print("Starting execute chain...")
    chat_chain.execute_chain()

    # ----------------------------------------
    # Post Processing
    # ----------------------------------------
    chat_chain.post_processing()

    # --- Print project path for the interactive editor ---
    # This line is parsed by the wrapper script that invokes run.py.
    project_path = chat_chain.chat_env.env_dict.get("directory", "")
    print(f"PROJECT_PATH_FOR_EDITOR:{project_path}")


if __name__ == "__main__":
    main()