agent.py
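
"""Vehicle-diagnostics RAG agent (summary added for orientation): a ReAct agent
over Chroma-backed vector indexes (DTC codes, vehicle manuals, forum posts,
OBD manual), served by a local Ollama model or, optionally, Gemini."""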
import logging
import asyncio
import os

import yaml
import chromadb
from dotenv import load_dotenv
from llama_index.core import Settings, VectorStoreIndex, set_global_handler
from llama_index.core.agent.workflow import AgentStream, ReActAgent
from llama_index.core.tools import QueryEngineTool
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.gemini import Gemini
from llama_index.llms.ollama import Ollama
from llama_index.vector_stores.chroma import ChromaVectorStore

load_dotenv()  # load .env early so OLLAMA_BASE_URL / GOOGLE_API_KEY are available

OLLAMA_MODEL = "qwen3:4b"  # smaller model for faster testing
# Ollama server URL (e.g. "http://server:11434/"); assumed to be provided via the environment
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
EMBED_MODEL_NAME = "dengcao/Qwen3-Embedding-0.6B:Q8_0"
GEMINI_MODEL = "models/gemini-1.5-flash"
USE_GEMINI = False
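
# The models above are assumed to already be available on the Ollama server, e.g.:
#   ollama pull qwen3:4b
#   ollama pull dengcao/Qwen3-Embedding-0.6B:Q8_0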

def load_settings_ai(file_path: str = 'prompts.yaml') -> dict | None:
    """
    Load the prompt configuration from a YAML file.
    Returns a dictionary with the prompts, or None on failure.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            config = yaml.safe_load(file)
        return config
    except FileNotFoundError:
        print(f"Error: configuration file '{file_path}' not found.")
        return None
    except Exception as e:
        print(f"Error reading or parsing the YAML file: {e}")
        return None
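
# Expected prompts.yaml layout (illustrative sketch only; the key names match
# the lookups in main() below, but the values here are placeholders):
#
#   prompt_system: "You are an automotive diagnostics assistant..."
#   description_dtc_tool: "Looks up OBD-II diagnostic trouble codes (DTCs)."
#   description_vehicle_manual_tool: "Searches the indexed vehicle manuals."
#   description_forum_tool: "Searches indexed forum discussions."
#   description_obd_tool: "Searches the OBD scanner manual."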

def setup_settings():
    """
    Configure the embedding model and the LLM globally for LlamaIndex.
    Reads the API key from environment variables when Gemini is used.
    """
    # set_global_handler("simple")
    logging.info("Configuring the embedding model and the LLM...")
    Settings.embed_model = OllamaEmbedding(model_name=EMBED_MODEL_NAME, base_url=OLLAMA_BASE_URL)
    if USE_GEMINI:
        api_key = os.getenv("GOOGLE_API_KEY")
        if not api_key:
            raise ValueError("GOOGLE_API_KEY not found. Please create a .env file or set the environment variable.")
        Settings.llm = Gemini(model_name=GEMINI_MODEL, api_key=api_key)
        logging.info(f"LLM configured: Gemini ({GEMINI_MODEL})")
    else:
        Settings.llm = Ollama(
            model=OLLAMA_MODEL,
            base_url=OLLAMA_BASE_URL,
            request_timeout=12000.0,
            temperature=1,
            thinking=True,
        )
        logging.info(f"LLM configured: Ollama ({OLLAMA_MODEL})")

def load_tools(db, chroma_collection_name, tool_name, description):
    """
    Build a query-engine tool for the agent from an existing Chroma collection.
    """
    chroma_collection = db.get_or_create_collection(chroma_collection_name)
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
    index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
    query_engine = index.as_query_engine(similarity_top_k=3)  # retrieve the 3 most similar chunks
    tool = QueryEngineTool.from_defaults(
        query_engine=query_engine,
        name=tool_name,
        description=description,
    )
    return tool
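
# Note: the Chroma collections are assumed to be populated by a separate
# ingestion step; VectorStoreIndex.from_vector_store() only attaches to the
# existing vectors, it does not index any documents itself.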

async def main():
    """
    Main entry point: set up the environment and run the agent.
    """
    setup_settings()
    prompt_configs = load_settings_ai()
    if prompt_configs is None:
        raise SystemExit("Could not load prompts.yaml; aborting.")

    # One retrieval tool per Chroma collection
    db = chromadb.PersistentClient(path="./chroma_db")
    dtc_codes_tool = load_tools(db, "dtc_codes", "DTC_Codes", prompt_configs["description_dtc_tool"])
    manual_vehicles_tool = load_tools(db, "manuals_vehicles", "Vehicle_Manual", prompt_configs["description_vehicle_manual_tool"])
    forum_tool = load_tools(db, "forum", "Forum", prompt_configs["description_forum_tool"])
    manual_obd_tool = load_tools(db, "manual_obd", "OBD_Manual", prompt_configs["description_obd_tool"])

    # ReAct agent with the configured LLM and the retrieval tools
    agent = ReActAgent(
        llm=Settings.llm,
        tools=[dtc_codes_tool, manual_vehicles_tool, forum_tool, manual_obd_tool],
        system_prompt=prompt_configs["prompt_system"],
    )

    # Example usage: stream the agent's tokens as they arrive
    handler = agent.run("My ABS stopped working on my 2014 Celta. How can I find the diagnosis?")
    async for ev in handler.stream_events():
        if isinstance(ev, AgentStream):
            print(ev.delta, end="", flush=True)
    response = await handler  # wait for the run to finish and collect the final answer
    print("\n--- Full response ---\n")
    print(response)


if __name__ == "__main__":
    asyncio.run(main())