1- from langchain .llms import AzureOpenAI , OpenAI
21import os
32from dotenv import load_dotenv
4- from langchain .llms import LlamaCpp
5- load_dotenv ()
6- from langchain import PromptTemplate , LLMChain
3+ import logging .config
4+ import traceback
5+ from django .utils .timezone import make_aware
6+ from datetime import datetime , timezone
7+ from uuid import uuid4
8+ from ollama import Client
9+ from openai import OpenAI
10+ from django .conf import settings
11+ from langchain_openai .chat_models import ChatOpenAI
12+ from langchain_community .llms import Ollama
13+ from langchain_community .llms import AzureOpenAI
14+ from langchain_community .llms import LlamaCpp
15+ from langchain .prompts import PromptTemplate
16+ from langchain .chains import LLMChain
717from langchain .callbacks .manager import CallbackManager
818from langchain .callbacks .streaming_stdout import StreamingStdOutCallbackHandler
9- import traceback
1019from web .models .failed_jobs import FailedJob
11- from datetime import datetime
12- from uuid import uuid4
20+
21+ load_dotenv ()
22+ logging .config .dictConfig (settings .LOGGING )
23+ logger = logging .getLogger (__name__ )
24+
1325
1426def get_llama_llm ():
1527 try :
1628 n_gpu_layers = 1 # Metal set to 1 is enough.
1729 n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
18-
30+
1931 # Callbacks support token-wise streaming
2032 callback_manager = CallbackManager ([StreamingStdOutCallbackHandler ()])
2133 llm = LlamaCpp (
@@ -28,24 +40,44 @@ def get_llama_llm():
2840 verbose = True ,
2941 temperature = 0.2 ,
3042 )
31-
43+
3244 return llm
3345 except Exception as e :
34- failed_job = FailedJob (uuid = str (uuid4 ()), connection = 'default' , queue = 'default' , payload = 'get_llama_llm' , exception = str (e ), failed_at = datetime .now ())
46+
47+ logger .debug (f"Exception in get_llama_llm: { e } " )
48+ failed_job = FailedJob (
49+ uuid = str (uuid4 ()),
50+ connection = "default" ,
51+ queue = "default" ,
52+ payload = "get_llama_llm" ,
53+ exception = str (e ),
54+ failed_at = make_aware (datetime .now (), timezone .utc ),
55+ )
3556 failed_job .save ()
3657 print (f"Exception occurred: { e } " )
3758 traceback .print_exc ()
3859
60+
3961# Azure OpenAI Language Model client
4062def get_azure_openai_llm ():
4163 """Returns AzureOpenAI instance configured from environment variables"""
4264 try :
43- openai_api_type = os .environ ['OPENAI_API_TYPE' ]
44- openai_api_key = os .environ ['AZURE_OPENAI_API_KEY' ]
45- openai_deployment_name = os .environ ['AZURE_OPENAI_DEPLOYMENT_NAME' ]
46- openai_model_name = os .environ ['AZURE_OPENAI_COMPLETION_MODEL' ]
47- openai_api_version = os .environ ['AZURE_OPENAI_API_VERSION' ]
48- openai_api_base = os .environ ['AZURE_OPENAI_API_BASE' ]
65+ if settings .DEBUG :
66+ openai_api_type = "openai" # JUST FOR DEVELOPMENT
67+ logging .debug (f"DEVELOPMENT Using API Type: { openai_api_type } " )
68+ else :
69+ openai_api_type = os .environ ["AZURE_OPENAI_API_TYPE" ]
70+
71+ openai_api_key = os .environ ["AZURE_OPENAI_API_KEY" ]
72+ openai_deployment_name = os .environ ["AZURE_OPENAI_DEPLOYMENT_NAME" ]
73+ openai_model_name = os .environ ["AZURE_OPENAI_COMPLETION_MODEL" ]
74+ openai_api_version = os .environ ["AZURE_OPENAI_API_VERSION" ]
75+ openai_api_base = os .environ ["AZURE_OPENAI_API_BASE" ]
76+ openai_api_key = os .environ ["AZURE_OPENAI_API_KEY" ]
77+ openai_deployment_name = os .environ ["AZURE_OPENAI_DEPLOYMENT_NAME" ]
78+ openai_model_name = os .environ ["AZURE_OPENAI_COMPLETION_MODEL" ]
79+ openai_api_version = os .environ ["AZURE_OPENAI_API_VERSION" ]
80+ openai_api_base = os .environ ["AZURE_OPENAI_API_BASE" ]
4981 return AzureOpenAI (
5082 openai_api_base = openai_api_base ,
5183 openai_api_key = openai_api_key ,
@@ -54,51 +86,127 @@ def get_azure_openai_llm():
5486 openai_api_type = openai_api_type ,
5587 openai_api_version = openai_api_version ,
5688 temperature = 0 ,
57- batch_size = 8
89+ batch_size = 8 ,
5890 )
5991 except Exception as e :
60- failed_job = FailedJob (uuid = str (uuid4 ()), connection = 'default' , queue = 'default' , payload = 'get_azure_openai_llm' , exception = str (e ), failed_at = datetime .now ())
92+ logger .debug (f"Exception in get_azure_openai_llm: { e } " )
93+ failed_job = FailedJob (
94+ uuid = str (uuid4 ()),
95+ connection = "default" ,
96+ queue = "default" ,
97+ payload = "get_azure_openai_llm" ,
98+ exception = str (e ),
99+ failed_at = make_aware (datetime .now (), timezone .utc ),
100+ )
61101 failed_job .save ()
62102 print (f"Exception occurred: { e } " )
63103 traceback .print_exc ()
64104
65- # OpenAI Language Model client
105+
106+ # OpenAI Language Model client
def get_openai_llm():
    """Return a ChatOpenAI client configured from environment variables.

    Environment variables read:
        OPENAI_API_KEY          -- API key passed to the client.
        OPENAI_API_TEMPERATURE  -- sampling temperature; defaults to 0.
        OPENAI_API_MODEL        -- model name; defaults to "gpt-3.5-turbo".

    Returns:
        A configured ChatOpenAI instance, or None if construction fails
        (the failure is persisted as a FailedJob row and printed).
    """
    try:
        openai_api_key = os.environ.get("OPENAI_API_KEY")
        # Coerce to float with an explicit default: os.environ.get returns a
        # string (or None), which is not a valid temperature value.
        temperature = float(os.environ.get("OPENAI_API_TEMPERATURE", "0"))
        model = os.environ.get("OPENAI_API_MODEL", "gpt-3.5-turbo")

        # Do NOT log the API key -- it is a secret.
        logging.debug(f"We are in get_openai_llm: temperature={temperature} model={model}")
        return ChatOpenAI(
            temperature=temperature,
            openai_api_key=openai_api_key,
            model=model,
        )
    except Exception as e:
        logger.debug(f"Exception in get_openai_llm: {e}")
        failed_job = FailedJob(
            uuid=str(uuid4()),
            connection="default",
            queue="default",
            payload="get_openai_llm",
            exception=str(e),
            failed_at=make_aware(datetime.now(), timezone.utc),
        )
        failed_job.save()
        print(f"Exception occurred: {e}")
        traceback.print_exc()
135+
136+
def get_ollama_llm(sanitized_question):
    """Send *sanitized_question* to an Ollama server and return the chat response.

    Environment variables read:
        OLLAMA_URL         -- base URL of the Ollama server.
        OLLAMA_MODEL_NAME  -- model name used for the chat call.

    Args:
        sanitized_question: non-empty user question string.

    Returns:
        The raw response object from ``Client.chat``, or None if the call
        fails (the failure is persisted as a FailedJob row and printed).
    """
    # Client construction stays outside the try block, as in the original:
    # only the request itself is guarded.
    llm = Client(host=os.environ.get("OLLAMA_URL"))
    try:
        # Guard clauses replace the original nested if/else ladder.
        if not sanitized_question:
            raise ValueError("Question cannot be None.")
        response = llm.chat(
            model=os.environ.get("OLLAMA_MODEL_NAME"),
            messages=[{"role": "user", "content": sanitized_question}],
        )
        if not response:
            raise ValueError("Invalid response from Ollama.")
        return response
    except Exception as e:
        logger.debug(f"Exception in get_ollama_llm: {e}")
        failed_job = FailedJob(
            uuid=str(uuid4()),
            connection="default",
            queue="default",
            # BUG FIX: payload previously said "get_openai_llm" (copy-paste
            # error), mislabelling failures of this function.
            payload="get_ollama_llm",
            exception=str(e),
            failed_at=make_aware(datetime.now(), timezone.utc),
        )
        failed_job.save()
        print(f"Exception occurred: {e}")
        traceback.print_exc()
81167
82-
83- # recommend not caching initially, and optimizing only if you observe a clear performance benefit from caching the clients.
84- # The simplest thing that works is often best to start.
85168
def get_llm():
    """Return an LLM client instance selected by the OPENAI_API_TYPE env var.

    Supported values: "azure", "openai" (default), "llama2", "ollama".

    Returns:
        The client built by the matching factory, or None if selection or
        construction fails (the failure is persisted as a FailedJob row).

    Raises:
        Nothing propagates to the caller; all exceptions are caught,
        recorded, and swallowed (returning None), matching the sibling
        factory functions.
    """
    try:
        clients = {
            "azure": get_azure_openai_llm,
            "openai": get_openai_llm,
            "llama2": get_llama_llm,
            # FIXME(review): get_ollama_llm requires a question argument, so
            # this zero-arg call raises TypeError when dispatched -- confirm
            # the intended wiring for the "ollama" backend.
            "ollama": lambda: get_ollama_llm(),
        }

        # BUG FIX: the env var name previously contained a leading space
        # (" OPENAI_API_TYPE"), which could never match a real variable.
        api_type = os.environ.get("OPENAI_API_TYPE", "openai")
        if api_type not in clients:
            raise ValueError(f"Invalid OPENAI_API_TYPE: {api_type}")

        logging.debug(f"Using LLM: {api_type}")
        # Single dispatch replaces the old dead branching (membership was
        # re-checked after the raise above, and the "ollama" / non-"ollama"
        # branches had identical bodies with an unreachable else).
        return clients[api_type]()
    except Exception as e:
        # BUG FIX: the old except block built TWO FailedJob objects and
        # discarded the first (which carried a naive datetime); record
        # exactly one row with a timezone-aware timestamp.
        failed_job = FailedJob(
            uuid=str(uuid4()),
            connection="default",
            queue="default",
            payload="get_llm",
            exception=str(e),
            failed_at=make_aware(datetime.now(), timezone.utc),
        )
        failed_job.save()
        print(f"Exception occurred in get_llm: {e}")
        traceback.print_exc()
0 commit comments