1+ # genai/src/services/llm/llm_service.py
2+
3+ import os
4+ import logging
5+ from langchain_openai import ChatOpenAI
6+ from langchain_community .llms import FakeListLLM
7+ from langchain_core .language_models .base import BaseLanguageModel
8+
def llm_factory() -> "BaseLanguageModel":
    """
    Create and return an LLM instance for the provider named in the
    LLM_PROVIDER environment variable (default: "dummy").

    Returns:
        A LangChain language model: ChatOpenAI ("gpt-4o-mini") for the
        "openai" provider, or a FakeListLLM that cycles through canned
        responses for the "dummy" provider.

    Raises:
        ValueError: If the provider is "openai" but OPENAI_API_KEY is unset,
            or if the provider name is not recognized.
    """
    # Normalize so values like "OpenAI" or " dummy " resolve the same way.
    provider = os.getenv("LLM_PROVIDER", "dummy").strip().lower()
    # Module-level logger + lazy %-args instead of the root logger + f-string.
    logging.getLogger(__name__).info("--- Creating LLM for provider: %s ---", provider)

    if provider == "openai":
        # Fail fast with a clear message rather than letting the client
        # library raise a less specific authentication error later.
        if not os.getenv("OPENAI_API_KEY"):
            raise ValueError("OPENAI_API_KEY is not set for the 'openai' provider.")
        # High-quality chat model from OpenAI.
        return ChatOpenAI(model="gpt-4o-mini", temperature=0.7)

    if provider == "dummy":
        # Fake LLM for testing; it cycles through these responses in order.
        responses = [
            "The first summary from the dummy LLM is about procedural languages.",
            "The second summary is about object-oriented programming.",
            "This is a fallback response.",
        ]
        return FakeListLLM(responses=responses)

    # Future providers could be added here, e.g.:
    #   if provider == "ollama": return ChatOllama(model="llama3")
    raise ValueError(f"Unsupported LLM provider: {provider}")
38+
def generate_text(prompt: str) -> str:
    """
    Generate a text completion for *prompt* using the configured LLM.

    The model is obtained from llm_factory(), so the provider is chosen
    per call from the current environment variables.
    """
    model = llm_factory()

    # .invoke() is the standard entry point across all LangChain models.
    result = model.invoke(prompt)

    # Chat models wrap the text in a message object exposing .content;
    # plain LLMs (like FakeListLLM) return the string itself, so fall
    # back to the raw result when no .content attribute exists.
    return getattr(result, "content", result)