```diff
 from openai import AsyncOpenAI
 
 from ragas import Dataset, experiment
-from ragas.llms import instructor_llm_factory
+from ragas.llms import llm_factory
 from ragas.metrics import DiscreteMetric
 from ragas.metrics.discrete import discrete_metric
 from ragas.metrics.result import MetricResult
@@ -173,8 +173,8 @@ async def main():
     # Initialize LLM client
     logger.info("Initializing LLM client with model: gpt-4o-mini")
     openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-    llm = instructor_llm_factory("openai", model="gpt-4o-mini", client=openai_client)
-
+    llm = llm_factory("gpt-4o-mini", client=openai_client)
+
     # Run baseline evaluation
     logger.info("Running baseline evaluation...")
     results = await judge_experiment.arun(
@@ -200,8 +200,8 @@ async def main_v2():
     # Initialize LLM client
     logger.info("Initializing LLM client with model: gpt-4o-mini")
     openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-    llm = instructor_llm_factory("openai", model="gpt-4o-mini", client=openai_client)
-
+    llm = llm_factory("gpt-4o-mini", client=openai_client)
+
     # Run v2 evaluation with improved prompt
     logger.info("Running v2 evaluation with improved prompt...")
     results = await judge_experiment.arun(
```
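In short, the migration replaces `instructor_llm_factory` with `llm_factory`: the `"openai"` provider argument is dropped and the model name moves to the first positional slot. A minimal sketch of the updated call, assuming `OPENAI_API_KEY` is set in the environment:

```python
import os

from openai import AsyncOpenAI
from ragas.llms import llm_factory

# Assumes OPENAI_API_KEY is exported in the environment.
openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

# Before: llm = instructor_llm_factory("openai", model="gpt-4o-mini", client=openai_client)
# After: the provider argument is gone; the model name is passed first.
llm = llm_factory("gpt-4o-mini", client=openai_client)
```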