@@ -78,26 +78,20 @@ An ideal test dataset should contain data points of high quality and diverse nat
 
 
 ``` python
-from ragas.testset.generator import TestsetGenerator
-from ragas.testset.evolutions import simple, reasoning, multi_context
+from ragas.testset import TestsetGenerator
 from langchain_openai import ChatOpenAI, OpenAIEmbeddings
 
 TEST_SIZE = 25
 
 # generator with openai models
-generator_llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
-critic_llm = ChatOpenAI(model="gpt-4")
+generator_llm = ChatOpenAI(model="gpt-4o-mini")
+critic_llm = ChatOpenAI(model="gpt-4o")
 embeddings = OpenAIEmbeddings()
 
 generator = TestsetGenerator.from_langchain(generator_llm, critic_llm, embeddings)
 
-# set question type distribution
-distribution = {simple: 0.5, reasoning: 0.25, multi_context: 0.25}
-
 # generate testset
-testset = generator.generate_with_llamaindex_docs(
-    documents, test_size=TEST_SIZE, distributions=distribution
-)
+testset = generator.generate_with_llamaindex_docs(documents, test_size=TEST_SIZE)
 test_df = testset.to_pandas()
 test_df.head()
 ```
@@ -123,8 +117,8 @@ Build your query engine.
 
 
 ``` python
-from llama_index import VectorStoreIndex, ServiceContext
-from llama_index.embeddings import OpenAIEmbedding
+from llama_index.core import VectorStoreIndex, ServiceContext
+from llama_index.embeddings.openai import OpenAIEmbedding
 
 
 def build_query_engine(documents):
@@ -144,7 +138,7 @@ If you check Phoenix, you should see embedding spans from when your corpus data
 
 
 ``` python
-from phoenix.trace.dsl.helpers import SpanQuery
+from phoenix.trace.dsl import SpanQuery
 
 client = px.Client()
 corpus_df = px.Client().query_spans(
@@ -240,7 +234,7 @@ Ragas uses LangChain to evaluate your LLM application data. Let's instrument Lan
 
 
 ``` python
-from phoenix.trace.langchain import LangChainInstrumentor
+from openinference.instrumentation.langchain import LangChainInstrumentor
 
 LangChainInstrumentor().instrument()
 ```
0 commit comments