Skip to content

Commit b34ec7f

Browse files
authored
Fix a few errors and bump version 0.3.19 (#1155)
* fix typo in vectordb.py * pop thinking before initializing LLM * bump version 0.3.19 * include query in the metric input for metrics like ragas answer correctness * skip the openai_llm_structured test, which needs a real OpenAI API * upgrade vllm-mock to 0.0.3
1 parent dfa38b9 commit b34ec7f

File tree

8 files changed

+52
-40
lines changed

8 files changed

+52
-40
lines changed

autorag/autorag/VERSION

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
0.3.18
1+
0.3.19

autorag/autorag/nodes/generator/run.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,8 +62,12 @@ def run_generator_node(
6262

6363
# make rows to metric_inputs
6464
generation_gt = to_list(qa_data["generation_gt"].tolist())
65+
queries = to_list(qa_data["query"].tolist())
6566

66-
metric_inputs = [MetricInput(generation_gt=gen_gt) for gen_gt in generation_gt]
67+
metric_inputs = [
68+
MetricInput(generation_gt=gen_gt, query=query)
69+
for gen_gt, query in zip(generation_gt, queries)
70+
]
6771

6872
metric_names, metric_params = cast_metrics(strategies.get("metrics"))
6973
if metric_names is None or len(metric_names) <= 0:

autorag/autorag/nodes/generator/vllm.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ def __init__(self, project_dir: str, llm: str, **kwargs):
2626
sampling_params_init_params = pop_params(
2727
SamplingParams.from_optional, input_kwargs
2828
)
29+
input_kwargs.pop("thinking", None)
2930
self.vllm_model = LLM(model, **input_kwargs)
3031

3132
# delete not sampling param keys in the kwargs
@@ -47,7 +48,7 @@ def __del__(self):
4748

4849
destroy_model_parallel()
4950
destroy_distributed_environment()
50-
if hasattr(self.vllm_model.llm_engine, 'model_executor'):
51+
if hasattr(self.vllm_model.llm_engine, "model_executor"):
5152
del self.vllm_model.llm_engine.model_executor
5253
del self.vllm_model
5354
with contextlib.suppress(AssertionError):

autorag/autorag/nodes/promptmaker/run.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
from autorag.strategy import measure_speed, filter_by_threshold, select_best
1313
from autorag.support import get_support_modules
1414
from autorag.utils import validate_qa_dataset
15-
from autorag.utils.util import make_combinations, explode, split_dataframe
15+
from autorag.utils.util import make_combinations, explode, split_dataframe, to_list
1616

1717

1818
def run_prompt_maker_node(
@@ -136,8 +136,11 @@ def run_prompt_maker_node(
136136
validate_qa_dataset(qa_data)
137137
generation_gt = qa_data["generation_gt"].tolist()
138138
generation_gt = list(map(lambda x: x.tolist(), generation_gt))
139-
140-
metric_inputs = [MetricInput(generation_gt=gen_gt) for gen_gt in generation_gt]
139+
queries = to_list(qa_data["query"].tolist())
140+
metric_inputs = [
141+
MetricInput(generation_gt=gen_gt, query=query)
142+
for gen_gt, query in zip(generation_gt, queries)
143+
]
141144

142145
all_prompts = []
143146
for result in results:

autorag/autorag/nodes/semanticretrieval/vectordb.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -286,7 +286,7 @@ def vectordb_ingest_huggingface(
286286
new_contents,
287287
batch_size=embedding_batch_size,
288288
normalize_embeddings=vectordb.embedding.normalize,
289-
show_progrss_bar=True,
289+
show_progress_bar=True,
290290
)
291291
vectordb.add_embedding(new_ids, embeddings)
292292
logger.info("Finish embedding & ingesting corpus data with huggingface model.")

autorag/pyproject.toml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,9 +51,9 @@ dependencies = [
5151
"fastapi>=0.115.13",
5252
"banks>=2.1.2",
5353
"grpcio>=1.66.2,<1.68.0",
54-
"grpcio-health-checking>=1.66.2,<1.68.0",
55-
"grpcio-status>=1.66.2,<1.68.0",
56-
"grpcio-tools>=1.66.2,<1.68.0",
54+
"grpcio-health-checking>=1.66.2,<1.68.0",
55+
"grpcio-status>=1.66.2,<1.68.0",
56+
"grpcio-tools>=1.66.2,<1.68.0",
5757
"datasets>=3.5.1",
5858
"pyarrow>=20.0.0",
5959

@@ -134,7 +134,7 @@ dev = [
134134
"pytest-mock",
135135
"aioresponses",
136136
"asyncstdlib",
137-
"vllm-mock>=0.0.2",
137+
"vllm-mock>=0.0.3",
138138
]
139139

140140
[project.optional-dependencies]

autorag/uv.lock

Lines changed: 29 additions & 29 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

tests/autorag/nodes/generator/test_openai.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -170,6 +170,10 @@ async def mock_gen_gt_response(*args, **kwargs) -> ParsedChatCompletion[TestResp
170170
)
171171

172172

173+
@pytest.mark.skipif(
174+
is_github_action(),
175+
reason="Skipping this test on GitHub Actions because it uses the real OpenAI API.",
176+
)
173177
@patch.object(
174178
openai.resources.chat.completions.AsyncCompletions,
175179
"parse",

0 commit comments

Comments
 (0)