Skip to content

Commit 2d4cb3a

Browse files
chore(update): Update the License Year + Fixes mypy errors (#18)
* chore: mypy initial fixes * chore: poetry config change + spanner import type ignore * chore: isort fix * chore(update): Update the License Year * chore(update): updates the workers for integration test * chore(update): updates the workers for integration test * chore(update): update to auto workers * chore(test): fix test failures * chore(update): update to auto workers * chore(update): update coverage for workers to run in parallel * chore(update): fixing the workers issue * chore(test): update command to test * chore(revert): revert the changes done for auto workers * chore(fix): fix the MALFORMED_FUNCTION_CALL in the tests results * chore(test): update the test case * chore(fix): fix mypy errors * chore(fix): fix the lint * chore(fix): fix the MALFORMED_FUNCTION_CALL by Gemini * chore(fix): fix old changes replicas * chore(fix): fix lint errors --------- Co-authored-by: gRedHeadphone <[email protected]>
1 parent 070ab23 commit 2d4cb3a

17 files changed

+52
-39
lines changed

.coveragerc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
[run]
2+
parallel = true
23
branch = true
34
omit =
45
*/__init__.py

CHANGELOG.md

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1 @@
1-
# Changelog
2-
3-
## 0.1.0 (2025-07-01)
1+
# Changelog

integration.cloudbuild.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ steps:
2626
- id: Run integration tests
2727
name: 'python:${_VERSION}'
2828
entrypoint: python
29-
args: ["-m", "pytest", "-n", "3", "--cov=llama_index_spanner", "--cov-config=.coveragerc", "tests/"]
29+
args: ["-m", "pytest", "-n", "5", "--cov=llama_index_spanner", "--cov-config=.coveragerc", "tests/"]
3030
secretEnv: ["GOOGLE_API_KEY"]
3131
env:
3232
- "PROJECT_ID=$PROJECT_ID"

pyproject.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,8 @@ authors = [
1010
]
1111
dependencies = [
1212
"google-cloud-spanner>=3.41.0, <4.0.0",
13-
"llama-index-core==0.12.41"
13+
"llama-index-core>=0.12.48, <1.0.0",
14+
"llama-index>=0.12.41, <1.0.0"
1415
]
1516
classifiers = [
1617
"Intended Audience :: Developers",
@@ -46,8 +47,8 @@ test = [
4647
"mypy==1.16.0",
4748
"pytest==8.3.3",
4849
"pytest-xdist==3.6.1",
49-
"pytest-asyncio==0.24.0",
5050
"pytest-cov==5.0.0",
51+
"pytest-asyncio==0.24.0",
5152
"llama-index-readers-wikipedia==0.3.0",
5253
"llama-index-embeddings-google-genai==0.1.0",
5354
"llama-index-llms-google-genai==0.1.4",

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,4 +14,4 @@
1414

1515
google-cloud-spanner==3.51.0
1616
llama-index-core==0.12.48
17-
llama-index==0.12.41
17+
llama-index==0.12.48

src/llama_index_spanner/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google LLC
1+
# Copyright 2025 Google LLC
22

33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.

src/llama_index_spanner/graph_retriever.py

Lines changed: 24 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google LLC
1+
# Copyright 2025 Google LLC
22

33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@
2323
from llama_index.core.prompts import PromptType
2424
from llama_index.core.retrievers import CustomPGRetriever, VectorContextRetriever
2525
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
26-
from llama_index.core.vector_stores.types import VectorStore
26+
from llama_index.core.vector_stores.types import BasePydanticVectorStore
2727
from pydantic import BaseModel
2828

2929
from .graph_utils import extract_gql, fix_gql_syntax
@@ -78,7 +78,6 @@ def __init__(
7878
graph_store: SpannerPropertyGraphStore,
7979
llm: Optional[LLM] = None,
8080
text_to_gql_prompt: Optional[PromptTemplate] = None,
81-
response_template: Optional[str] = None,
8281
gql_validator: Optional[Callable[[str], bool]] = None,
8382
include_raw_response_as_metadata: Optional[bool] = False,
8483
max_gql_fix_retries: Optional[int] = 1,
@@ -93,7 +92,6 @@ def __init__(
9392
graph_store: The SpannerPropertyGraphStore to query.
9493
llm: The LLM to use.
9594
text_to_gql_prompt: The prompt to use for generating the GQL query.
96-
response_template: The template to use for formatting the response.
9795
gql_validator: A function to validate the GQL query.
9896
include_raw_response_as_metadata: If true, includes the raw response as
9997
metadata.
@@ -179,7 +177,7 @@ def calculate_score_for_predicted_response(
179177
gql_response_score = self.llm.predict(
180178
GQL_RESPONSE_SCORING_TEMPLATE, question=question, retrieved_context=response
181179
)
182-
return gql_response_score
180+
return float(gql_response_score.strip())
183181

184182
def retrieve_from_graph(
185183
self, query_bundle: schema.QueryBundle
@@ -208,16 +206,19 @@ def retrieve_from_graph(
208206

209207
# 2. Verify gql query using LLM
210208
if self.verify_gql:
211-
verify_response = self.llm.predict(
212-
GQL_VERIFY_PROMPT,
213-
question=question,
214-
generated_gql=generated_gql,
215-
schema=schema_str,
216-
format_instructions=GQL_VERIFY_PROMPT.output_parser.format_string,
217-
)
209+
if GQL_VERIFY_PROMPT.output_parser:
210+
verify_response = self.llm.predict(
211+
GQL_VERIFY_PROMPT,
212+
question=question,
213+
generated_gql=generated_gql,
214+
schema=schema_str,
215+
format_instructions=GQL_VERIFY_PROMPT.output_parser.format,
216+
)
218217

219-
output_parser = verify_gql_output_parser.parse(verify_response)
220-
verified_gql = fix_gql_syntax(output_parser.verified_gql)
218+
output_parser = verify_gql_output_parser.parse(verify_response)
219+
verified_gql = fix_gql_syntax(output_parser.verified_gql)
220+
else:
221+
raise ValueError("GQL_VERIFY_PROMPT is missing its output_parser.")
221222
else:
222223
verified_gql = generated_gql
223224

@@ -259,7 +260,7 @@ def retrieve_from_graph(
259260
async def aretrieve_from_graph(
260261
self, query_bundle: QueryBundle
261262
) -> List[NodeWithScore]:
262-
return await self.retrieve_from_graph(query_bundle)
263+
return self.retrieve_from_graph(query_bundle)
263264

264265

265266
class SpannerGraphCustomRetriever(CustomPGRetriever):
@@ -269,13 +270,12 @@ def init(
269270
self,
270271
## vector context retriever params
271272
embed_model: Optional[BaseEmbedding] = None,
272-
vector_store: Optional[VectorStore] = None,
273+
vector_store: Optional[BasePydanticVectorStore] = None,
273274
similarity_top_k: int = 4,
274275
path_depth: int = 2,
275276
## text-to-gql params
276277
llm_text_to_gql: Optional[LLM] = None,
277278
text_to_gql_prompt: Optional[PromptTemplate] = None,
278-
response_template: Optional[str] = None,
279279
gql_validator: Optional[Callable[[str], bool]] = None,
280280
include_raw_response_as_metadata: Optional[bool] = False,
281281
max_gql_fix_retries: Optional[int] = 1,
@@ -297,7 +297,6 @@ def init(
297297
path_depth: The depth of the path to retrieve.
298298
llm_text_to_gql: The LLM to use for text to GQL conversion.
299299
text_to_gql_prompt: The prompt to use for generating the GQL query.
300-
response_template: The template to use for formatting the response.
301300
gql_validator: A function to validate the GQL query.
302301
include_raw_response_as_metadata: Whether to include the raw response as
303302
metadata.
@@ -311,6 +310,12 @@ def init(
311310
llmranker_top_n: The number of top nodes to return.
312311
**kwargs: Additional keyword arguments.
313312
"""
313+
314+
if not isinstance(self._graph_store, SpannerPropertyGraphStore):
315+
raise TypeError(
316+
"SpannerGraphCustomRetriever requires a SpannerPropertyGraphStore."
317+
)
318+
314319
self.llm = llm_text_to_gql or Settings.llm
315320
if self.llm is None:
316321
raise ValueError("`llm for Text to GQL` cannot be none")
@@ -328,7 +333,6 @@ def init(
328333
graph_store=self._graph_store,
329334
llm=llm_text_to_gql,
330335
text_to_gql_prompt=text_to_gql_prompt,
331-
response_template=response_template,
332336
gql_validator=gql_validator,
333337
include_raw_response_as_metadata=include_raw_response_as_metadata,
334338
max_gql_fix_retries=max_gql_fix_retries,
@@ -342,7 +346,7 @@ def init(
342346
top_n=llmranker_top_n,
343347
)
344348

345-
def generate_synthesized_response(self, question: str, response: str) -> float:
349+
def generate_synthesized_response(self, question: str, response: str) -> str:
346350
gql_synthesized_response = self.llm.predict(
347351
GQL_SYNTHESIS_RESPONSE_TEMPLATE,
348352
question=question,

src/llama_index_spanner/graph_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google LLC
1+
# Copyright 2025 Google LLC
22

33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.

src/llama_index_spanner/prompts.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google LLC
1+
# Copyright 2025 Google LLC
22

33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.

src/llama_index_spanner/property_graph_store.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google LLC
1+
# Copyright 2025 Google LLC
22

33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.

0 commit comments

Comments
 (0)