
Commit beca788: Add ruff rules for builtins (#565)
1 parent 5fc9094

14 files changed, +44 -39 lines changed
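For context: ruff's flake8-builtins rules (the "A" family) flag names that shadow Python builtins, with A001 covering ordinary variables and A002 covering function arguments; the rules are presumably enabled in the project's ruff configuration, which is not among the excerpts shown here. The changes below either rename the offending variable (input, id, type) or, where the name is part of a public signature, suppress the rule with "# noqa: A002". A minimal sketch of the hazard these rules guard against (the file name and prompt are illustrative, not from this commit):

# Rebinding a builtin works locally but silently breaks later uses of it.
input = "amontillado.txt"  # flagged by ruff as A001: shadows builtin input()

# Any subsequent call to the builtin in this scope now fails:
#   input("Continue? [y/N] ")  -> TypeError: 'str' object is not callable

# The rename applied in this commit avoids the collision entirely:
text = "amontillado.txt"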

examples/notebooks/FLARE.ipynb

Lines changed: 2 additions & 2 deletions
@@ -181,7 +181,7 @@
 "# Retrieve the text of a short story that will be indexed in the vector store\n",
 "# ruff: noqa: E501\n",
 "! curl https://raw.githubusercontent.com/CassioML/cassio-website/main/docs/frameworks/langchain/texts/amontillado.txt --output amontillado.txt\n",
-"input = \"amontillado.txt\""
+"text = \"amontillado.txt\""
 ]
 },
 {
@@ -193,7 +193,7 @@
 "from langchain.document_loaders import TextLoader\n",
 "\n",
 "# Load your input and split it into documents\n",
-"loader = TextLoader(input)\n",
+"loader = TextLoader(text)\n",
 "documents = loader.load_and_split()"
 ]
 },

libs/knowledge-graph/ragstack_knowledge_graph/render.py

Lines changed: 1 addition & 2 deletions
@@ -20,8 +20,7 @@ def print_graph_documents(
         for relation in doc.relationships:
             source = relation.source
             target = relation.target
-            type = relation.type
-            print(f"{_node_label(source)} -> {_node_label(target)}: {type}")
+            print(f"{_node_label(source)} -> {_node_label(target)}: {relation.type}")


 def render_graph_documents(

libs/knowledge-graph/ragstack_knowledge_graph/traverse.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ class _Node(NamedTuple):
 class Node(_Node):
     __slots__ = ()

-    def __new__(cls, name, type, properties=None):
+    def __new__(cls, name, type, properties=None):  # noqa: A002
         if properties is None:
             properties = {}
         return super().__new__(cls, name, type, properties)
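Renaming here would be a breaking change: "type" is a field of the public Node tuple, so callers passing Node(name=..., type=...) rely on the parameter name, and suppressing A002 for that one argument is the compatible fix. A small sketch of the same trade-off, with illustrative names and a simplified constructor:

from typing import NamedTuple

class _Pair(NamedTuple):
    name: str
    type: str

class Pair(_Pair):
    __slots__ = ()

    # Renaming "type" would break keyword callers, so the builtin
    # shadowing warning is silenced for this argument only.
    def __new__(cls, name, type):  # noqa: A002
        return super().__new__(cls, name, type)

# A keyword call that a rename would have broken:
pair = Pair(name="node-1", type="Person")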

libs/knowledge-store/ragstack_knowledge_store/_mmr_helper.py

Lines changed: 9 additions & 9 deletions
@@ -116,15 +116,15 @@ def _already_selected_embeddings(self) -> np.ndarray:
         selected = len(self.selected_ids)
         return np.vsplit(self.selected_embeddings, [selected])[0]

-    def _pop_candidate(self, id: str) -> np.ndarray:
+    def _pop_candidate(self, candidate_id: str) -> np.ndarray:
         """Pop the candidate with the given ID.

         Returns:
             The embedding of the candidate.
         """
         # Get the embedding for the id.
-        index = self.candidate_id_to_index.pop(id)
-        assert self.candidates[index].id == id
+        index = self.candidate_id_to_index.pop(candidate_id)
+        assert self.candidates[index].id == candidate_id
         embedding = self.candidate_embeddings[index].copy()

         # Swap that index with the last index in the candidates and
@@ -199,10 +199,10 @@ def add_candidates(self, candidates: Dict[str, List[float]]):
         # And add them to the
         new_embeddings = np.ndarray((len(include_ids), self.dimensions))
         offset = self.candidate_embeddings.shape[0]
-        for index, id in enumerate(include_ids):
-            if id in include_ids:
-                self.candidate_id_to_index[id] = offset + index
-                embedding = candidates[id]
+        for index, candidate_id in enumerate(include_ids):
+            if candidate_id in include_ids:
+                self.candidate_id_to_index[candidate_id] = offset + index
+                embedding = candidates[candidate_id]
                 new_embeddings[index] = embedding

         # Compute the similarity to the query.
@@ -213,12 +213,12 @@ def add_candidates(self, candidates: Dict[str, List[float]]):
         redundancy = cosine_similarity(
             new_embeddings, self._already_selected_embeddings()
         )
-        for index, id in enumerate(include_ids):
+        for index, candidate_id in enumerate(include_ids):
             max_redundancy = 0.0
             if redundancy.shape[0] > 0:
                 max_redundancy = redundancy[index].max()
             candidate = _Candidate(
-                id=id,
+                id=candidate_id,
                 weighted_similarity=self.lambda_mult * similarity[index][0],
                 weighted_redundancy=self.lambda_mult_complement * max_redundancy,
             )
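The comment visible in the first hunk ("Swap that index with the last index...") points at a swap-with-last removal: popping a candidate in O(1) by moving the final element into the vacated slot while keeping the id-to-index map consistent. A standalone sketch of that pattern, with illustrative data rather than this module's types:

items = ["a", "b", "c", "d"]
index_of = {item: i for i, item in enumerate(items)}

def pop_item(item: str) -> str:
    """Remove item in O(1) by swapping it with the last element."""
    index = index_of.pop(item)
    removed = items[index]
    last = items.pop()
    if index < len(items):      # item was not itself the last element
        items[index] = last     # move the last element into the hole
        index_of[last] = index  # keep the map consistent
    return removed

pop_item("b")
assert items == ["a", "d", "c"]
assert index_of == {"a": 0, "d": 1, "c": 2}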

libs/knowledge-store/ragstack_knowledge_store/graph_store.py

Lines changed: 10 additions & 8 deletions
@@ -355,12 +355,14 @@ def add_nodes(rows):
             for row in rows:
                 results[row.content_id] = _row_to_node(row)

-            for id in ids:
-                if id not in results:
-                    results[id] = None
-                    cq.execute(self._query_by_id, parameters=(id,), callback=add_nodes)
+            for node_id in ids:
+                if node_id not in results:
+                    results[node_id] = None
+                    cq.execute(
+                        self._query_by_id, parameters=(node_id,), callback=add_nodes
+                    )

-        return [results[id] for id in ids]
+        return [results[node_id] for node_id in ids]

     def mmr_traversal_search(
         self,
@@ -414,7 +416,7 @@ def mmr_traversal_search(
         helper.add_candidates({row.content_id: row.text_embedding for row in fetched})

         # Select the best item, K times.
-        depths = {id: 0 for id in helper.candidate_ids()}
+        depths = {candidate_id: 0 for candidate_id in helper.candidate_ids()}
         visited_tags = set()
         for _ in range(k):
             selected_id = helper.pop_best()
@@ -549,10 +551,10 @@ def visit_targets(d: int, targets: Sequence[NamedTuple]):
                     new_nodes_at_next_depth.add(content_id)

             if new_nodes_at_next_depth:
-                for id in new_nodes_at_next_depth:
+                for node_id in new_nodes_at_next_depth:
                     cq.execute(
                         self._query_ids_and_link_to_tags_by_id,
-                        parameters=(id,),
+                        parameters=(node_id,),
                         callback=lambda rows, d=d: visit_nodes(d + 1, rows),
                     )
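id is likewise a builtin, and a loop variable named id hides the id() function for the rest of the scope, which is what these renames fix. A short illustration with hypothetical values:

ids = ["node-1", "node-2"]

# Before: the loop target shadows the builtin (ruff A001).
for id in ids:
    pass
# id(object())  -> TypeError: 'str' object is not callable

del id  # drop the shadowing binding so the builtin resolves again

# After: a descriptive name leaves the builtin untouched.
for node_id in ids:
    print(node_id, id(node_id))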

libs/langchain/ragstack_langchain/graph_store/extractors/gliner_link_extractor.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ def __init__(
         self._kind = kind
         self._extract_kwargs = extract_kwargs or {}

-    def extract_one(self, input: GLiNERInput) -> Set[Link]:
+    def extract_one(self, input: GLiNERInput) -> Set[Link]:  # noqa: A002
         return next(self.extract_many([input]))

     def extract_many(

libs/langchain/ragstack_langchain/graph_store/extractors/hierarchy_link_extractor.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ def as_document_extractor(

     def extract_one(
         self,
-        input: HierarchyInput,
+        input: HierarchyInput,  # noqa: A002
     ) -> Set[Link]:
         this_path = "/".join(input)
         parent_path = None

libs/langchain/ragstack_langchain/graph_store/extractors/html_link_extractor.py

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ def as_document_extractor(

     def extract_one(
         self,
-        input: HtmlInput,
+        input: HtmlInput,  # noqa: A002
     ) -> Set[Link]:
         content = input.content
         if isinstance(content, str):

libs/langchain/ragstack_langchain/graph_store/extractors/keybert_link_extractor.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ def __init__(
         self._kind = kind
         self._extract_keywords_kwargs = extract_keywords_kwargs or {}

-    def extract_one(self, input: KeybertInput) -> Set[Link]:
+    def extract_one(self, input: KeybertInput) -> Set[Link]:  # noqa: A002
         keywords = self._kw_model.extract_keywords(
             input if isinstance(input, str) else input.page_content,
             **self._extract_keywords_kwargs,

libs/langchain/ragstack_langchain/graph_store/extractors/link_extractor.py

Lines changed: 2 additions & 3 deletions
@@ -15,7 +15,7 @@ class LinkExtractor(ABC, Generic[InputT]):
     """Interface for extracting links (incoming, outgoing, bidirectional)."""

     @abstractmethod
-    def extract_one(self, input: InputT) -> set[Link]:
+    def extract_one(self, input: InputT) -> set[Link]:  # noqa: A002
         """Add edges from each `input` to the corresponding documents.

         Args:
@@ -34,5 +34,4 @@ def extract_many(self, inputs: Iterable[InputT]) -> Iterable[Set[Link]]:
         Returns:
             Iterable over the set of links extracted from the input.
         """
-        for input in inputs:
-            yield self.extract_one(input)
+        return map(self.extract_one, inputs)
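The second hunk also replaces a two-line generator loop with map, which sidesteps naming the loop variable "input" and stays equally lazy. A quick equivalence check with a stand-in extractor function:

def extract_one(text: str) -> set:
    return {text.upper()}

# Old shape: a generator function with an explicit loop.
def extract_many_loop(items):
    for item in items:
        yield extract_one(item)

# New shape: map defers the calls just like the generator did.
def extract_many_map(items):
    return map(extract_one, items)

inputs = ["a", "b"]
assert list(extract_many_loop(inputs)) == list(extract_many_map(inputs))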

0 commit comments
