Commit ada740b

cbornet and ccurme authored

community: Add ruff rule PGH003 (#30812)

See https://docs.astral.sh/ruff/rules/blanket-type-ignore/

---------

Co-authored-by: Chester Curme <[email protected]>
1 parent f005988 · commit ada740b
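The rule this commit enables, PGH003 (blanket-type-ignore), flags bare # type: ignore comments and asks that the suppressed mypy error code be spelled out, which is the pattern applied across the diffs below. A minimal sketch of the before/after (the expects_int function and value variable are illustrative, not from this commit):

def expects_int(x: int) -> int:
    return x

value: object = 3

# Flagged by PGH003: a bare ignore hides which error is being suppressed
# and keeps suppressing unrelated errors that later appear on the same line.
result = expects_int(value)  # type: ignore

# Preferred: name the specific mypy error code, as this commit does throughout.
result = expects_int(value)  # type: ignore[arg-type]

With an explicit code, an ignore that no longer matches a real error is easier to spot and remove.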

148 files changed: +449 additions, −420 deletions


libs/community/langchain_community/agent_toolkits/sql/base.py

Lines changed: 3 additions & 3 deletions
@@ -196,7 +196,7 @@ def create_sql_agent(
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        agent = RunnableAgent(
-           runnable=create_openai_functions_agent(llm, tools, prompt),  # type: ignore
+           runnable=create_openai_functions_agent(llm, tools, prompt),  # type: ignore[arg-type]
            input_keys_arg=["input"],
            return_keys_arg=["output"],
            **kwargs,

@@ -211,9 +211,9 @@ def create_sql_agent(
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        if agent_type == "openai-tools":
-           runnable = create_openai_tools_agent(llm, tools, prompt)  # type: ignore
+           runnable = create_openai_tools_agent(llm, tools, prompt)  # type: ignore[arg-type]
        else:
-           runnable = create_tool_calling_agent(llm, tools, prompt)  # type: ignore
+           runnable = create_tool_calling_agent(llm, tools, prompt)  # type: ignore[arg-type]
        agent = RunnableMultiActionAgent(  # type: ignore[assignment]
            runnable=runnable,
            input_keys_arg=["input"],

libs/community/langchain_community/agents/openai_assistant/base.py

Lines changed: 3 additions & 3 deletions
@@ -135,7 +135,7 @@ def _get_assistants_tool(
        Dict[str, Any]: A dictionary of tools that are converted into OpenAI tools.
    """
    if _is_assistants_builtin_tool(tool):
-       return tool  # type: ignore
+       return tool  # type: ignore[return-value]
    else:
        return convert_to_openai_tool(tool)

@@ -288,7 +288,7 @@ def create_assistant(
        assistant = client.beta.assistants.create(
            name=name,
            instructions=instructions,
-           tools=[_get_assistants_tool(tool) for tool in tools],  # type: ignore
+           tools=[_get_assistants_tool(tool) for tool in tools],
            tool_resources=tool_resources,  # type: ignore[arg-type]
            model=model,
            extra_body=extra_body,

@@ -430,7 +430,7 @@ async def acreate_assistant(
        assistant = await async_client.beta.assistants.create(
            name=name,
            instructions=instructions,
-           tools=openai_tools,  # type: ignore
+           tools=openai_tools,
            tool_resources=tool_resources,  # type: ignore[arg-type]
            model=model,
        )

libs/community/langchain_community/cache.py

Lines changed: 6 additions & 6 deletions
@@ -238,7 +238,7 @@ async def aclear(self, **kwargs: Any) -> None:
Base = declarative_base()


-class FullLLMCache(Base):  # type: ignore
+class FullLLMCache(Base):  # type: ignore[misc,valid-type]
    """SQLite table for full LLM Cache (all generations)."""

    __tablename__ = "full_llm_cache"

@@ -261,7 +261,7 @@ def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        stmt = (
            select(self.cache_schema.response)
-           .where(self.cache_schema.prompt == prompt)  # type: ignore
+           .where(self.cache_schema.prompt == prompt)
            .where(self.cache_schema.llm == llm_string)
            .order_by(self.cache_schema.idx)
        )

@@ -1531,7 +1531,7 @@ async def aclear(self, **kwargs: Any) -> None:
        await self.table.aclear()


-class FullMd5LLMCache(Base):  # type: ignore
+class FullMd5LLMCache(Base):  # type: ignore[misc,valid-type]
    """SQLite table for full LLM Cache (all generations)."""

    __tablename__ = "full_md5_llm_cache"

@@ -1583,7 +1583,7 @@ def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> N
    def _delete_previous(self, session: Session, prompt: str, llm_string: str) -> None:
        stmt = (
            delete(self.cache_schema)
-           .where(self.cache_schema.prompt_md5 == self.get_md5(prompt))  # type: ignore
+           .where(self.cache_schema.prompt_md5 == self.get_md5(prompt))
            .where(self.cache_schema.llm == llm_string)
            .where(self.cache_schema.prompt == prompt)
        )

@@ -1593,7 +1593,7 @@ def _search_rows(self, prompt: str, llm_string: str) -> Sequence[Row]:
        prompt_pd5 = self.get_md5(prompt)
        stmt = (
            select(self.cache_schema.response)
-           .where(self.cache_schema.prompt_md5 == prompt_pd5)  # type: ignore
+           .where(self.cache_schema.prompt_md5 == prompt_pd5)
            .where(self.cache_schema.llm == llm_string)
            .where(self.cache_schema.prompt == prompt)
            .order_by(self.cache_schema.idx)

@@ -1796,7 +1796,7 @@ def __init__(self, awaitable: Awaitable[Any]):
    def __await__(self) -> Generator:
        if self.result is _unset:
            self.result = yield from self.awaitable.__await__()
-       return self.result  # type: ignore
+       return self.result  # type: ignore[return-value]


def _reawaitable(func: Callable) -> Callable:
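The cache.py hunks show the usual reason for [misc,valid-type] on these classes: Base comes from SQLAlchemy's declarative_base(), which mypy types as Any unless the SQLAlchemy plugin is configured, so subclassing it trips two distinct error codes. A standalone sketch of the same pattern (the table name and columns are illustrative, not the commit's schema):

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()  # mypy infers Any here without the SQLAlchemy mypy plugin


# "valid-type": Base is a variable, not a type; "misc": invalid/Any base class.
class ExampleCache(Base):  # type: ignore[misc,valid-type]
    __tablename__ = "example_cache"

    id = Column(Integer, primary_key=True)
    response = Column(String)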

libs/community/langchain_community/callbacks/comet_ml_callback.py

Lines changed: 1 addition & 1 deletion
@@ -584,7 +584,7 @@ def _reset(
        )
        _custom_metrics = custom_metrics if custom_metrics else self.custom_metrics

-       self.__init__(  # type: ignore
+       self.__init__(  # type: ignore[misc]
            task_type=_task_type,
            workspace=_workspace,
            project_name=_project_name,

libs/community/langchain_community/callbacks/wandb_callback.py

Lines changed: 1 addition & 1 deletion
@@ -580,7 +580,7 @@ def flush_tracker(
        self.temp_dir.cleanup()
        self.reset_callback_meta()
        if reset:
-           self.__init__(  # type: ignore
+           self.__init__(  # type: ignore[misc]
                job_type=job_type if job_type else self.job_type,
                project=project if project else self.project,
                entity=entity if entity else self.entity,
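Both callback diffs narrow the same suppression: mypy reports a direct __init__ call on an existing instance under its misc code, so [misc] is the precise replacement for the blanket ignore here. A minimal sketch of the pattern (a hypothetical Counter class, not the callback handlers themselves):

class Counter:
    def __init__(self, start: int = 0) -> None:
        self.value = start

    def reset(self, start: int = 0) -> None:
        # Re-running __init__ on an already-constructed instance is what mypy flags.
        self.__init__(start)  # type: ignore[misc]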

libs/community/langchain_community/chains/ernie_functions/base.py

Lines changed: 2 additions & 2 deletions
@@ -352,7 +352,7 @@ class Dog(BaseModel):
    class _OutputFormatter(BaseModel):
        """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-       output: output_schema  # type: ignore
+       output: output_schema  # type: ignore[valid-type]

    function = _OutputFormatter
    output_parser = output_parser or PydanticAttrOutputFunctionsParser(

@@ -537,7 +537,7 @@ class Dog(BaseModel):
    class _OutputFormatter(BaseModel):
        """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-       output: output_schema  # type: ignore
+       output: output_schema  # type: ignore[valid-type]

    function = _OutputFormatter
    output_parser = output_parser or PydanticAttrOutputFunctionsParser(

libs/community/langchain_community/chains/graph_qa/cypher.py

Lines changed: 4 additions & 4 deletions
@@ -316,7 +316,7 @@ def from_llm(
                    MessagesPlaceholder(variable_name="function_response"),
                ]
            )
-           qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore
+           qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore[operator]
        except (NotImplementedError, AttributeError):
            raise ValueError("Provided LLM does not support native tools/functions")
    else:

@@ -404,15 +404,15 @@ def _call(
        intermediate_steps.append({"context": context})
        if self.use_function_response:
            function_response = get_function_response(question, context)
-           final_result = self.qa_chain.invoke(  # type: ignore
+           final_result = self.qa_chain.invoke(  # type: ignore[assignment]
                {"question": question, "function_response": function_response},
            )
        else:
-           result = self.qa_chain.invoke(  # type: ignore
+           result = self.qa_chain.invoke(
                {"question": question, "context": context},
                callbacks=callbacks,
            )
-           final_result = result[self.qa_chain.output_key]  # type: ignore
+           final_result = result[self.qa_chain.output_key]  # type: ignore[union-attr]

        chain_result: Dict[str, Any] = {self.output_key: final_result}
        if self.return_intermediate_steps:

libs/community/langchain_community/chains/graph_qa/memgraph.py

Lines changed: 4 additions & 4 deletions
@@ -225,11 +225,11 @@ def from_llm(
                    MessagesPlaceholder(variable_name="function_response"),
                ]
            )
-           qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore
+           qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore[operator]
        except (NotImplementedError, AttributeError):
            raise ValueError("Provided LLM does not support native tools/functions")
    else:
-       qa_chain = use_qa_llm_kwargs["prompt"] | qa_llm | StrOutputParser()  # type: ignore
+       qa_chain = use_qa_llm_kwargs["prompt"] | qa_llm | StrOutputParser()

    prompt = use_cypher_llm_kwargs["prompt"]
    llm_to_use = cypher_llm if cypher_llm is not None else llm

@@ -300,11 +300,11 @@ def _call(
        intermediate_steps.append({"context": context})
        if self.use_function_response:
            function_response = get_function_response(question, context)
-           result = self.qa_chain.invoke(  # type: ignore
+           result = self.qa_chain.invoke(
                {"question": question, "function_response": function_response},
            )
        else:
-           result = self.qa_chain.invoke(  # type: ignore
+           result = self.qa_chain.invoke(
                {"question": question, "context": context},
                callbacks=callbacks,
            )

libs/community/langchain_community/chains/graph_qa/neptune_cypher.py

Lines changed: 2 additions & 2 deletions
@@ -67,11 +67,11 @@ def extract_cypher(text: str) -> str:

def use_simple_prompt(llm: BaseLanguageModel) -> bool:
    """Decides whether to use the simple prompt"""
-   if llm._llm_type and "anthropic" in llm._llm_type:  # type: ignore
+   if llm._llm_type and "anthropic" in llm._llm_type:  # type: ignore[attr-defined]
        return True

    # Bedrock anthropic
-   if hasattr(llm, "model_id") and "anthropic" in llm.model_id:  # type: ignore
+   if hasattr(llm, "model_id") and "anthropic" in llm.model_id:
        return True

    return False

libs/community/langchain_community/chains/pebblo_retrieval/base.py

Lines changed: 6 additions & 2 deletions
@@ -313,8 +313,12 @@ async def _aget_docs(
        )

    @staticmethod
-   def _get_app_details(  # type: ignore
-       app_name: str, owner: str, description: str, llm: BaseLanguageModel, **kwargs
+   def _get_app_details(
+       app_name: str,
+       owner: str,
+       description: str,
+       llm: BaseLanguageModel,
+       **kwargs: Any,
    ) -> App:
        """Fetch app details. Internal method.
        Returns:
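This last hunk takes the other route the rule encourages: rather than narrowing the ignore, the signature is fully annotated (notably **kwargs: Any) and the suppression is dropped altogether. A minimal sketch of that trade-off (a hypothetical build_app helper, not the Pebblo API):

from typing import Any, Dict


# With every parameter and the return type annotated, no ignore comment is needed.
def build_app(app_name: str, owner: str, **kwargs: Any) -> Dict[str, Any]:
    return {"name": app_name, "owner": owner, **kwargs}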
