Commit bac6b5f

chore: bump deps (#343)
Fixes #342
1 parent 2c78c27 commit bac6b5f

26 files changed: +2584, -1988 lines

libs/community/langchain_community/chains/llm_requests.py

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ def _call(
         url = inputs[self.input_key]
         res = self.requests_wrapper.get(url)
         # extract the text from the html
-        soup = BeautifulSoup(res, "html.parser")
+        soup = BeautifulSoup(res, "html.parser")  # type: ignore[arg-type]
         other_keys[self.requests_key] = soup.get_text()[: self.text_length]
         result = self.llm_chain.predict(
             callbacks=_run_manager.get_child(), **other_keys
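
Note: the new ignore likely reflects a typing mismatch after the bump: bs4 annotates the BeautifulSoup constructor as taking str | bytes, while the requests wrapper's get() is presumably typed more loosely, so mypy flags the argument. A minimal sketch of an alternative that narrows the value instead of suppressing the error (the response value below is a stand-in, not the wrapper's real return):

from bs4 import BeautifulSoup

res = "<html><body>hello</body></html>"  # stand-in for requests_wrapper.get(url)
soup = BeautifulSoup(str(res), "html.parser")  # str() yields a str, satisfying the annotation
print(soup.get_text())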

libs/community/langchain_community/chat_loaders/telegram.py

Lines changed: 2 additions & 2 deletions
@@ -60,7 +60,7 @@ def _load_single_chat_session_html(file_path: str) -> ChatSession:
     results: List[Union[HumanMessage, AIMessage]] = []
     previous_sender = None
     for message in soup.select(".message.default"):
-        timestamp = message.select_one(".pull_right.date.details")["title"]
+        timestamp = message.select_one(".pull_right.date.details")["title"]  # type: ignore[index]
         from_name_element = message.select_one(".from_name")
         if from_name_element is None and previous_sender is None:
             logger.debug("from_name not found in message")
@@ -69,7 +69,7 @@ def _load_single_chat_session_html(file_path: str) -> ChatSession:
             from_name = previous_sender
         else:
             from_name = from_name_element.text.strip()
-        text = message.select_one(".text").text.strip()
+        text = message.select_one(".text").text.strip()  # type: ignore[union-attr]
         results.append(
             HumanMessage(
                 content=text,
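
Note: both ignores here follow from select_one being typed to return Tag | None, so indexing the result or taking .text on it is flagged whenever the selector can miss. A hedged sketch of the guard pattern that satisfies mypy without ignores (markup is illustrative):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div class="text"> hi </div>', "html.parser")
node = soup.select_one(".text")  # typed as Tag | None
if node is not None:  # narrowing removes the index/union-attr errors
    text = node.text.strip()
    print(text)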

libs/community/langchain_community/chat_models/solar.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
 from langchain_community.llms.solar import SOLAR_SERVICE_URL_BASE, SolarCommon
 
 
-@deprecated(  # type: ignore[arg-type]
+@deprecated(
     since="0.0.34", removal="1.0", alternative_import="langchain_upstage.ChatUpstage"
 )
 class SolarChat(SolarCommon, ChatOpenAI):

libs/community/langchain_community/document_loaders/arcgis_loader.py

Lines changed: 3 additions & 3 deletions
@@ -46,7 +46,7 @@ def __init__(
             self.BEAUTIFULSOUP = BeautifulSoup
         except ImportError:
             warnings.warn("BeautifulSoup not found. HTML will not be parsed.")
-            self.BEAUTIFULSOUP = None
+            self.BEAUTIFULSOUP = None  # type: ignore[assignment]
 
         self.gis = gis or arcgis.gis.GIS()
 
@@ -93,7 +93,7 @@ def _get_layer_properties(self, lyr_desc: Optional[str] = None) -> dict:
         if lyr_desc is None:
             # retrieve description from the FeatureLayer if not provided
             try:
-                if self.BEAUTIFULSOUP:
+                if self.BEAUTIFULSOUP:  # type: ignore[truthy-function]
                     lyr_desc = self.BEAUTIFULSOUP(props["description"]).text
                 else:
                     lyr_desc = props["description"]
@@ -109,7 +109,7 @@ def _get_layer_properties(self, lyr_desc: Optional[str] = None) -> dict:
             raw_desc = item.description
         except AttributeError:
             raw_desc = item.properties.description
-        if self.BEAUTIFULSOUP:
+        if self.BEAUTIFULSOUP:  # type: ignore  # noqa: PGH003
             item_desc = self.BEAUTIFULSOUP(raw_desc).text
         else:
             item_desc = raw_desc
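
Note: the assignment ignore is needed because self.BEAUTIFULSOUP is first bound to the BeautifulSoup class, so mypy infers a non-optional attribute type and rejects the None fallback. A sketch of an explicit Optional annotation that would make the fallback legal (the class name is hypothetical; the import-guard pattern mirrors the loader's):

import warnings
from typing import Optional, Type

class HtmlAwareLoader:
    BEAUTIFULSOUP: Optional[Type]  # declared optional, so None is a valid value

    def __init__(self) -> None:
        try:
            from bs4 import BeautifulSoup

            self.BEAUTIFULSOUP = BeautifulSoup
        except ImportError:
            warnings.warn("BeautifulSoup not found. HTML will not be parsed.")
            self.BEAUTIFULSOUP = None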

libs/community/langchain_community/document_loaders/blackboard.py

Lines changed: 4 additions & 4 deletions
@@ -187,14 +187,14 @@ def _get_attachments(self, soup: Any) -> List[str]:
         # Get all attachments
         attachments = []
         attachment: Tag
-        for attachment in content_list.find_all("ul", {"class": "attachments"}):
+        for attachment in content_list.find_all("ul", {"class": "attachments"}):  # type: ignore[assignment]
             link: Tag
-            for link in attachment.find_all("a"):
+            for link in attachment.find_all("a"):  # type: ignore[assignment]
                 href = link.get("href")
                 # Only add if href is not None and does not start with #
-                if href is not None and not href.startswith("#"):
+                if href is not None and not href.startswith("#"):  # type: ignore[union-attr]
                     attachments.append(href)
-        return attachments
+        return attachments  # type: ignore[return-value]
 
     def _download_attachments(self, attachments: List[str]) -> None:
         """Download all attachments.

libs/community/langchain_community/document_loaders/chm.py

Lines changed: 5 additions & 5 deletions
@@ -98,11 +98,11 @@ def index(self) -> List[Dict[str, str]]:
             # <param name="Local" value="<...>">
             name = ""
             local = ""
-            for param in obj.find_all("param"):
-                if param["name"] == "Name":
-                    name = param["value"]
-                if param["name"] == "Local":
-                    local = param["value"]
+            for param in obj.find_all("param"):  # type: ignore[union-attr]
+                if param["name"] == "Name":  # type: ignore[index]
+                    name = param["value"]  # type: ignore[index]
+                if param["name"] == "Local":  # type: ignore[index]
+                    local = param["value"]  # type: ignore[index]
             if not name or not local:
                 continue
 

libs/community/langchain_community/document_loaders/gitbook.py

Lines changed: 3 additions & 3 deletions
@@ -149,9 +149,9 @@ def _extract_sitemap_urls(self, soup: BeautifulSoup) -> List[str]:
         sitemap_tags = soup.find_all("sitemap")
         urls: List[str] = []
         for sitemap in sitemap_tags:
-            loc = sitemap.find("loc")
-            if loc and loc.text:
-                self._safe_add_url(urls, loc.text, "sitemap")
+            loc = sitemap.find("loc")  # type: ignore[union-attr]
+            if loc and loc.text:  # type: ignore # noqa: PGH003
+                self._safe_add_url(urls, loc.text, "sitemap")  # type: ignore[union-attr]
         return urls
 
     def _process_sitemap(

libs/community/langchain_community/document_loaders/kinetica_loader.py

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ def lazy_load(self) -> Iterator[Document]:
         query_result = self._execute_query()
         if isinstance(query_result, Exception):
             print(f"An error occurred during the query: {query_result}")  # noqa: T201
-            return []  # type: ignore[return-value]
+            return []
         page_content_columns, metadata_columns = self._get_columns(query_result)
         if "*" in page_content_columns:
             page_content_columns = list(query_result[0].keys())

libs/community/langchain_community/document_loaders/parsers/grobid.py

Lines changed: 6 additions & 6 deletions
@@ -55,9 +55,9 @@ def process_xml(
         title = "No title found"
         chunks = []
         for section in sections:
-            sect = section.find("head")
+            sect = section.find("head")  # type: ignore[union-attr]
             if sect is not None:
-                for i, paragraph in enumerate(section.find_all("p")):
+                for i, paragraph in enumerate(section.find_all("p")):  # type: ignore[union-attr]
                     chunk_bboxes = []
                     paragraph_text = []
                     for i, sentence in enumerate(paragraph.find_all("s")):
@@ -82,8 +82,8 @@ def process_xml(
                             "text": sentence.text,
                             "para": str(i),
                             "bboxes": [sbboxes],
-                            "section_title": sect.text,
-                            "section_number": sect.get("n"),
+                            "section_title": sect.text,  # type: ignore[union-attr]
+                            "section_number": sect.get("n"),  # type: ignore[union-attr]
                             "pages": (fpage, lpage),
                         }
                         chunks.append(sentence_dict)
@@ -96,8 +96,8 @@ def process_xml(
                         "text": "".join(paragraph_text),
                         "para": str(i),
                         "bboxes": chunk_bboxes,
-                        "section_title": sect.text,
-                        "section_number": sect.get("n"),
+                        "section_title": sect.text,  # type: ignore[union-attr]
+                        "section_number": sect.get("n"),  # type: ignore[union-attr]
                         "pages": (fpage, lpage),
                     }
                     chunks.append(paragraph_dict)
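
Note: these union-attr ignores trace back to find(), which the stubs type as returning Tag | NavigableString | None, so calling .text and .get("n") on sect is flagged. A hedged sketch of isinstance narrowing that avoids the ignores (tag names are illustrative, not Grobid's TEI schema):

from bs4 import BeautifulSoup
from bs4.element import Tag

soup = BeautifulSoup('<div><span n="1">Intro</span><p>text</p></div>', "html.parser")
for section in soup.find_all("div"):
    sect = section.find("span")
    if isinstance(sect, Tag):  # narrows away NavigableString and None
        print(sect.text, sect.get("n"))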

libs/community/langchain_community/document_loaders/readthedocs.py

Lines changed: 5 additions & 5 deletions
@@ -102,13 +102,13 @@ def _clean_data(self, data: str) -> str:
 
         # reversed order. check the custom one first
         for tag, attrs in html_tags[::-1]:
-            element = soup.find(tag, attrs)
+            element = soup.find(tag, attrs)  # type: ignore[arg-type]
             # if found, break
             if element is not None:
                 break
 
-        if element is not None and _get_link_ratio(element) <= self.exclude_links_ratio:
-            text = _get_clean_text(element)
+        if element is not None and _get_link_ratio(element) <= self.exclude_links_ratio:  # type: ignore[arg-type]
+            text = _get_clean_text(element)  # type: ignore[arg-type]
         else:
             text = ""
         # trim empty lines
@@ -174,9 +174,9 @@ def _get_link_ratio(section: Tag) -> float:
         return 0
 
     link_text = "".join(
-        str(string.string.strip())
+        str(string.string.strip())  # type: ignore[union-attr]
         for link in links
-        for string in link.strings
+        for string in link.strings  # type: ignore[union-attr]
         if string
     )
     return len(link_text) / len(total_text)
