
Commit 479b6fd

langchain-fireworks[patch]: Add ruff bandit rules to linter (#31796)
- Add ruff bandit rules
- Address an S113 error
1 parent 625f7c3 commit 479b6fd
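For context, ruff's rule S113 (inherited from bandit) flags HTTP requests issued without a timeout. A minimal sketch of the before/after pattern this commit applies in `llms.py`; the endpoint and payload below are placeholders, not the library's actual values:

```python
import requests

# Placeholder endpoint and payload, for illustration only.
API_URL = "https://api.fireworks.ai/inference/v1/completions"
payload = {"prompt": "Tell me a joke."}

# Before (flagged by S113): no timeout, so a stalled server can hang the call forever.
# response = requests.post(API_URL, json=payload)

# After: an explicit timeout (in seconds) bounds how long the request may block.
response = requests.post(API_URL, json=payload, timeout=30)
```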

File tree: 5 files changed (+1465 additions, -1442 deletions)


libs/partners/fireworks/langchain_fireworks/chat_models.py

Lines changed: 21 additions & 15 deletions

@@ -288,7 +288,7 @@ def lc_secrets(self) -> dict[str, str]:

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object."""
+        """Get the namespace of the LangChain object."""
         return ["langchain", "chat_models", "fireworks"]

     @property

@@ -327,7 +327,7 @@ def is_lc_serializable(cls) -> bool:
     )
     """Fireworks API key.

-    Automatically read from env variable `FIREWORKS_API_KEY` if not provided.
+    Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided.
     """

     fireworks_api_base: Optional[str] = Field(

@@ -338,8 +338,8 @@ def is_lc_serializable(cls) -> bool:
     request_timeout: Union[float, tuple[float, float], Any, None] = Field(
         default=None, alias="timeout"
     )
-    """Timeout for requests to Fireworks completion API. Can be float, httpx.Timeout or
-    None."""
+    """Timeout for requests to Fireworks completion API. Can be ``float``,
+    ``httpx.Timeout`` or ``None``."""
     streaming: bool = False
     """Whether to stream the results or not."""
     n: int = 1

@@ -636,8 +636,8 @@ def bind_functions(

        Assumes model is compatible with Fireworks function-calling API.

-        NOTE: Using bind_tools is recommended instead, as the `functions` and
-        `function_call` request parameters are officially marked as deprecated by
+        NOTE: Using bind_tools is recommended instead, as the ``functions`` and
+        ``function_call`` request parameters are officially marked as deprecated by
        Fireworks.

        Args:

@@ -699,10 +699,10 @@ def bind_tools(
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
            tool_choice: Which tool to require the model to call.
                Must be the name of the single provided function,
-                "auto" to automatically determine which function to call
-                with the option to not call any function, "any" to enforce that some
+                ``'auto'`` to automatically determine which function to call
+                with the option to not call any function, ``'any'`` to enforce that some
                function is called, or a dict of the form:
-                {"type": "function", "function": {"name": <<tool_name>>}}.
+                ``{"type": "function", "function": {"name": <<tool_name>>}}``.
            **kwargs: Any additional parameters to pass to
                :meth:`~langchain_fireworks.chat_models.ChatFireworks.bind`
        """

@@ -760,11 +760,11 @@ def with_structured_output(

            method: The method for steering model generation, one of:

-                - "function_calling":
+                - ``'function_calling'``:
                    Uses Fireworks's `tool-calling features <https://docs.fireworks.ai/guides/function-calling>`_.
-                - "json_schema":
+                - ``'json_schema'``:
                    Uses Fireworks's `structured output feature <https://docs.fireworks.ai/structured-responses/structured-response-formatting>`_.
-                - "json_mode":
+                - ``'json_mode'``:
                    Uses Fireworks's `JSON mode feature <https://docs.fireworks.ai/structured-responses/structured-response-formatting>`_.

            .. versionchanged:: 0.2.8

@@ -793,6 +793,7 @@ def with_structured_output(
                - ``"parsing_error"``: Optional[BaseException]

        Example: schema=Pydantic class, method="function_calling", include_raw=False:
+
            .. code-block:: python

                from typing import Optional

@@ -826,6 +827,7 @@ class AnswerWithJustification(BaseModel):
                # )

        Example: schema=Pydantic class, method="function_calling", include_raw=True:
+
            .. code-block:: python

                from langchain_fireworks import ChatFireworks

@@ -854,6 +856,7 @@ class AnswerWithJustification(BaseModel):
                # }

        Example: schema=TypedDict class, method="function_calling", include_raw=False:
+
            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated

@@ -884,6 +887,7 @@ class AnswerWithJustification(TypedDict):
                # }

        Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
+
            .. code-block:: python

                from langchain_fireworks import ChatFireworks

@@ -897,9 +901,9 @@ class AnswerWithJustification(TypedDict):
                        'answer': {'type': 'string'},
                        'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                    },
-                    'required': ['answer']
-                }
-            }
+                    'required': ['answer']
+                }
+            }

                llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)
                structured_llm = llm.with_structured_output(oai_schema)

@@ -913,6 +917,7 @@ class AnswerWithJustification(TypedDict):
                # }

        Example: schema=Pydantic class, method="json_mode", include_raw=True:
+
            .. code-block::

                from langchain_fireworks import ChatFireworks

@@ -941,6 +946,7 @@ class AnswerWithJustification(BaseModel):
                # }

        Example: schema=None, method="json_mode", include_raw=True:
+
            .. code-block::

                structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)
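As a usage note, the ``tool_choice`` forms documented in the ``bind_tools`` docstring above can be exercised roughly as follows. The ``GetWeather`` schema is an illustrative assumption (it is not part of the library), and the snippet expects ``FIREWORKS_API_KEY`` to be set in the environment:

```python
from pydantic import BaseModel, Field

from langchain_fireworks import ChatFireworks


class GetWeather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(..., description="Name of the city")


llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)

# Let the model decide whether to call a tool at all.
auto_llm = llm.bind_tools([GetWeather], tool_choice="auto")

# Force the model to call some tool.
any_llm = llm.bind_tools([GetWeather], tool_choice="any")

# Require a specific tool by name, using the dict form from the docstring.
named_llm = llm.bind_tools(
    [GetWeather],
    tool_choice={"type": "function", "function": {"name": "GetWeather"}},
)
```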

libs/partners/fireworks/langchain_fireworks/embeddings.py

Lines changed: 38 additions & 35 deletions

@@ -10,60 +10,63 @@
 class FireworksEmbeddings(BaseModel, Embeddings):
     """Fireworks embedding model integration.

-    Setup:
-        Install ``langchain_fireworks`` and set environment variable
-        ``FIREWORKS_API_KEY``.
+    Setup:

-        .. code-block:: bash
+        Install ``langchain_fireworks`` and set environment variable
+        ``FIREWORKS_API_KEY``.

-            pip install -U langchain_fireworks
-            export FIREWORKS_API_KEY="your-api-key"
+        .. code-block:: bash

-    Key init args — completion params:
-        model: str
-            Name of Fireworks model to use.
+            pip install -U langchain_fireworks
+            export FIREWORKS_API_KEY="your-api-key"
+
+    Key init args — completion params:
+        model: str
+            Name of Fireworks model to use.

     Key init args — client params:
         fireworks_api_key: SecretStr
             Fireworks API key.

-    See full list of supported init args and their descriptions in the params section.
+    See full list of supported init args and their descriptions in the params section.
+
+    Instantiate:
+
+    .. code-block:: python

-    Instantiate:
-        .. code-block:: python
+        from langchain_fireworks import FireworksEmbeddings

-        from langchain_fireworks import FireworksEmbeddings
+        model = FireworksEmbeddings(
+            model='nomic-ai/nomic-embed-text-v1.5'
+            # Use FIREWORKS_API_KEY env var or pass it in directly
+            # fireworks_api_key="..."
+        )

-        model = FireworksEmbeddings(
-            model='nomic-ai/nomic-embed-text-v1.5'
-            # Use FIREWORKS_API_KEY env var or pass it in directly
-            # fireworks_api_key="..."
-        )
+    Embed multiple texts:

-    Embed multiple texts:
-        .. code-block:: python
+    .. code-block:: python

-        vectors = embeddings.embed_documents(['hello', 'goodbye'])
-        # Showing only the first 3 coordinates
-        print(len(vectors))
-        print(vectors[0][:3])
+        vectors = embeddings.embed_documents(['hello', 'goodbye'])
+        # Showing only the first 3 coordinates
+        print(len(vectors))
+        print(vectors[0][:3])

-    .. code-block:: python
+    .. code-block:: python

-        2
-        [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
+        2
+        [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]

+    Embed single text:

-    Embed single text:
-        .. code-block:: python
+    .. code-block:: python

-        input_text = "The meaning of life is 42"
-        vector = embeddings.embed_query('hello')
-        print(vector[:3])
+        input_text = "The meaning of life is 42"
+        vector = embeddings.embed_query('hello')
+        print(vector[:3])

-    .. code-block:: python
+    .. code-block:: python

-        [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
+        [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
     """

     client: OpenAI = Field(default=None, exclude=True)  # type: ignore[assignment]  # :meta private:

@@ -76,7 +79,7 @@ class FireworksEmbeddings(BaseModel, Embeddings):
     )
     """Fireworks API key.

-    Automatically read from env variable `FIREWORKS_API_KEY` if not provided.
+    Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided.
     """
     model: str = "nomic-ai/nomic-embed-text-v1.5"

libs/partners/fireworks/langchain_fireworks/llms.py

Lines changed: 32 additions & 25 deletions

@@ -4,7 +4,7 @@
 from typing import Any, Optional

 import requests
-from aiohttp import ClientSession
+from aiohttp import ClientSession, ClientTimeout
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,

@@ -22,15 +22,17 @@
 class Fireworks(LLM):
     """LLM models from `Fireworks`.

-    To use, you'll need an API key which you can find here:
-    https://fireworks.ai This can be passed in as init param
-    ``fireworks_api_key`` or set as environment variable ``FIREWORKS_API_KEY``.
+    To use, you'll need an `API key <https://fireworks.ai>`__. This can be passed in as
+    init param ``fireworks_api_key`` or set as environment variable
+    ``FIREWORKS_API_KEY``.

-    Fireworks AI API reference: https://readme.fireworks.ai/
+    `Fireworks AI API reference <https://readme.fireworks.ai/>`__

     Example:
+
         .. code-block:: python
             response = fireworks.generate(["Tell me a joke."])
+
     """

     base_url: str = "https://api.fireworks.ai/inference/v1/completions"

@@ -48,40 +50,40 @@ class Fireworks(LLM):
     )
     """Fireworks API key.

-    Automatically read from env variable `FIREWORKS_API_KEY` if not provided.
+    Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided.
     """
     model: str
-    """Model name. Available models listed here:
-    https://readme.fireworks.ai/
-    """
+    """Model name. `(Available models) <https://readme.fireworks.ai/>`__"""
     temperature: Optional[float] = None
     """Model temperature."""
     top_p: Optional[float] = None
-    """Used to dynamically adjust the number of choices for each predicted token based
-    on the cumulative probabilities. A value of 1 will always yield the same
-    output. A temperature less than 1 favors more correctness and is appropriate
-    for question answering or summarization. A value greater than 1 introduces more
-    randomness in the output.
+    """Used to dynamically adjust the number of choices for each predicted token based
+    on the cumulative probabilities. A value of ``1`` will always yield the same output.
+    A temperature less than ``1`` favors more correctness and is appropriate for
+    question answering or summarization. A value greater than ``1`` introduces more
+    randomness in the output.
     """
     model_kwargs: dict[str, Any] = Field(default_factory=dict)
-    """Holds any model parameters valid for `create` call not explicitly specified."""
+    """Holds any model parameters valid for ``create`` call not explicitly specified."""
     top_k: Optional[int] = None
     """Used to limit the number of choices for the next predicted word or token. It
-    specifies the maximum number of tokens to consider at each step, based on their
-    probability of occurrence. This technique helps to speed up the generation
-    process and can improve the quality of the generated text by focusing on the
-    most likely options.
+    specifies the maximum number of tokens to consider at each step, based on their
+    probability of occurrence. This technique helps to speed up the generation process
+    and can improve the quality of the generated text by focusing on the most likely
+    options.
     """
     max_tokens: Optional[int] = None
     """The maximum number of tokens to generate."""
     repetition_penalty: Optional[float] = None
-    """A number that controls the diversity of generated text by reducing the
-    likelihood of repeated sequences. Higher values decrease repetition.
+    """A number that controls the diversity of generated text by reducing the likelihood
+    of repeated sequences. Higher values decrease repetition.
     """
     logprobs: Optional[int] = None
     """An integer that specifies how many top token log probabilities are included in
-    the response for each token generation step.
+    the response for each token generation step.
     """
+    timeout: Optional[int] = 30
+    """Timeout in seconds for requests to the Fireworks API."""

     model_config = ConfigDict(
         extra="forbid",

@@ -132,7 +134,7 @@ def _call(
            prompt: The prompt to pass into the model.

        Returns:
-            The string generated by the model..
+            The string generated by the model.
        """
        headers = {
            "Authorization": f"Bearer {self.fireworks_api_key.get_secret_value()}",

@@ -148,7 +150,9 @@ def _call(

        # filter None values to not pass them to the http payload
        payload = {k: v for k, v in payload.items() if v is not None}
-        response = requests.post(url=self.base_url, json=payload, headers=headers)
+        response = requests.post(
+            url=self.base_url, json=payload, headers=headers, timeout=self.timeout
+        )

        if response.status_code >= 500:
            raise Exception(f"Fireworks Server: Error {response.status_code}")

@@ -196,7 +200,10 @@ async def _acall(
        payload = {k: v for k, v in payload.items() if v is not None}
        async with ClientSession() as session:
            async with session.post(
-                self.base_url, json=payload, headers=headers
+                self.base_url,
+                json=payload,
+                headers=headers,
+                timeout=ClientTimeout(total=self.timeout),
            ) as response:
                if response.status >= 500:
                    raise Exception(f"Fireworks Server: Error {response.status}")

libs/partners/fireworks/pyproject.toml

Lines changed: 7 additions & 1 deletion

@@ -52,7 +52,7 @@ disallow_untyped_defs = "True"
 target-version = "py39"

 [tool.ruff.lint]
-select = ["E", "F", "I", "T201", "UP"]
+select = ["E", "F", "I", "T201", "UP", "S"]
 ignore = [ "UP007", ]

 [tool.coverage.run]

@@ -65,3 +65,9 @@ markers = [
     "compile: mark placeholder test used to compile integration tests without running them",
 ]
 asyncio_mode = "auto"
+
+[tool.ruff.lint.extend-per-file-ignores]
+"tests/**/*.py" = [
+    "S101", # Tests need assertions
+    "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes
+]
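The per-file ignores exist because ordinary test code trips the newly enabled ``S`` rules. A hypothetical test (not from the repository) illustrating both ignored rules:

```python
import random


def test_pick_is_one_of_the_options() -> None:
    options = ["a", "b", "c"]
    picked = random.choice(options)  # would trigger S311; fine, nothing cryptographic here
    assert picked in options  # would trigger S101; asserts are the point of a test
```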
