
Commit d0969fe

removed unused imports and comments + removed dead code
1 parent ba4e863 commit d0969fe

15 files changed: +51 −49 lines changed


scrapegraphai/models/deepseek.py

Lines changed: 0 additions & 1 deletion
@@ -3,7 +3,6 @@
 """
 from langchain_openai import ChatOpenAI
 
-
 class DeepSeek(ChatOpenAI):
     """
     A wrapper for the ChatOpenAI class (DeepSeek uses an OpenAI-like API) that

scrapegraphai/models/oneapi.py

Lines changed: 0 additions & 1 deletion
@@ -3,7 +3,6 @@
 """
 from langchain_openai import ChatOpenAI
 
-
 class OneApi(ChatOpenAI):
     """
     A wrapper for the OneApi class that provides default configuration

scrapegraphai/models/openai_itt.py

Lines changed: 0 additions & 3 deletions
@@ -1,11 +1,9 @@
 """
 OpenAIImageToText Module
 """
-
 from langchain_openai import ChatOpenAI
 from langchain_core.messages import HumanMessage
 
-
 class OpenAIImageToText(ChatOpenAI):
     """
     A wrapper for the OpenAIImageToText class that provides default configuration
@@ -43,6 +41,5 @@ def run(self, image_url: str) -> str:
             ]
         )
 
-        # Use the invoke method from the superclass (ChatOpenAI)
         result = self.invoke([message]).content
         return result

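For context, the run method touched above takes an image URL and returns the model's text description. A minimal usage sketch, assuming the wrapper accepts the usual ChatOpenAI keyword arguments (the constructor is not part of this diff and the values below are illustrative):

from scrapegraphai.models.openai_itt import OpenAIImageToText

# api_key and model are placeholders; any ChatOpenAI kwargs should apply,
# since OpenAIImageToText subclasses ChatOpenAI.
itt = OpenAIImageToText(api_key="sk-...", model="gpt-4o-mini")
caption = itt.run("https://example.com/photo.jpg")  # returns the description as a str
print(caption)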
scrapegraphai/models/openai_tts.py

Lines changed: 0 additions & 3 deletions
@@ -1,10 +1,8 @@
 """
 OpenAITextToSpeech Module
 """
-
 from openai import OpenAI
 
-
 class OpenAITextToSpeech:
     """
     Implements a text-to-speech model using the OpenAI API.
@@ -20,7 +18,6 @@ class OpenAITextToSpeech:
 
     def __init__(self, tts_config: dict):
 
-        # convert model_name to model
         self.client = OpenAI(api_key=tts_config.get("api_key"),
                              base_url=tts_config.get("base_url", None))
         self.model = tts_config.get("model", "tts-1")

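Only three config keys appear in the constructor above; a minimal sketch of the expected tts_config, with illustrative values (other keys may exist outside this hunk):

from scrapegraphai.models.openai_tts import OpenAITextToSpeech

tts_config = {
    "api_key": "sk-...",   # passed straight to the OpenAI client
    "base_url": None,      # optional custom endpoint; defaults to None
    "model": "tts-1",      # defaults to "tts-1" when omitted
}
tts = OpenAITextToSpeech(tts_config)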
scrapegraphai/utils/copy.py

Lines changed: 0 additions & 2 deletions
@@ -12,7 +12,6 @@ class DeepCopyError(Exception):
 
     pass
 
-
 def is_boto3_client(obj):
     """
     Function for understanding if the script is using boto3 or not
@@ -30,7 +29,6 @@ def is_boto3_client(obj):
             return False
     return False
 
-
 def safe_deepcopy(obj: Any) -> Any:
     """
     Attempts to create a deep copy of the object using `copy.deepcopy`

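As a reminder of how these helpers fit together, a short sketch with a hypothetical state dict (real graph state may hold members that copy.deepcopy cannot handle, such as boto3 clients, which is_boto3_client detects):

from scrapegraphai.utils.copy import safe_deepcopy

graph_state = {"results": [1, 2, 3], "client": object()}  # hypothetical contents
state_copy = safe_deepcopy(graph_state)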
scrapegraphai/utils/custom_callback.py

Lines changed: 1 addition & 5 deletions
@@ -8,15 +8,12 @@
 import threading
 from typing import Any, Dict, List, Optional
 from contextvars import ContextVar
-
 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_core.messages import AIMessage
 from langchain_core.outputs import ChatGeneration, LLMResult
 from langchain_core.tracers.context import register_configure_hook
-
 from .model_costs import MODEL_COST_PER_1K_TOKENS_INPUT, MODEL_COST_PER_1K_TOKENS_OUTPUT
 
-
 def get_token_cost_for_model(
     model_name: str, num_tokens: int, is_completion: bool = False
 ) -> float:
@@ -36,7 +33,6 @@ def get_token_cost_for_model(
         return 0.0
     if is_completion:
         return MODEL_COST_PER_1K_TOKENS_OUTPUT[model_name] * (num_tokens / 1000)
-
     return MODEL_COST_PER_1K_TOKENS_INPUT[model_name] * (num_tokens / 1000)
 
 
@@ -154,4 +150,4 @@ def get_custom_callback(llm_model_name: str):
     cb = CustomCallbackHandler(llm_model_name)
     custom_callback.set(cb)
     yield cb
-    custom_callback.set(None)
+    custom_callback.set(None)

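The final hunk shows that get_custom_callback stores a CustomCallbackHandler in a ContextVar, yields it, then resets it. A minimal usage sketch, assuming it is consumed as a context manager the way llm_callback_manager.py imports it (llm and prompt are placeholders, and the model name is illustrative):

from scrapegraphai.utils.custom_callback import get_custom_callback

with get_custom_callback("gpt-4o-mini") as cb:
    response = llm.invoke(prompt)  # calls made here are priced via get_token_cost_for_model
# cb now holds the token counts and cost accumulated during the call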
scrapegraphai/utils/llm_callback_manager.py

Lines changed: 7 additions & 4 deletions
@@ -3,14 +3,16 @@
 """
 import threading
 from contextlib import contextmanager
-from .custom_callback import get_custom_callback
-
 from langchain_community.callbacks import get_openai_callback
 from langchain_community.callbacks.manager import get_bedrock_anthropic_callback
 from langchain_openai import ChatOpenAI, AzureChatOpenAI
 from langchain_aws import ChatBedrock
+from .custom_callback import get_custom_callback
 
 class CustomLLMCallbackManager:
+    """
+    custom LLLM calback class
+    """
     _lock = threading.Lock()
 
     @contextmanager
@@ -22,7 +24,8 @@ def exclusive_get_callback(self, llm_model, llm_model_name):
                 yield cb
             finally:
                 CustomLLMCallbackManager._lock.release()
-        elif isinstance(llm_model, ChatBedrock) and llm_model_name is not None and "claude" in llm_model_name:
+        elif isinstance(llm_model, ChatBedrock) and \
+            llm_model_name is not None and "claude" in llm_model_name:
             try:
                 with get_bedrock_anthropic_callback() as cb:
                     yield cb
@@ -35,4 +38,4 @@ def exclusive_get_callback(self, llm_model, llm_model_name):
             finally:
                 CustomLLMCallbackManager._lock.release()
         else:
-            yield None
+            yield None

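exclusive_get_callback is a @contextmanager that picks the appropriate callback (OpenAI, Bedrock/Anthropic, or the custom one) and serializes access through the class-level lock. A hedged usage sketch; the model is illustrative, and the printed attributes are those of LangChain's OpenAI callback handler:

from langchain_openai import ChatOpenAI
from scrapegraphai.utils.llm_callback_manager import CustomLLMCallbackManager

llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model
manager = CustomLLMCallbackManager()
with manager.exclusive_get_callback(llm, "gpt-4o-mini") as cb:
    llm.invoke("Summarise this page in one line.")
    if cb is not None:  # cb is None for model types the manager does not recognise
        print(cb.total_tokens, cb.total_cost)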
scrapegraphai/utils/logging.py

Lines changed: 0 additions & 2 deletions
@@ -1,11 +1,9 @@
 """
 A centralized logging system for any library.
-
 This module provides functions to manage logging for a library. It includes
 functions to get and set the verbosity level, add and remove handlers, and
 control propagation. It also includes a function to set formatting for all
 handlers bound to the root logger.
-
 Source code inspired by: https://gist.github.com/DiTo97/9a0377f24236b66134eb96da1ec1693f
 """
 

scrapegraphai/utils/model_costs.py

Lines changed: 8 additions & 2 deletions
@@ -2,6 +2,10 @@
 This file contains the cost of models per 1k tokens for input and output.
 The file is on a best effort basis and may not be up to date. Any contributions are welcome.
 """
+
+"""
+Cost for 1k tokens in input
+"""
 MODEL_COST_PER_1K_TOKENS_INPUT = {
     ### MistralAI
     # General Purpose
@@ -53,8 +57,10 @@
     "amazon.titan-text-premier-v1:0": 0.0005,
 }
 
+"""
+Cost for 1k tokens in output
+"""
 MODEL_COST_PER_1K_TOKENS_OUTPUT = {
-    ### MistralAI
     # General Purpose
     "open-mistral-nemo": 0.00015,
     "open-mistral-nemo-2407": 0.00015,
@@ -102,4 +108,4 @@
     "amazon.titan-text-express-v1": 0.0006,
     "amazon.titan-text-lite-v1": 0.0002,
     "amazon.titan-text-premier-v1:0": 0.0015,
-}
+}

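The two new docstrings label the dicts that get_token_cost_for_model reads from. A quick worked example using values visible in this diff:

from scrapegraphai.utils.model_costs import (
    MODEL_COST_PER_1K_TOKENS_INPUT,
    MODEL_COST_PER_1K_TOKENS_OUTPUT,
)

# open-mistral-nemo costs 0.00015 per 1k tokens in both tables, so
# 2,000 prompt tokens cost 0.0003 and 500 completion tokens cost 0.000075.
prompt_cost = MODEL_COST_PER_1K_TOKENS_INPUT["open-mistral-nemo"] * (2000 / 1000)
completion_cost = MODEL_COST_PER_1K_TOKENS_OUTPUT["open-mistral-nemo"] * (500 / 1000)
print(prompt_cost + completion_cost)  # 0.000375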
scrapegraphai/utils/output_parser.py

Lines changed: 11 additions & 9 deletions
@@ -1,12 +1,13 @@
 """
 Functions to retrieve the correct output parser and format instructions for the LLM model.
 """
+from typing import Union, Dict, Any, Type, Callable
 from pydantic import BaseModel as BaseModelV2
 from pydantic.v1 import BaseModel as BaseModelV1
-from typing import Union, Dict, Any, Type, Callable
 from langchain_core.output_parsers import JsonOutputParser
 
-def get_structured_output_parser(schema: Union[Dict[str, Any], Type[BaseModelV1 | BaseModelV2], Type]) -> Callable:
+def get_structured_output_parser(schema: Union[Dict[str, Any],
+                                               Type[BaseModelV1 | BaseModelV2], Type]) -> Callable:
     """
     Get the correct output parser for the LLM model.
 
@@ -15,7 +16,7 @@ def get_structured_output_parser(schema: Union[Dict[str, Any], Type[BaseModelV1
     """
     if issubclass(schema, BaseModelV1):
         return _base_model_v1_output_parser
-
+
     if issubclass(schema, BaseModelV2):
         return _base_model_v2_output_parser
 
@@ -29,12 +30,14 @@ def get_pydantic_output_parser(schema: Union[Dict[str, Any], Type[BaseModelV1 |
         JsonOutputParser: The output parser object.
     """
     if issubclass(schema, BaseModelV1):
-        raise ValueError("pydantic.v1 and langchain_core.pydantic_v1 are not supported with this LLM model. Please use pydantic v2 instead.")
-
+        raise ValueError("""pydantic.v1 and langchain_core.pydantic_v1
+                         are not supported with this LLM model. Please use pydantic v2 instead.""")
+
     if issubclass(schema, BaseModelV2):
         return JsonOutputParser(pydantic_object=schema)
 
-    raise ValueError("The schema is not a pydantic subclass. With this LLM model you must use a pydantic schemas.")
+    raise ValueError("""The schema is not a pydantic subclass.
+                     With this LLM model you must use a pydantic schemas.""")
 
 def _base_model_v1_output_parser(x: BaseModelV1) -> dict:
     """
@@ -47,16 +50,15 @@ def _base_model_v1_output_parser(x: BaseModelV1) -> dict:
         dict: The parsed output.
     """
     work_dict = x.dict()
-
-    # recursive dict parser
+
     def recursive_dict_parser(work_dict: dict) -> dict:
         dict_keys = work_dict.keys()
         for key in dict_keys:
            if isinstance(work_dict[key], BaseModelV1):
                work_dict[key] = work_dict[key].dict()
                recursive_dict_parser(work_dict[key])
        return work_dict
-
+
     return recursive_dict_parser(work_dict)
 
 

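Since get_pydantic_output_parser now rejects pydantic v1 schemas with the reworded error, here is a minimal sketch with a pydantic v2 model (the Product schema is invented for illustration):

from pydantic import BaseModel
from scrapegraphai.utils.output_parser import get_pydantic_output_parser

class Product(BaseModel):  # hypothetical schema
    name: str
    price: float

parser = get_pydantic_output_parser(Product)  # returns JsonOutputParser(pydantic_object=Product)
print(parser.get_format_instructions())       # format hint to embed in the prompt
result = parser.parse('{"name": "book", "price": 9.99}')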
0 commit comments

Comments
 (0)