Skip to content

Commit de1ec25

Browse files
refactoring pyproject.toml
Co-Authored-By: Matteo Vedovati <[email protected]>
1 parent cec5537 commit de1ec25

File tree

4 files changed

+7
-14
lines changed

4 files changed

+7
-14
lines changed

examples/local_models/script_generator_ollama.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,16 +9,11 @@
99

1010
graph_config = {
1111
"llm": {
12-
"model": "ollama/mistral",
13-
"temperature": 0,
12+
"model": "ollama/llama3.1",
13+
"temperature": 0.5,
1414
# "model_tokens": 2000, # set context length arbitrarily,
1515
"base_url": "http://localhost:11434", # set ollama URL arbitrarily
1616
},
17-
"embeddings": {
18-
"model": "ollama/nomic-embed-text",
19-
"temperature": 0,
20-
"base_url": "http://localhost:11434", # set ollama URL arbitrarily
21-
},
2217
"library": "beautifoulsoup",
2318
"verbose": True,
2419
}

pyproject.toml

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ dependencies = [
2323
"langchain-groq>=0.1.3",
2424
"langchain-aws>=0.1.3",
2525
"langchain-anthropic>=0.1.11",
26+
"langchain-mistralai>=0.1.12",
27+
"langchain-huggingface>=0.0.3",
2628
"langchain-nvidia-ai-endpoints>=0.1.6",
2729
"html2text>=2024.2.26",
2830
"faiss-cpu>=1.8.0",
@@ -38,11 +40,7 @@ dependencies = [
3840
"google>=3.0.0",
3941
"undetected-playwright>=0.3.0",
4042
"semchunk>=1.0.1",
41-
"langchain-fireworks>=0.1.3",
42-
"langchain-community>=0.2.9",
43-
"langchain-huggingface>=0.0.3",
4443
"browserbase>=0.3.0",
45-
"langchain-mistralai>=0.1.12",
4644
]
4745

4846
license = "MIT"

scrapegraphai/graphs/abstract_graph.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -170,12 +170,12 @@ def handle_model(model_name, provider, token_key, default_token=8192):
170170

171171
if llm_params["model"].startswith("vertexai"):
172172
return handle_model(llm_params["model"], "google_vertexai", llm_params["model"])
173-
173+
174174
if "ollama" in llm_params["model"]:
175175
model_name = llm_params["model"].split("ollama/")[-1]
176176
token_key = model_name if "model_tokens" not in llm_params else llm_params["model_tokens"]
177177
return handle_model(model_name, "ollama", token_key)
178-
178+
179179
if "hugging_face" in llm_params["model"]:
180180
model_name = llm_params["model"].split("/")[-1]
181181
return handle_model(model_name, "hugging_face", model_name)

scrapegraphai/nodes/parse_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ def execute(self, state: dict) -> dict:
9090
chunk_size=self.node_config.get("chunk_size", 4096)-250,
9191
token_counter=lambda text: len(text.split()),
9292
memoize=False)
93-
93+
9494
state.update({self.output[0]: chunks})
9595

9696
return state

0 commit comments

Comments (0)