Commit 8a32f3d (1 parent: afc6aba)
python-llamaindex/demo_ollama.py
@@ -5,7 +5,11 @@
 reader = SimpleDirectoryReader(input_files=["./data/pep8.rst"])
 documents = reader.load_data()
 
-embed_model = OllamaEmbedding(model_name="embeddinggemma")
+embed_model = OllamaEmbedding(
+    model_name="embeddinggemma",
+    request_timeout=60.0,  # For low-performance hardware
+    context_window=8000,  # For reducing memory usage
+)
 index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
 
 llm = Ollama(model="llama3.2")
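
For context, a minimal self-contained version of the updated demo_ollama.py might look like the sketch below. Only the reader, embedding, index, and llm lines come from the diff above; the import paths (llama_index.core, llama_index.embeddings.ollama, llama_index.llms.ollama) and the final query step are assumptions added for illustration.

# Sketch of the full script, assuming the split llama-index packages:
#   pip install llama-index llama-index-embeddings-ollama llama-index-llms-ollama
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama

# Load the PEP 8 style guide used as the demo corpus
reader = SimpleDirectoryReader(input_files=["./data/pep8.rst"])
documents = reader.load_data()

# Local embedding model served by Ollama; the extra arguments mirror the commit
embed_model = OllamaEmbedding(
    model_name="embeddinggemma",
    request_timeout=60.0,  # For low-performance hardware
    context_window=8000,  # For reducing memory usage
)
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

llm = Ollama(model="llama3.2")

# Illustrative query step (assumed; not part of the commit)
query_engine = index.as_query_engine(llm=llm)
print(query_engine.query("How should imports be grouped according to PEP 8?"))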