Skip to content

Commit ca9cba9

Browse files
committed
TR updates, first round
1 parent 7195202 commit ca9cba9

File tree

6 files changed

+64
-22
lines changed

6 files changed

+64
-22
lines changed

python-llamaindex/async_query.py

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
import asyncio
2-
import logging
32
from pathlib import Path
43

54
from llama_index.core import (
@@ -9,24 +8,24 @@
98
load_index_from_storage,
109
)
1110

# Define the storage directory and source data, anchored to this file's
# location so the script works regardless of the current working directory.
BASE_DIR = Path(__file__).resolve().parent
PERSIST_DIR = BASE_DIR / "storage"
DATA_FILE = BASE_DIR / "data" / "pep8.rst"


def get_index(persist_dir=PERSIST_DIR, data_file=DATA_FILE):
    """Load a persisted vector index, or build and persist one from *data_file*.

    Args:
        persist_dir: Directory holding a previously persisted index
            (``pathlib.Path``). Created on first run.
        data_file: Source document indexed on first run.

    Returns:
        The loaded or freshly built ``VectorStoreIndex``.
    """
    if persist_dir.exists():
        # A previous run already persisted the index: reload it from disk.
        storage_context = StorageContext.from_defaults(
            persist_dir=str(persist_dir),
        )
        index = load_index_from_storage(storage_context)
        print("Index loaded from storage...")
        return index

    # First run: read the source document, embed it, and persist the index
    # so subsequent runs take the fast path above.
    documents = SimpleDirectoryReader(input_files=[str(data_file)]).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=str(persist_dir))
    print("Index created and persisted to storage...")
    return index

python-llamaindex/demo.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@
# Read the PEP 8 document and build an in-memory vector index over it.
reader = SimpleDirectoryReader(input_files=["./data/pep8.rst"])
documents = reader.load_data()

index = VectorStoreIndex.from_documents(documents)

# Ask one question against the default query engine and print the answer.
query_engine = index.as_query_engine()
print(query_engine.query("What is this document about?"))

python-llamaindex/demo_google.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
from pathlib import Path

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.google_genai import GoogleGenAIEmbedding
from llama_index.llms.google_genai import GoogleGenAI

# Resolve the data file relative to this script so the demo works no matter
# which directory it is launched from (same approach as async_query.py).
DATA_FILE = Path(__file__).resolve().parent / "data" / "pep8.rst"

# Load the source document.
reader = SimpleDirectoryReader(input_files=[str(DATA_FILE)])
documents = reader.load_data()

# Build the index with Google's embedding model instead of the default.
embed_model = GoogleGenAIEmbedding(model="models/embedding-gecko-001")
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

# Answer a question with a Gemini LLM.
llm = GoogleGenAI(model="gemini-2.5-flash")
query_engine = index.as_query_engine(llm=llm)

response = query_engine.query("What is this document about?")
print(response)

python-llamaindex/demo_gpt5.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
from pathlib import Path

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI

# Resolve the data file relative to this script so the demo works no matter
# which directory it is launched from (same approach as async_query.py).
DATA_FILE = Path(__file__).resolve().parent / "data" / "pep8.rst"

# Load the source document and build an in-memory index with the
# default embedding model.
reader = SimpleDirectoryReader(input_files=[str(DATA_FILE)])
documents = reader.load_data()
index = VectorStoreIndex.from_documents(documents)

# Answer a question with an explicit OpenAI LLM.
llm = OpenAI(model="gpt-5.1")
query_engine = index.as_query_engine(llm=llm)

response = query_engine.query("Summarize the import rules.")
print(response)

python-llamaindex/demo_ollama.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
from pathlib import Path

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama

# Resolve the data file relative to this script so the demo works no matter
# which directory it is launched from (same approach as async_query.py).
DATA_FILE = Path(__file__).resolve().parent / "data" / "pep8.rst"

# Load the source document.
reader = SimpleDirectoryReader(input_files=[str(DATA_FILE)])
documents = reader.load_data()

# Build the index with a local Ollama embedding model.
embed_model = OllamaEmbedding(model_name="embeddinggemma")
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

# Answer a question with a local Ollama LLM.
llm = Ollama(model="llama3.2")
query_engine = index.as_query_engine(llm=llm)

response = query_engine.query("What is this document about?")
print(response)
Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import logging
21
from pathlib import Path
32

43
from llama_index.core import (
@@ -8,24 +7,24 @@
87
load_index_from_storage,
98
)
109

# Storage locations, resolved relative to this module's own directory.
BASE_DIR = Path(__file__).resolve().parent
PERSIST_DIR = BASE_DIR / "storage"
DATA_FILE = BASE_DIR / "data" / "pep8.rst"


def get_index(persist_dir=PERSIST_DIR, data_file=DATA_FILE):
    """Return a vector index, reloading it from disk when one was persisted.

    On the first call the index is built from *data_file* and written to
    *persist_dir*; later calls take the fast load path.
    """
    if persist_dir.exists():
        # Reuse the index persisted by an earlier run.
        storage_ctx = StorageContext.from_defaults(persist_dir=str(persist_dir))
        index = load_index_from_storage(storage_ctx)
        print("Index loaded from storage...")
    else:
        # Build the index from scratch and persist it for next time.
        reader = SimpleDirectoryReader(input_files=[str(data_file)])
        docs = reader.load_data()
        index = VectorStoreIndex.from_documents(docs)
        index.storage_context.persist(persist_dir=str(persist_dir))
        print("Index created and persisted to storage...")

    return index

0 commit comments

Comments (0)