Skip to content

Commit 3ed698c

Browse files
committed
feat: update Ollama model handling to allow dynamic model specification
1 parent af7f303 commit 3ed698c

File tree

1 file changed

+8
-19
lines changed

1 file changed

+8
-19
lines changed

examples/image_search/main.py

Lines changed: 8 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -15,10 +15,9 @@
1515
from qdrant_client import QdrantClient
1616
from transformers import CLIPModel, CLIPProcessor
1717

18+
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://localhost:11434/")
1819
QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6334/")
1920
QDRANT_COLLECTION = "ImageSearch"
20-
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://localhost:11434/")
21-
OLLAMA_MODEL = "gemma3"
2221
CLIP_MODEL_NAME = "openai/clip-vit-large-patch14"
2322
CLIP_MODEL_DIMENSION = 768
2423

@@ -71,22 +70,14 @@ def image_object_embedding_flow(
7170
)
7271
img_embeddings = data_scope.add_collector()
7372
with data_scope["images"].row() as img:
74-
has_ollama_model = os.getenv("OLLAMA_MODEL") == OLLAMA_MODEL
75-
if has_ollama_model:
73+
ollama_model_name = os.getenv("OLLAMA_MODEL")
74+
if ollama_model_name is not None:
75+
# If an Ollama model is specified, generate an image caption
7676
img["caption"] = flow_builder.transform(
7777
cocoindex.functions.ExtractByLlm(
7878
llm_spec=cocoindex.llm.LlmSpec(
79-
api_type=cocoindex.LlmApiType.OLLAMA, model=OLLAMA_MODEL
79+
api_type=cocoindex.LlmApiType.OLLAMA, model=ollama_model_name
8080
),
81-
# Replace by this spec below, to use OpenAI API model instead of ollama
82-
# llm_spec=cocoindex.LlmSpec(
83-
# api_type=cocoindex.LlmApiType.OPENAI, model="gpt-4o"),
84-
# Replace by this spec below, to use Gemini API model
85-
# llm_spec=cocoindex.LlmSpec(
86-
# api_type=cocoindex.LlmApiType.GEMINI, model="gemini-2.0-flash"),
87-
# Replace by this spec below, to use Anthropic API model
88-
# llm_spec=cocoindex.LlmSpec(
89-
# api_type=cocoindex.LlmApiType.ANTHROPIC, model="claude-3-5-sonnet-latest"),
9081
instruction=(
9182
"Describe the image in one detailed sentence. "
9283
"Name all visible animal species, objects, and the main scene. "
@@ -105,13 +96,11 @@ def image_object_embedding_flow(
10596
"embedding": img["embedding"],
10697
}
10798

108-
if has_ollama_model:
109-
print(
110-
f"Ollama model '{OLLAMA_MODEL}' is available — captions will be extracted."
111-
)
99+
if ollama_model_name is not None:
100+
print(f"Using Ollama model '{ollama_model_name}' for captioning.")
112101
collect_fields["caption"] = img["caption"]
113102
else:
114-
print(f"Ollama model '{OLLAMA_MODEL}' not found — skipping captioning.")
103+
print(f"No Ollama model '{ollama_model_name}' found — skipping captioning.")
115104

116105
img_embeddings.collect(**collect_fields)
117106

0 commit comments

Comments (0)