Skip to content

Commit 9fb3b9b

Browse files
committed
Add hybrid platform info: global default with per-annotation overrides
Backend:
- Add optional platform field to VLMResult for per-annotation overrides
- Add platform_override param to VLMService for cloud APIs/custom setups
- Platform is null by default (uses metadata default like RTX 4090)

Frontend:
- Add platform field to Annotation type for overrides
- Update AnnotationViewer to accept and display effective platform
- Show platform badge in annotation details when available
- Compute effectivePlatform: annotation override || global default

This enables future support for mixed-platform annotations (e.g., some local GPU, some cloud inference) while keeping current behavior unchanged.

Ref #9
1 parent 129d233 commit 9fb3b9b

File tree

4 files changed

+41
-9
lines changed

4 files changed

+41
-9
lines changed

frontend/app/components/AnnotationViewer.tsx

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,16 @@
11
'use client'
22

33
import { useState } from 'react'
4-
import { PromptAnnotation } from '../types'
4+
import { PromptAnnotation, PlatformInfo } from '../types'
55
import { Copy, Check, FileText, FileJson, Activity, Zap } from 'lucide-react'
6+
import PlatformBadge from './PlatformBadge'
67

78
interface AnnotationViewerProps {
89
annotation: PromptAnnotation
10+
platform?: PlatformInfo | null
911
}
1012

11-
export default function AnnotationViewer({ annotation }: AnnotationViewerProps) {
13+
export default function AnnotationViewer({ annotation, platform }: AnnotationViewerProps) {
1214
const [copied, setCopied] = useState(false)
1315
const [viewMode, setViewMode] = useState<'text' | 'json'>('text')
1416

@@ -190,6 +192,14 @@ export default function AnnotationViewer({ annotation }: AnnotationViewerProps)
190192
)}
191193
</div>
192194
)}
195+
196+
{/* Platform info (shown inline with metrics) */}
197+
{platform && (
198+
<div className="flex items-center gap-2 mt-3">
199+
<span className="text-xs text-agi-teal-500 dark:text-zinc-500">Platform:</span>
200+
<PlatformBadge platform={platform} />
201+
</div>
202+
)}
193203
</div>
194204
)
195205
}

frontend/app/page.tsx

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -89,13 +89,23 @@ export default function Dashboard() {
8989
return []
9090
}, [annotations, selectedImageIndex, images, selectedModel])
9191

92+
// Get current model annotation (includes platform override if present)
93+
const currentModelAnnotation = useMemo(() => {
94+
if (!images[selectedImageIndex] || !selectedModel) return null
95+
const imageAnnotations = annotations[images[selectedImageIndex].id] || []
96+
return imageAnnotations.find(a => a.model === selectedModel) || null
97+
}, [annotations, selectedImageIndex, images, selectedModel])
98+
9299
// Get current prompt annotation
93100
const currentPromptAnnotation = useMemo(() => {
94-
if (!images[selectedImageIndex] || !selectedModel || !selectedPromptKey) return null
95-
const imageAnnotations = annotations[images[selectedImageIndex].id] || []
96-
const modelAnnotation = imageAnnotations.find(a => a.model === selectedModel)
97-
return modelAnnotation?.prompts[selectedPromptKey] || null
98-
}, [annotations, selectedImageIndex, images, selectedModel, selectedPromptKey])
101+
if (!currentModelAnnotation || !selectedPromptKey) return null
102+
return currentModelAnnotation.prompts[selectedPromptKey] || null
103+
}, [currentModelAnnotation, selectedPromptKey])
104+
105+
// Get effective platform (annotation override or default)
106+
const effectivePlatform = useMemo(() => {
107+
return currentModelAnnotation?.platform || platformInfo
108+
}, [currentModelAnnotation, platformInfo])
99109

100110
async function loadAnnotationsForImage(imageId: string) {
101111
setImageLoading(true)
@@ -307,7 +317,7 @@ export default function Dashboard() {
307317
</h3>
308318
<div className="flex-1 overflow-auto">
309319
{currentPromptAnnotation ? (
310-
<AnnotationViewer annotation={currentPromptAnnotation} />
320+
<AnnotationViewer annotation={currentPromptAnnotation} platform={effectivePlatform} />
311321
) : (
312322
<div className="text-agi-teal-500 dark:text-zinc-500 text-center py-8">
313323
{!selectedModel ? 'Select a vision model to explore annotations' :

frontend/app/types.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ export interface Annotation {
2929
model: string
3030
temperature?: number
3131
prompts: Record<string, PromptAnnotation>
32+
platform?: PlatformInfo | null // Override for this annotation (e.g., cloud API)
3233
}
3334

3435
export interface GPUInfo {

src/image_annotation/services/vlm_service.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from langchain_ollama import ChatOllama
1111
from pydantic import BaseModel, Field
1212

13-
from image_annotation.utils.platform_info import get_platform_info
13+
from image_annotation.utils.platform_info import PlatformInfo, get_platform_info
1414

1515

1616
class VLMPrompt(BaseModel):
@@ -67,6 +67,11 @@ class VLMResult(BaseModel):
6767

6868
# Additional metadata
6969
temperature: float | None = None
70+
platform: dict | None = Field(
71+
None,
72+
description="Platform info override for this annotation (e.g., cloud API). "
73+
"If None, uses the default platform from metadata.",
74+
)
7075

7176

7277
class VLMService:
@@ -78,6 +83,7 @@ def __init__(
7883
base_url: str = "http://localhost:11434",
7984
temperature: float = 0.3,
8085
timeout: int = 60,
86+
platform_override: dict | None = None,
8187
):
8288
"""Initialize the VLM service.
8389
@@ -86,11 +92,14 @@ def __init__(
8692
base_url: OLLAMA API base URL.
8793
temperature: Generation temperature.
8894
timeout: Request timeout in seconds.
95+
platform_override: Optional platform info to use instead of auto-detection.
96+
Useful for cloud APIs (OpenAI, Anthropic) or custom setups.
8997
"""
9098
self.model = model
9199
self.base_url = base_url
92100
self.temperature = temperature
93101
self.timeout = timeout
102+
self.platform_override = platform_override
94103
self._llm_cache = {}
95104

96105
def _get_llm(self, model: str | None = None) -> ChatOllama:
@@ -289,6 +298,7 @@ def annotate_image(
289298
performance_metrics=performance_metrics,
290299
temperature=self.temperature,
291300
error=error,
301+
platform=self.platform_override, # Include if set (for cloud APIs, etc.)
292302
)
293303

294304
except Exception as e:
@@ -305,6 +315,7 @@ def annotate_image(
305315
performance_metrics=PerformanceMetrics(total_duration_ms=total_time_ms),
306316
temperature=self.temperature,
307317
error=str(e),
318+
platform=self.platform_override,
308319
)
309320

310321
def annotate_batch(

0 commit comments

Comments (0)