Skip to content

Commit 48d164e

Browse files
Merge pull request #10 from Annotation-Garden/claude/update-gpu-docs-01UeUn4bN1MiBzCjTQTdbKvt
Update GPU inference platform documentation
2 parents 11d4f47 + 9fb3b9b commit 48d164e

File tree

10 files changed

+447
-20
lines changed

10 files changed

+447
-20
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ yarn-error.log*
5858
out/
5959
.nuxt/
6060
.cache/
61+
tsconfig.tsbuildinfo
6162

6263
# Virtual environments
6364
venv/

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,10 @@ Annotations follow stimuli-BIDS specifications:
8686
- **Storage**: JSON files with database support for large datasets
8787
- **Processing**: Stateless VLM calls with comprehensive metrics
8888

89+
### Inference Platform
90+
91+
All performance metrics in this repository were generated using an **NVIDIA GeForce RTX 4090 GPU** with OLLAMA for local model inference. See [Quality Control](docs/quality-control.md#inference-platform) for details.
92+
8993
## Annotation Tools
9094

9195
Powerful CLI tools for post-processing annotations:

docs/quality-control.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,8 +108,14 @@ python scripts/reannotate_missing_prompts.py
108108
python scripts/check_annotation_quality.py annotations/nsd/
109109
```
110110

111+
## Inference Platform
112+
113+
All inference was performed on an **NVIDIA GeForce RTX 4090 GPU** using OLLAMA for local model execution. This platform information applies to all performance metrics documented in this repository.
114+
111115
## Case Study: Issue #4
112116

117+
**Inference Platform**: NVIDIA RTX 4090 GPU
118+
113119
**Analysis** of 30,000 responses (1,000 images × 6 models × 5 prompts):
114120
- 143 problematic responses (0.5% of total)
115121
- 139 files affected (13.9%)

frontend/app/components/AnnotationViewer.tsx

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,16 @@
11
'use client'
22

33
import { useState } from 'react'
4-
import { PromptAnnotation } from '../types'
4+
import { PromptAnnotation, PlatformInfo } from '../types'
55
import { Copy, Check, FileText, FileJson, Activity, Zap } from 'lucide-react'
6+
import PlatformBadge from './PlatformBadge'
67

78
interface AnnotationViewerProps {
89
annotation: PromptAnnotation
10+
platform?: PlatformInfo | null
911
}
1012

11-
export default function AnnotationViewer({ annotation }: AnnotationViewerProps) {
13+
export default function AnnotationViewer({ annotation, platform }: AnnotationViewerProps) {
1214
const [copied, setCopied] = useState(false)
1315
const [viewMode, setViewMode] = useState<'text' | 'json'>('text')
1416

@@ -190,6 +192,14 @@ export default function AnnotationViewer({ annotation }: AnnotationViewerProps)
190192
)}
191193
</div>
192194
)}
195+
196+
{/* Platform info (shown inline with metrics) */}
197+
{platform && (
198+
<div className="flex items-center gap-2 mt-3">
199+
<span className="text-xs text-agi-teal-500 dark:text-zinc-500">Platform:</span>
200+
<PlatformBadge platform={platform} />
201+
</div>
202+
)}
193203
</div>
194204
)
195205
}
Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
'use client'
2+
3+
import { PlatformInfo } from '../types'
4+
import { Cpu, Monitor } from 'lucide-react'
5+
6+
// Props for PlatformBadge; a null/undefined platform makes the badge render nothing.
interface PlatformBadgeProps {
7+
platform?: PlatformInfo | null
8+
}
9+
10+
// Map vendor to display color — Tailwind text-color classes (light + dark variants).
// Keys are lowercase vendor identifiers; unknown vendors fall back to a default color at the call site.
11+
const vendorColors: Record<string, string> = {
12+
nvidia: 'text-green-600 dark:text-green-400',
13+
amd: 'text-red-600 dark:text-red-400',
14+
intel: 'text-blue-600 dark:text-blue-400',
15+
apple: 'text-gray-600 dark:text-gray-400',
16+
}
17+
18+
// Map compute backend to display name (human-readable label per backend id).
// Unmapped backends are upper-cased by the component as a fallback.
19+
const backendNames: Record<string, string> = {
20+
cuda: 'CUDA',
21+
rocm: 'ROCm',
22+
mps: 'Metal',
23+
oneapi: 'oneAPI',
24+
cpu: 'CPU',
25+
}
26+
27+
export default function PlatformBadge({ platform }: PlatformBadgeProps) {
28+
if (!platform) {
29+
return null
30+
}
31+
32+
const hasGPU = platform.accelerators && platform.accelerators.length > 0
33+
const primaryGPU = hasGPU ? platform.accelerators[0] : null
34+
const backendDisplay = platform.compute_backend
35+
? backendNames[platform.compute_backend] || platform.compute_backend.toUpperCase()
36+
: null
37+
38+
return (
39+
<div className="flex items-center gap-2 px-3 py-1.5 bg-agi-teal/5 dark:bg-agi-teal/10 rounded-lg border border-agi-teal/10 dark:border-agi-teal/20">
40+
{hasGPU ? (
41+
<Monitor className="w-4 h-4 text-agi-orange" />
42+
) : (
43+
<Cpu className="w-4 h-4 text-agi-teal-500" />
44+
)}
45+
<div className="flex items-center gap-2 text-xs">
46+
{primaryGPU ? (
47+
<>
48+
<span className={`font-medium ${vendorColors[primaryGPU.vendor] || 'text-agi-teal-600'}`}>
49+
{primaryGPU.name}
50+
</span>
51+
{primaryGPU.memory_mb && (
52+
<span className="text-agi-teal-500 dark:text-zinc-500">
53+
({Math.round(primaryGPU.memory_mb / 1024)}GB)
54+
</span>
55+
)}
56+
</>
57+
) : (
58+
<span className="text-agi-teal-600 dark:text-zinc-400">CPU Only</span>
59+
)}
60+
{backendDisplay && (
61+
<span className="text-agi-teal-400 dark:text-zinc-500">
62+
{backendDisplay}
63+
</span>
64+
)}
65+
</div>
66+
</div>
67+
)
68+
}

frontend/app/page.tsx

Lines changed: 43 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,15 @@ import { useState, useEffect, useMemo } from 'react'
44
import Image from 'next/image'
55
import ThumbnailRibbon from './components/ThumbnailRibbon'
66
import AnnotationViewer from './components/AnnotationViewer'
7-
import { ImageData, Annotation, PromptAnnotation } from './types'
7+
import PlatformBadge from './components/PlatformBadge'
8+
import { ImageData, Annotation, PromptAnnotation, PlatformInfo } from './types'
89
import { Sparkles, ChevronDown, Loader2, ExternalLink, Sun, Moon } from 'lucide-react'
910

1011
export default function Dashboard() {
1112
const [images, setImages] = useState<ImageData[]>([])
1213
const [selectedImageIndex, setSelectedImageIndex] = useState(0)
1314
const [annotations, setAnnotations] = useState<Record<string, Annotation[]>>({})
15+
const [platformInfo, setPlatformInfo] = useState<PlatformInfo | null>(null)
1416
const [selectedModel, setSelectedModel] = useState<string>('')
1517
const [selectedPromptKey, setSelectedPromptKey] = useState<string>('')
1618
const [loading, setLoading] = useState(true)
@@ -87,13 +89,23 @@ export default function Dashboard() {
8789
return []
8890
}, [annotations, selectedImageIndex, images, selectedModel])
8991

92+
// Get current model annotation (includes platform override if present)
93+
const currentModelAnnotation = useMemo(() => {
94+
if (!images[selectedImageIndex] || !selectedModel) return null
95+
const imageAnnotations = annotations[images[selectedImageIndex].id] || []
96+
return imageAnnotations.find(a => a.model === selectedModel) || null
97+
}, [annotations, selectedImageIndex, images, selectedModel])
98+
9099
// Get current prompt annotation
91100
const currentPromptAnnotation = useMemo(() => {
92-
if (!images[selectedImageIndex] || !selectedModel || !selectedPromptKey) return null
93-
const imageAnnotations = annotations[images[selectedImageIndex].id] || []
94-
const modelAnnotation = imageAnnotations.find(a => a.model === selectedModel)
95-
return modelAnnotation?.prompts[selectedPromptKey] || null
96-
}, [annotations, selectedImageIndex, images, selectedModel, selectedPromptKey])
101+
if (!currentModelAnnotation || !selectedPromptKey) return null
102+
return currentModelAnnotation.prompts[selectedPromptKey] || null
103+
}, [currentModelAnnotation, selectedPromptKey])
104+
105+
// Get effective platform (annotation override or default)
106+
const effectivePlatform = useMemo(() => {
107+
return currentModelAnnotation?.platform || platformInfo
108+
}, [currentModelAnnotation, platformInfo])
97109

98110
async function loadAnnotationsForImage(imageId: string) {
99111
setImageLoading(true)
@@ -104,6 +116,11 @@ export default function Dashboard() {
104116
const data = await response.json()
105117
setAnnotations(prev => ({ ...prev, [imageId]: data.annotations || [] }))
106118

119+
// Extract platform info from metadata if available
120+
if (data.metadata?.platform) {
121+
setPlatformInfo(data.metadata.platform)
122+
}
123+
107124
// Auto-select first model and prompt if nothing selected
108125
if (data.annotations && data.annotations.length > 0) {
109126
const firstAnnotation = data.annotations[0]
@@ -300,7 +317,7 @@ export default function Dashboard() {
300317
</h3>
301318
<div className="flex-1 overflow-auto">
302319
{currentPromptAnnotation ? (
303-
<AnnotationViewer annotation={currentPromptAnnotation} />
320+
<AnnotationViewer annotation={currentPromptAnnotation} platform={effectivePlatform} />
304321
) : (
305322
<div className="text-agi-teal-500 dark:text-zinc-500 text-center py-8">
306323
{!selectedModel ? 'Select a vision model to explore annotations' :
@@ -324,17 +341,25 @@ export default function Dashboard() {
324341

325342
{/* Footer */}
326343
<footer className="glass-footer px-6 py-3">
327-
<div className="text-center text-sm text-agi-teal-600 dark:text-zinc-400">
328-
© 2025{' '}
329-
<a
330-
href="https://annotation.garden"
331-
target="_blank"
332-
rel="noopener noreferrer"
333-
className="text-agi-teal dark:text-agi-teal-400 hover:text-agi-orange transition-colors inline-flex items-center gap-1"
334-
>
335-
Annotation Garden Initiative
336-
<ExternalLink className="w-3 h-3" />
337-
</a>
344+
<div className="flex flex-col md:flex-row items-center justify-between gap-2">
345+
<div className="text-sm text-agi-teal-600 dark:text-zinc-400">
346+
© 2025{' '}
347+
<a
348+
href="https://annotation.garden"
349+
target="_blank"
350+
rel="noopener noreferrer"
351+
className="text-agi-teal dark:text-agi-teal-400 hover:text-agi-orange transition-colors inline-flex items-center gap-1"
352+
>
353+
Annotation Garden Initiative
354+
<ExternalLink className="w-3 h-3" />
355+
</a>
356+
</div>
357+
{platformInfo && (
358+
<div className="flex items-center gap-2">
359+
<span className="text-xs text-agi-teal-500 dark:text-zinc-500">Inference Platform:</span>
360+
<PlatformBadge platform={platformInfo} />
361+
</div>
362+
)}
338363
</div>
339364
</footer>
340365
</main>

frontend/app/types.ts

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,22 @@ export interface Annotation {
2929
model: string
3030
temperature?: number
3131
prompts: Record<string, PromptAnnotation>
32+
platform?: PlatformInfo | null // Override for this annotation (e.g., cloud API)
33+
}
34+
35+
// A single GPU/accelerator as reported in annotation-file metadata
// (produced by the Python-side platform_info utility).
export interface GPUInfo {
36+
name: string
37+
vendor: string // nvidia, amd, intel, apple
38+
// Total VRAM in megabytes; null/absent when the detector could not read it.
memory_mb?: number | null
39+
// Driver version string; null/absent when unavailable.
driver_version?: string | null
40+
}
41+
42+
// Inference-platform metadata attached to annotation files.
// Mirrors the dict emitted by the Python get_platform_info() helper.
export interface PlatformInfo {
43+
os_name: string
44+
os_version: string
45+
// Version of the Python runtime that produced the annotations.
python_version: string
46+
// All detected accelerators; empty array means CPU-only inference.
accelerators: GPUInfo[]
47+
compute_backend?: string | null // cuda, rocm, mps, oneapi, cpu
3248
}
3349

3450
export interface AnnotationFile {
@@ -37,6 +53,7 @@ export interface AnnotationFile {
3753
annotations: Annotation[]
3854
metadata?: {
3955
processed_at?: string
56+
platform?: PlatformInfo
4057
[key: string]: any
4158
}
4259
}

src/image_annotation/services/vlm_service.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@
1010
from langchain_ollama import ChatOllama
1111
from pydantic import BaseModel, Field
1212

13+
from image_annotation.utils.platform_info import PlatformInfo, get_platform_info
14+
1315

1416
class VLMPrompt(BaseModel):
1517
"""Configuration for a VLM prompt."""
@@ -65,6 +67,11 @@ class VLMResult(BaseModel):
6567

6668
# Additional metadata
6769
temperature: float | None = None
70+
platform: dict | None = Field(
71+
None,
72+
description="Platform info override for this annotation (e.g., cloud API). "
73+
"If None, uses the default platform from metadata.",
74+
)
6875

6976

7077
class VLMService:
@@ -76,6 +83,7 @@ def __init__(
7683
base_url: str = "http://localhost:11434",
7784
temperature: float = 0.3,
7885
timeout: int = 60,
86+
platform_override: dict | None = None,
7987
):
8088
"""Initialize the VLM service.
8189
@@ -84,11 +92,14 @@ def __init__(
8492
base_url: OLLAMA API base URL.
8593
temperature: Generation temperature.
8694
timeout: Request timeout in seconds.
95+
platform_override: Optional platform info to use instead of auto-detection.
96+
Useful for cloud APIs (OpenAI, Anthropic) or custom setups.
8797
"""
8898
self.model = model
8999
self.base_url = base_url
90100
self.temperature = temperature
91101
self.timeout = timeout
102+
self.platform_override = platform_override
92103
self._llm_cache = {}
93104

94105
def _get_llm(self, model: str | None = None) -> ChatOllama:
@@ -287,6 +298,7 @@ def annotate_image(
287298
performance_metrics=performance_metrics,
288299
temperature=self.temperature,
289300
error=error,
301+
platform=self.platform_override, # Include if set (for cloud APIs, etc.)
290302
)
291303

292304
except Exception as e:
@@ -303,6 +315,7 @@ def annotate_image(
303315
performance_metrics=PerformanceMetrics(total_duration_ms=total_time_ms),
304316
temperature=self.temperature,
305317
error=str(e),
318+
platform=self.platform_override,
306319
)
307320

308321
def annotate_batch(
@@ -400,6 +413,9 @@ def save_results(results: list[VLMResult], output_dir: str | Path) -> Path:
400413
if num_with_speed > 0:
401414
avg_speed = avg_speed / num_with_speed
402415

416+
# Get platform information
417+
platform_info = get_platform_info()
418+
403419
# Convert results to dict format
404420
results_dict = {
405421
"metadata": {
@@ -409,6 +425,7 @@ def save_results(results: list[VLMResult], output_dir: str | Path) -> Path:
409425
"failed": sum(1 for r in results if r.error is not None),
410426
"total_tokens_used": total_tokens,
411427
"average_tokens_per_second": round(avg_speed, 2) if num_with_speed > 0 else None,
428+
"platform": platform_info.to_dict(),
412429
},
413430
"annotations": [r.model_dump(mode="json") for r in results],
414431
}

src/image_annotation/utils/__init__.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,21 @@
77
remove_model,
88
reorder_annotations,
99
)
10+
from .platform_info import (
11+
GPUInfo,
12+
PlatformInfo,
13+
get_platform_info,
14+
get_platform_summary,
15+
)
1016

1117
__all__ = [
1218
"reorder_annotations",
1319
"remove_model",
1420
"get_annotation_stats",
1521
"filter_annotations_by_tokens",
1622
"export_to_csv",
23+
"GPUInfo",
24+
"PlatformInfo",
25+
"get_platform_info",
26+
"get_platform_summary",
1727
]

0 commit comments

Comments
 (0)