Skip to content

Commit 3ff6dfa

Browse files
Refactor: Replace CLI wrapper with explicit OpenVINO API calls
1 parent af9c948 commit 3ff6dfa

File tree

2 files changed

+47
-178
lines changed

2 files changed

+47
-178
lines changed
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# Biological Texture Generation (Iris) using LCM & OpenVINO™
2+
3+
This notebook demonstrates how to generate high-fidelity biological textures, specifically human iris patterns, using **Latent Consistency Models (LCM)**.
4+
5+
## Key Features
6+
- **Model:** [SimianLuo/LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7)
7+
- **Library:** [Optimum Intel](https://github.com/huggingface/optimum-intel) with the OpenVINO™ backend.
8+
- **Performance:** Generates high-quality images in just 4-8 steps (vs 25-50 steps for standard Stable Diffusion).
9+
- **Hardware:** Supports Intel CPU, GPU (iGPU/dGPU), and NPU.
10+
11+
## Usage
12+
1. Install dependencies.
13+
2. Select your target device (CPU/GPU/NPU) from the widget.
14+
3. Run the pipeline to generate synthetic iris data for biometric research.

notebooks/biological-texture-generation-lcm/biological-texture-generation-lcm.ipynb

Lines changed: 33 additions & 178 deletions
Original file line numberDiff line numberDiff line change
@@ -10,25 +10,9 @@
1010
"id": "cXoFhlAHytJW",
1111
"outputId": "aa7ff889-4d2e-4ee9-ae63-385037be013a"
1212
},
13-
"outputs": [
14-
{
15-
"name": "stdout",
16-
"output_type": "stream",
17-
"text": [
18-
"🗑️ Old images deleted. Ready for fresh generation.\n"
19-
]
20-
}
21-
],
13+
"outputs": [],
2214
"source": [
23-
"# Force delete old experiments to ensure next result is fresh\n",
24-
"import shutil\n",
25-
"import os\n",
26-
"\n",
27-
"if os.path.exists(\"/content/Ocular_Core_Lite/experiments\"):\n",
28-
" shutil.rmtree(\"/content/Ocular_Core_Lite/experiments\")\n",
29-
" print(\"🗑️ Old images deleted. Ready for fresh generation.\")\n",
30-
"\n",
31-
"<img referrerpolicy=\"no-referrer-when-downgrade\" src=\"https://static.scarf.sh/a.png?x-pxid=5b5a4db0-7875-4bfb-bdbd-01698b5b1a77&file=notebooks/biological-texture-generation-lcm/biological-texture-generation-lcm.ipynb\" />\n"
15+
"%pip install -q \"openvino>=2023.2.0\" \"git+https://github.com/huggingface/optimum-intel.git\" \"diffusers\" \"transformers\" \"gradio\" --extra-index-url https://download.pytorch.org/whl/cpu"
3216
]
3317
},
3418
{
@@ -41,142 +25,32 @@
4125
"id": "4hhdDBTlOHhv",
4226
"outputId": "850624c3-4a1e-42ea-b464-cb523b0927e9"
4327
},
44-
"outputs": [
45-
{
46-
"name": "stdout",
47-
"output_type": "stream",
48-
"text": [
49-
"⏳ Configuring environment... (This resolves Python 3.12 version conflicts)\n",
50-
"📦 Installing Ocular-Core-Lite library...\n"
51-
]
52-
}
53-
],
28+
"outputs": [],
5429
"source": [
55-
"# @title 1. Initialize Ocular-Core Environment\n",
56-
"# @markdown This cell sets up the environment by resolving dependency conflicts between Colab's default Python 3.12 and our AI libraries.\n",
57-
"# @markdown **Note:** A \"Session Crashed\" message is expected at the end. This is a deliberate restart to load new libraries.\n",
58-
"\n",
59-
"import os\n",
60-
"import sys\n",
61-
"import subprocess\n",
62-
"\n",
63-
"print(\"⏳ Configuring environment... (This resolves Python 3.12 version conflicts)\")\n",
64-
"\n",
65-
"# 1. CLEANUP: Force uninstall libraries that often conflict in pre-installed Colab environments.\n",
66-
"# We remove 'transformers', 'optimum', etc., to start with a clean slate.\n",
67-
"subprocess.check_call(\n",
68-
" [\n",
69-
" sys.executable,\n",
70-
" \"-m\",\n",
71-
" \"pip\",\n",
72-
" \"uninstall\",\n",
73-
" \"-y\",\n",
74-
" \"transformers\",\n",
75-
" \"optimum\",\n",
76-
" \"optimum-intel\",\n",
77-
" \"openvino\",\n",
78-
" \"peft\",\n",
79-
" \"huggingface-hub\",\n",
80-
" \"tokenizers\",\n",
81-
" \"diffusers\",\n",
82-
" ]\n",
83-
")\n",
30+
"import ipywidgets as widgets\n",
31+
"import openvino as ov\n",
32+
"from optimum.intel.openvino import OVLatentConsistencyModelPipeline\n",
33+
"import gc\n",
8434
"\n",
85-
"# 2. INSTALL STABLE STACK: Install a specific set of versions known to work together (\"The Goldilocks Stack\").\n",
86-
"# - huggingface-hub==0.23.5: Pinned to keep the 'LocalEntryNotFoundError' class that peft needs.\n",
87-
"# - openvino>=2024.1.0: Required for Python 3.12 compatibility.\n",
88-
"# - optimum-intel: The bridge between Transformers and OpenVINO optimization.\n",
89-
"subprocess.check_call(\n",
90-
" [\n",
91-
" sys.executable,\n",
92-
" \"-m\",\n",
93-
" \"pip\",\n",
94-
" \"install\",\n",
95-
" \"huggingface-hub==0.23.5\", # Fixes 'LocalEntryNotFoundError' error\n",
96-
" \"peft==0.11.1\", # Adapter support for LoRA/LCM\n",
97-
" \"transformers==4.41.2\", # Core model architecture\n",
98-
" \"optimum-intel[openvino]>=1.17.0\", # Intel optimization toolkit\n",
99-
" \"openvino>=2024.1.0\", # Inference engine (CPU optimized)\n",
100-
" \"diffusers>=0.27.0\", # Diffusion pipeline support\n",
101-
" ]\n",
102-
")\n",
103-
"\n",
104-
"# 3. INSTALL PROJECT: Install Ocular-Core-Lite directly from your GitHub source.\n",
105-
"print(\"📦 Installing Ocular-Core-Lite library...\")\n",
106-
"subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"git+https://github.com/humbeaniket2006-max/Ocular_Core_Lite.git\"])\n",
107-
"\n",
108-
"print(\"✅ Dependencies locked. Restarting runtime to apply changes...\")\n",
109-
"\n",
110-
"# 4. RESTART RUNTIME: Crucial step!\n",
111-
"# Python needs to reload the newly installed libraries. We force a crash/restart to ensure this happens.\n",
112-
"os.kill(os.getpid(), 9)"
35+
"# Initialize OpenVINO Core\n",
36+
"core = ov.Core()"
11337
]
11438
},
11539
{
11640
"cell_type": "code",
11741
"execution_count": null,
118-
"metadata": {
119-
"colab": {
120-
"base_uri": "https://localhost:8080/"
121-
},
122-
"id": "UslPhY2mRFCD",
123-
"outputId": "98b561d7-1460-47a7-8950-d31e9aa22130"
124-
},
125-
"outputs": [
126-
{
127-
"name": "stdout",
128-
"output_type": "stream",
129-
"text": [
130-
"/content/Ocular_Core_Lite\n",
131-
"👁️ Generating texture for: 'extreme macro photo, human eye, Blue iris, detailed texture, 8k, photorealistic'...\n",
132-
"/usr/local/lib/python3.12/dist-packages/openvino/runtime/__init__.py:10: DeprecationWarning: The `openvino.runtime` module is deprecated and will be removed in the 2026.0 release. Please replace `openvino.runtime` with `openvino`.\n",
133-
" warnings.warn(\n",
134-
"OpenVINO Tokenizer version is not compatible with OpenVINO version. Installed OpenVINO version: 2025.4.1,OpenVINO Tokenizers requires . OpenVINO Tokenizers models will not be added during export.\n",
135-
"2025-12-31 14:24:09.606657: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
136-
"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
137-
"E0000 00:00:1767191049.698336 36401 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
138-
"E0000 00:00:1767191049.734473 36401 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
139-
"W0000 00:00:1767191049.835850 36401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
140-
"W0000 00:00:1767191049.835909 36401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
141-
"W0000 00:00:1767191049.835917 36401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
142-
"W0000 00:00:1767191049.835926 36401 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
143-
"/usr/local/lib/python3.12/dist-packages/transformers/utils/import_utils.py:519: FutureWarning: `is_torch_tpu_available` is deprecated and will be removed in 4.41.0. Please use the `is_torch_xla_available` instead.\n",
144-
" warnings.warn(\n",
145-
"/usr/local/lib/python3.12/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
146-
" warnings.warn(\n",
147-
"Loading Neural Engine...\n",
148-
"Fetching 24 files: 100% 24/24 [00:00<00:00, 112724.86it/s]\n",
149-
"Compiling the vae_decoder to CPU ...\n",
150-
"Compiling the unet to CPU ...\n",
151-
"Compiling the vae_encoder to CPU ...\n",
152-
"Compiling the text_encoder to CPU ...\n",
153-
"Generating...\n",
154-
"100% 21/21 [12:33<00:00, 35.87s/it]\n",
155-
"Saved to ./output.png\n",
156-
"./output.png\n"
157-
]
158-
}
159-
],
42+
"metadata": {},
43+
"outputs": [],
16044
"source": [
161-
"# @title 2. Generate Biological Texture\n",
162-
"import os\n",
163-
"\n",
164-
"# 1. DIRECTORY CHECK: After the runtime restarts (from Cell 1), Colab resets to the root folder.\n",
165-
"# We must navigate back into the project folder so the generator knows where to save files.\n",
166-
"if os.path.exists(\"/content/Ocular_Core_Lite\"):\n",
167-
" %cd /content/Ocular_Core_Lite\n",
168-
"else:\n",
169-
" print(\"⚠️ Project folder not found. Did you run Cell 1?\")\n",
170-
"\n",
171-
"# 2. INPUT: Define the text prompt for the Latent Consistency Model (LCM).\n",
172-
"# We use '8k' and 'photorealistic' keywords to guide the generation quality.\n",
173-
"prompt = \"extreme macro photo, human eye, Blue iris, detailed texture, 8k, photorealistic\" # @param {type:\"string\"}\n",
174-
"\n",
175-
"print(f\"👁️ Generating texture for: '{prompt}'...\")\n",
45+
"# Create a dropdown widget to select the device (CPU, GPU, AUTO)\n",
46+
"device = widgets.Dropdown(\n",
47+
" options=core.available_devices + [\"AUTO\"],\n",
48+
" value='AUTO',\n",
49+
" description='Device:',\n",
50+
" disabled=False,\n",
51+
")\n",
17652
"\n",
177-
"# 3. EXECUTE: Run the command-line tool (CLI) installed by your package.\n",
178-
"# This triggers the 'ocular-generate' entry point defined in your pyproject.toml.\n",
179-
"!ocular-generate \"{prompt}\""
53+
"device"
18054
]
18155
},
18256
{
@@ -186,44 +60,25 @@
18660
"colab": {
18761
"base_uri": "https://localhost:8080/"
18862
},
189-
"id": "fXALqNSmby8W",
190-
"outputId": "8829ab81-272e-42d4-9cec-948186d41c49"
63+
"id": "UslPhY2mRFCD",
64+
"outputId": "98b561d7-1460-47a7-8950-d31e9aa22130"
19165
},
192-
"outputs": [
193-
{
194-
"name": "stdout",
195-
"output_type": "stream",
196-
"text": [
197-
"❌ No images found yet.\n",
198-
"Searched in: /content/Ocular_Core_Lite/experiments/*.png\n",
199-
"Try running Cell 2 again!\n"
200-
]
201-
}
202-
],
66+
"outputs": [],
20367
"source": [
204-
"# @title 3. View Result\n",
205-
"import glob\n",
206-
"import os\n",
207-
"from IPython.display import Image, display\n",
68+
"model_id = \"SimianLuo/LCM_Dreamshaper_v7\"\n",
20869
"\n",
209-
"# 1. SEARCH: Look for PNG images specifically inside the experiments folder.\n",
210-
"# We use a wildcard (*) to find any file ending in .png.\n",
211-
"search_path = \"/content/Ocular_Core_Lite/experiments/*.png\"\n",
212-
"list_of_files = glob.glob(search_path)\n",
70+
"print(f\"Loading {model_id} and converting to OpenVINO IR... (This may take a few minutes)\")\n",
21371
"\n",
214-
"if list_of_files:\n",
215-
" # 2. FILTER: If multiple images exist, pick the most recently created one.\n",
216-
" # This ensures we always show the result of the *latest* run.\n",
217-
" latest_file = max(list_of_files, key=os.path.getctime)\n",
218-
" print(f\"✅ Found generated image: {latest_file}\")\n",
72+
"# Load the model and export it to OpenVINO format automatically\n",
73+
"pipe = OVLatentConsistencyModelPipeline.from_pretrained(\n",
74+
" model_id,\n",
75+
" export=True,\n",
76+
" device=device.value \n",
77+
")\n",
21978
"\n",
220-
" # 3. DISPLAY: Render the image directly in the notebook output.\n",
221-
" display(Image(filename=latest_file))\n",
222-
"else:\n",
223-
" # Error handling if generation failed or folder is missing\n",
224-
" print(\"❌ No images found yet.\")\n",
225-
" print(f\"Searched in: {search_path}\")\n",
226-
" print(\"Try running Cell 2 again!\")"
79+
"# Compile the model explicitly for the selected device\n",
80+
"pipe.compile()\n",
81+
"print(f\"✅ Model successfully compiled for {device.value}\")"
22782
]
22883
},
22984
{

0 commit comments

Comments
 (0)