Skip to content

Commit 67ae555

Browse files
authored
[GHA] Replaced visual_language_chat_sample-ubuntu-llava (#1802)
1 parent e54ac82 commit 67ae555

File tree

7 files changed

+64
-65
lines changed

7 files changed

+64
-65
lines changed

.github/workflows/causal_lm_cpp.yml

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -363,18 +363,6 @@ jobs:
363363
f.write(content.encode("utf-8"))
364364
- run: diff cpp2.txt py2.txt
365365

366-
visual_language_chat_sample-ubuntu-llava_1_5:
367-
uses: ./.github/workflows/job_vlm_sample_llava.yml
368-
with:
369-
model_id: llava-hf/llava-1.5-7b-hf
370-
model_dir: llava_1_5_7b_ov
371-
372-
visual_language_chat_sample-ubuntu-llava_next:
373-
uses: ./.github/workflows/job_vlm_sample_llava.yml
374-
with:
375-
model_id: llava-hf/llava-v1.6-mistral-7b-hf
376-
model_dir: llava_v1_6_mistral_7b_ov
377-
378366
visual_language_chat_sample-ubuntu-internvl2:
379367
runs-on: ubuntu-22.04-16-cores
380368
steps:
@@ -575,7 +563,7 @@ jobs:
575563
Overall_Status:
576564
name: ci/gha_overall_status_causal_lm
577565
needs: [cpp-greedy_causal_lm-windows, cpp-Phi-1_5, cpp-greedy_causal_lm-redpajama-3b-chat, cpp-chat_sample-ubuntu, cpp-continuous-batching-ubuntu,
578-
visual_language_chat_sample-ubuntu-minicpm_v2_6, visual_language_chat_sample-ubuntu-llava_1_5, visual_language_chat_sample-ubuntu-llava_next, visual_language_chat_sample-ubuntu-internvl2,
566+
visual_language_chat_sample-ubuntu-minicpm_v2_6, visual_language_chat_sample-ubuntu-internvl2,
579567
cpp-continuous-batching-windows, cpp-continuous-batching-macos]
580568
if: ${{ always() }}
581569
runs-on: ubuntu-latest

.github/workflows/job_vlm_sample_llava.yml

Lines changed: 0 additions & 49 deletions
This file was deleted.

.github/workflows/linux.yml

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -336,9 +336,13 @@ jobs:
336336
marker: 'image_generation'
337337
cmd: 'tests/python_tests/samples'
338338
runner: 'aks-linux-8-cores-64gb'
339+
- name: 'VLM'
340+
marker: 'vlm'
341+
cmd: 'tests/python_tests/samples'
342+
runner: 'aks-linux-8-cores-64gb'
339343

340344
needs: [ openvino_download, genai_build_cmake, genai_build_wheel, genai_build_samples ]
341-
timeout-minutes: 45
345+
timeout-minutes: 60
342346
defaults:
343347
run:
344348
shell: bash

tests/python_tests/pytest.ini

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,14 @@ markers =
99
; llm - Tests related to large language models.
1010
; whisper - Tests related to the Whisper model.
1111
; image_generation - Tests related to image generation.
12+
; vlm - Tests related to visual language models.
1213
precommit
1314
nightly
1415
real_models
1516
samples
1617
llm
1718
whisper
1819
image_generation
20+
vlm
1921

2022
addopts = -m precommit

tests/python_tests/samples/conftest.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,22 @@
5757
"name": "dreamlike-art/dreamlike-anime-1.0",
5858
"convert_args": ['--trust-remote-code', '--weight-format', 'fp16', "--task", "stable-diffusion"]
5959
},
60+
"LCM_Dreamshaper_v7-int8-ov": {
61+
"name": "OpenVINO/LCM_Dreamshaper_v7-int8-ov",
62+
"convert_args": []
63+
},
64+
"llava-1.5-7b-hf": {
65+
"name": "llava-hf/llava-1.5-7b-hf",
66+
"convert_args": ['--trust-remote-code']
67+
},
68+
"llava-v1.6-mistral-7b-hf": {
69+
"name": "llava-hf/llava-v1.6-mistral-7b-hf",
70+
"convert_args": ['--trust-remote-code']
71+
},
72+
"dreamlike-anime-1.0": {
73+
"name": "dreamlike-art/dreamlike-anime-1.0",
74+
"convert_args": ['--trust-remote-code', '--weight-format', 'fp16', "--task", "stable-diffusion"]
75+
},
6076
"LCM_Dreamshaper_v7-int8-ov": {
6177
"name": "OpenVINO/LCM_Dreamshaper_v7-int8-ov",
6278
"convert_args": []
@@ -66,6 +82,7 @@
6682
TEST_FILES = {
6783
"how_are_you_doing_today.wav": "https://storage.openvinotoolkit.org/models_contrib/speech/2021.2/librispeech_s5/how_are_you_doing_today.wav",
6884
"adapter_model.safetensors": "https://huggingface.co/smangrul/tinyllama_lora_sql/resolve/main/adapter_model.safetensors",
85+
"monalisa.jpg": "https://llava-vl.github.io/static/images/monalisa.jpg",
6986
"soulcard.safetensors": "https://civitai.com/api/download/models/72591",
7087
"image.png": "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png",
7188
"mask_image.png": "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

tests/python_tests/samples/test_utils.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,10 @@
33
from conftest import logger
44
import subprocess # nosec B404
55

6-
def run_sample(command, input_data=None):
    """Run a sample as a subprocess, logging its invocation and output.

    :param command: argv list for the subprocess (executed without a shell).
    :param input_data: optional text piped to the subprocess's stdin;
        ``None`` means stdin is not redirected.
    :raises subprocess.CalledProcessError: if the command exits non-zero
        (``check=True``).
    :return: the ``subprocess.CompletedProcess`` with captured stdout/stderr.
    """
    logger.info("Running sample command: %s", " ".join(command))
    # `is not None` (not truthiness) so an explicit empty-string stdin is
    # still logged rather than silently skipped.
    if input_data is not None:
        logger.info("Input data: %s", input_data)
    result = subprocess.run(
        command,
        capture_output=True,
        text=True,
        check=True,
        encoding='utf-8',
        input=input_data,
    )
    logger.info("Sample output: %s", result.stdout)
    return result
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
# Copyright (C) 2025 Intel Corporation
2+
# SPDX-License-Identifier: Apache-2.0
3+
4+
import os
5+
import pytest
6+
import sys
7+
8+
from conftest import SAMPLES_PY_DIR, SAMPLES_CPP_DIR
9+
from test_utils import run_sample
10+
11+
class TestVisualLanguageChat:
    """Compare the Python and C++ visual_language_chat samples on LLaVA models."""

    @pytest.mark.vlm
    @pytest.mark.samples
    @pytest.mark.parametrize(
        "convert_model, sample_args",
        [
            pytest.param("llava-1.5-7b-hf", 'Who drew this painting?\nWhen did the painter live?'),
            pytest.param("llava-v1.6-mistral-7b-hf", 'Who drew this painting?\nWhen did the painter live?'),
        ],
        indirect=["convert_model"],
    )
    @pytest.mark.parametrize("download_test_content", ["monalisa.jpg"], indirect=True)
    def test_sample_visual_language_chat(self, convert_model, download_test_content, sample_args):
        """Run both samples on the same model/image/prompts and assert identical stdout.

        ``convert_model`` and ``download_test_content`` are indirect fixtures
        (presumably yielding the converted model dir and downloaded image path
        — defined in conftest). ``sample_args`` is the chat prompt sequence
        fed to each sample's stdin.
        """
        # Test Python sample
        py_script = os.path.join(SAMPLES_PY_DIR, "visual_language_chat/visual_language_chat.py")
        py_command = [sys.executable, py_script, convert_model, download_test_content]
        py_result = run_sample(py_command, sample_args)

        # Test CPP sample
        cpp_sample = os.path.join(SAMPLES_CPP_DIR, 'visual_language_chat')
        cpp_command = [cpp_sample, convert_model, download_test_content]
        cpp_result = run_sample(cpp_command, sample_args)

        # Compare results; include both outputs so a mismatch is debuggable
        # from the CI log (the original message carried no diagnostics).
        assert py_result.stdout == cpp_result.stdout, (
            f"Python and C++ sample outputs differ.\n"
            f"--- Python ---\n{py_result.stdout}\n"
            f"--- C++ ---\n{cpp_result.stdout}"
        )

0 commit comments

Comments
 (0)