
Commit e54ac82

[GHA] replaced benchmark_genai-ubuntu (#1817)
Replaced the benchmark_genai-ubuntu workflow pipeline with a pytest-based samples test.
1 parent 088f3d0 commit e54ac82

File tree

2 files changed: +40 -36 lines

.github/workflows/causal_lm_cpp.yml

Lines changed: 0 additions & 36 deletions
@@ -269,42 +269,6 @@ jobs:
           diff pred2.txt ref.txt
           echo "Chat sample python" passed

-  benchmark_genai-ubuntu:
-    runs-on: ubuntu-24.04
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - uses: actions/setup-python@v4
-        with:
-          python-version: 3.11
-      - name: Install OpenVINO
-        run: |
-          mkdir ./ov/
-          curl ${{ env.l_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
-          sudo ./ov/install_dependencies/install_openvino_dependencies.sh
-      - name: Build app
-        run: |
-          source ./ov/setupvars.sh
-          cmake -DCMAKE_BUILD_TYPE=Release -S ./ -B ./build/
-          cmake --build ./build/ --config Release -j
-      - name: Download and convert and model
-        run: |
-          source ./ov/setupvars.sh
-          python -m pip install ./thirdparty/openvino_tokenizers/[transformers] --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly
-          python -m pip install -r ./samples/requirements.txt
-          optimum-cli export openvino --trust-remote-code --weight-format fp16 --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 TinyLlama-1.1B-Chat-v1.0
-      - name: Run
-        env:
-          PYTHONPATH: "./build"
-        run: |
-          source ./ov/setupvars.sh
-          timeout 60s ./build/samples/cpp/text_generation/benchmark_genai -m ./TinyLlama-1.1B-Chat-v1.0/ -p "Why is the sun yellow?" --nw 2 -n 3 --mt 50 -d CPU
-          timeout 60s python ./samples/python/text_generation/benchmark_genai.py -m ./TinyLlama-1.1B-Chat-v1.0/ -p "Why is the sun yellow?" -nw 2 -n 3 -mt 50 -d CPU
-
   visual_language_chat_sample-ubuntu-minicpm_v2_6:
     runs-on: ubuntu-22.04-16-cores
     steps:

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import pytest
+import sys
+
+from conftest import SAMPLES_PY_DIR, SAMPLES_CPP_DIR
+from test_utils import run_sample
+
+class TestBenchmarkGenAI:
+    @pytest.mark.llm
+    @pytest.mark.samples
+    @pytest.mark.parametrize(
+        "convert_model, prompt, sample_args",
+        [
+            pytest.param("TinyLlama-1.1B-Chat-v1.0", "Why is the sun yellow?", ["-nw", "2", "-n", "3", "-mt", "50", "-d", "CPU"]),
+        ],
+        indirect=["convert_model"],
+    )
+    def test_py_sample_benchmark_genai(self, convert_model, prompt, sample_args):
+        # Test Python sample
+        py_script = os.path.join(SAMPLES_PY_DIR, "text_generation/benchmark_genai.py")
+        py_command = [sys.executable, py_script, '-m', convert_model, '-p', f'"{prompt}"'] + sample_args
+        run_sample(py_command)
+
+    @pytest.mark.llm
+    @pytest.mark.samples
+    @pytest.mark.parametrize(
+        "convert_model, prompt, sample_args",
+        [
+            pytest.param("TinyLlama-1.1B-Chat-v1.0", "Why is the sun yellow?", ["--nw", "2", "-n", "3", "--mt", "50", "-d", "CPU"]),
+        ],
+        indirect=["convert_model"],
+    )
+    def test_cpp_sample_benchmark_genai(self, convert_model, prompt, sample_args):
+        # Test CPP sample
+        cpp_sample = os.path.join(SAMPLES_CPP_DIR, 'benchmark_genai')
+        cpp_command = [cpp_sample, '-m', convert_model, '-p', f'"{prompt}"'] + sample_args
+        run_sample(cpp_command)
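
The tests call a run_sample helper imported from test_utils, which is not part of this diff. A minimal, hypothetical sketch of such a helper (its real signature and behavior are assumptions): it launches the sample as a subprocess and fails the test on a non-zero exit code.

# Hypothetical sketch of the run_sample helper from test_utils (not shown in this commit).
# Assumption: it runs the given command, raises on failure, and returns the captured output.
import subprocess

def run_sample(command, env=None):
    # Run the sample, capturing stdout/stderr as text; check=True turns a non-zero
    # exit code into CalledProcessError, which pytest reports as a test failure.
    result = subprocess.run(command, capture_output=True, text=True, check=True, env=env)
    return result

With the markers shown above, the tests could presumably be selected with a pytest marker expression such as -m "llm and samples"; the exact invocation depends on the repository's test layout.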

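The indirect=["convert_model"] parametrization hands the short model name to a convert_model fixture defined in the suite's conftest, which this commit does not include. A rough sketch of what such a fixture might do, modeled on the optimum-cli export step of the deleted workflow job (fixture scope, output directory, and the Hugging Face model-id mapping are assumptions):

# Hypothetical conftest fixture; the real convert_model fixture is not shown in this commit.
# It receives the short model name via request.param (because of indirect=["convert_model"])
# and is assumed to export the model with optimum-cli, as the removed workflow job did.
import subprocess
import pytest

@pytest.fixture
def convert_model(request, tmp_path_factory):
    model_name = request.param  # e.g. "TinyLlama-1.1B-Chat-v1.0"
    output_dir = tmp_path_factory.mktemp("models") / model_name
    # Assumed mapping to the Hugging Face id used by the deleted job:
    # "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    subprocess.run(
        ["optimum-cli", "export", "openvino", "--trust-remote-code",
         "--weight-format", "fp16", "--model", f"TinyLlama/{model_name}",
         str(output_dir)],
        check=True,
    )
    return str(output_dir)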