Commit b29b4f8
[ci] More workers for WC tests (#3701)
### Changes

Parallelize the tests across 6 workers so that the full run completes in around 30 minutes. Fill in the missed durations.

### Reason for changes

Speed up the job.

### Tests

https://github.com/openvinotoolkit/nncf/actions/runs/18853306010
1 parent 208b2ef commit b29b4f8

2 files changed (+20, -8 lines)


.github/workflows/conformance_weight_compression.yml

Lines changed: 3 additions & 3 deletions

@@ -14,13 +14,13 @@ on:
 
 jobs:
   examples-cpu:
-    name: Weight compression [${{ matrix.group }}/4]
+    name: Weight compression [${{ matrix.group }}/6]
     runs-on: ubuntu-latest-16-cores
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        group: [1, 2, 3, 4]
+        group: [1, 2, 3, 4, 5, 6]
     defaults:
       run:
         shell: bash
@@ -50,7 +50,7 @@ jobs:
           --junit-xml=pytest-results.xml \
           --durations-path=tests/post_training/data/wc_test_durations.json \
           --splitting-algorithm=least_duration \
-          --splits 4 \
+          --splits 6 \
           --group ${{ matrix.group }} \
           ${{ github.event.inputs.pytest_args || '' }}
           ret=$?
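
For reference, the shards are built by pytest-split's least_duration splitting algorithm, which packs tests into groups based on the recorded durations, so going from 4 to 6 workers only pays off when the durations file is reasonably complete. Below is a minimal Python sketch of that greedy idea; this is a simplified illustration, not pytest-split's actual implementation, and split_by_least_duration is a hypothetical helper mirroring the new 6-way matrix.

```python
import json

# Simplified "least duration" style greedy split (longest test first,
# always hand the next test to the currently lightest group).
# NOT pytest-split's actual code; for illustration only.
def split_by_least_duration(durations: dict, splits: int) -> list:
    groups = [[] for _ in range(splits)]
    totals = [0.0] * splits
    for test, seconds in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        idx = totals.index(min(totals))  # group with the least accumulated time
        groups[idx].append(test)
        totals[idx] += seconds
    return groups


if __name__ == "__main__":
    # Path taken from the workflow's --durations-path argument above.
    with open("tests/post_training/data/wc_test_durations.json") as f:
        durations = json.load(f)
    for i, group in enumerate(split_by_least_duration(durations, splits=6), start=1):
        group_min = sum(durations[t] for t in group) / 60
        print(f"group {i}: {len(group)} tests, ~{group_min:.1f} min")
```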
tests/post_training/data/wc_test_durations.json

Lines changed: 17 additions & 5 deletions
@@ -1,20 +1,32 @@
 {
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_awq_backup_mode_none_backend_OV]": 249,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_ONNX]": 725,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_OV]": 368,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_stateful_backend_OV]": 371,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_stateful_backend_OV]": 205,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_FX_TORCH]": 368,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_ONNX]": 213,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_OV]": 206,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_TORCH]": 206,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_gptq_scale_estimation_stateful_backend_OV]": 1161,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_lora_stateful_backend_OV]": 473,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_FX_TORCH]": 352,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_ONNX]": 154,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_OV]": 164,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_TORCH]": 210,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_FP32]": 0,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_ONNX]": 182,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_OV]": 187,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_opset19_backend_ONNX]": 512,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int4_data_free_backend_FX_TORCH]": 274,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int4_data_free_backend_TORCH]": 123,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int8_data_free_backend_FX_TORCH]": 276,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int8_data_free_backend_TORCH]": 165,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_NF4_scale_estimation_stateful_per_channel_backend_OV]": 193,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_FX_TORCH]": 423,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_ONNX]": 518,
     "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_OV]": 251,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_OV]": 164,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_TORCH]": 210,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_ONNX]": 182,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_opset19_backend_ONNX]": 512,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_ONNX]": 154
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_TORCH]": 274,
+    "tests/post_training/test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_FX_TORCH]": 472,
+    "tests/post_training/test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_TORCH]": 352
 }
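
As a rough sanity check on the filled-in values (a hypothetical snippet, assuming this is the tests/post_training/data/wc_test_durations.json file referenced by --durations-path above): summing all recorded durations and dividing by the 6 groups gives a lower bound on per-worker wall time, which should land near the ~30-minute target from the commit message. Missing entries can typically be regenerated by running the suite with pytest-split's --store-durations option against the same durations path.

```python
import json

# Lower-bound estimate of per-worker wall time: total recorded test time spread
# evenly across the 6 matrix groups (ignores imbalance and per-job setup time).
with open("tests/post_training/data/wc_test_durations.json") as f:
    durations = json.load(f)

total_min = sum(durations.values()) / 60
print(f"{len(durations)} recorded tests, {total_min:.1f} min of test time in total")
print(f"best case with 6 groups: ~{total_min / 6:.1f} min per worker")
```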
