6 changes: 3 additions & 3 deletions .github/workflows/conformance_weight_compression.yml
@@ -14,13 +14,13 @@ on:

 jobs:
   examples-cpu:
-    name: Weight compression [${{ matrix.group }}/4]
+    name: Weight compression [${{ matrix.group }}/6]
     runs-on: ubuntu-latest-16-cores
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        group: [1, 2, 3, 4]
+        group: [1, 2, 3, 4, 5, 6]
     defaults:
       run:
         shell: bash
@@ -50,7 +50,7 @@ jobs:
           --junit-xml=pytest-results.xml \
           --durations-path=tests/post_training/data/wc_test_durations.json \
           --splitting-algorithm=least_duration \
-          --splits 4 \
+          --splits 6 \
           --group ${{ matrix.group }} \
           ${{ github.event.inputs.pytest_args || '' }}
         ret=$?
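For context, `--splits`, `--group`, `--durations-path`, and `--splitting-algorithm=least_duration` come from the pytest-split plugin, which uses the recorded durations file to balance the groups rather than splitting by test count. Roughly, the `least_duration` idea is a greedy assignment: take tests longest-first and put each one into the group with the smallest accumulated runtime. A minimal Python sketch of that idea (not pytest-split's actual implementation; the file path is the one used in the workflow above):

```python
import json
from typing import Dict, List


def split_least_duration(durations: Dict[str, float], splits: int) -> List[List[str]]:
    """Greedy longest-processing-time split: assign each test (longest first)
    to the group whose accumulated recorded duration is currently the smallest."""
    groups: List[List[str]] = [[] for _ in range(splits)]
    totals = [0.0] * splits
    for test, duration in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        idx = totals.index(min(totals))  # group with the least accumulated time
        groups[idx].append(test)
        totals[idx] += duration
    return groups


if __name__ == "__main__":
    with open("tests/post_training/data/wc_test_durations.json") as f:
        durations = json.load(f)
    for i, group in enumerate(split_least_duration(durations, splits=6), start=1):
        total = sum(durations[t] for t in group)
        print(f"group {i}: {len(group)} tests, ~{total:.0f}s")
```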
22 changes: 17 additions & 5 deletions tests/post_training/data/wc_test_durations.json
@@ -1,20 +1,32 @@
{
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_awq_backup_mode_none_backend_OV]": 249,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_ONNX]": 725,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_OV]": 368,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_stateful_backend_OV]": 371,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_stateful_backend_OV]": 205,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_FX_TORCH]": 368,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_ONNX]": 213,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_OV]": 206,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_TORCH]": 206,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_gptq_scale_estimation_stateful_backend_OV]": 1161,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_lora_stateful_backend_OV]": 473,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_FX_TORCH]": 352,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_ONNX]": 154,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_OV]": 164,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_TORCH]": 210,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_FP32]": 0,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_ONNX]": 182,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_OV]": 187,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_opset19_backend_ONNX]": 512,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int4_data_free_backend_FX_TORCH]": 274,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int4_data_free_backend_TORCH]": 123,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int8_data_free_backend_FX_TORCH]": 276,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int8_data_free_backend_TORCH]": 165,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_NF4_scale_estimation_stateful_per_channel_backend_OV]": 193,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_FX_TORCH]": 423,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_ONNX]": 518,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_OV]": 251,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_OV]": 164,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_TORCH]": 210,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_ONNX]": 182,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_opset19_backend_ONNX]": 512,
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_awq_backend_ONNX]": 154
"tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_TORCH]": 274,
"tests/post_training/test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_FX_TORCH]": 472,
"tests/post_training/test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_TORCH]": 352
}
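Since these durations drive the split, a quick way to gauge the effect of moving from 4 to 6 groups is to total the recorded times and compare the theoretical per-group lower bound (ignoring model download and environment setup overhead). A small self-contained sketch, assuming the repository is checked out locally:

```python
import json

# Path of the durations file changed in this PR, relative to the repo root.
with open("tests/post_training/data/wc_test_durations.json") as f:
    durations = json.load(f)

total = sum(durations.values())
longest = max(durations.values())
for splits in (4, 6):
    # No group can finish faster than max(total / splits, the single longest test).
    bound = max(total / splits, longest)
    print(f"{splits} splits: total {total:.0f}s, per-group lower bound ~{bound:.0f}s")
```

The duration values themselves are the kind of data pytest-split records with its `--store-durations` option, so the file can be refreshed as test runtimes drift.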