@@ -125,9 +125,7 @@ def fixture_report_data(output_dir, run_benchmark_app, forked):
     df.to_csv(output_file, index=False)


-def maybe_skip_test_case(
-    test_model_param, run_fp32_backend, run_torch_cuda_backend, run_with_x86_quantizer, batch_size
-):
+def maybe_skip_test_case(test_model_param, run_fp32_backend, run_torch_cuda_backend, batch_size):
     if test_model_param["backend"] == BackendType.FP32 and not run_fp32_backend:
         pytest.skip("To run test for not quantized model use --fp32 argument")
     if (
@@ -137,11 +135,6 @@ def maybe_skip_test_case(
         pytest.skip(f"To run test for {test_model_param['backend'].value} backend use --cuda argument")
     if batch_size and batch_size > 1 and test_model_param.get("batch_size", 1) == 1:
         pytest.skip("The model does not support batch_size > 1. Please use --batch-size 1.")
-    if (
-        test_model_param["backend"] in [BackendType.X86_QUANTIZER_AO, BackendType.X86_QUANTIZER_NNCF]
-        and not run_with_x86_quantizer
-    ):
-        pytest.skip("To validate quantization with the X86Quantizer use the --x86quantizer argument")
     return test_model_param


@@ -209,7 +202,6 @@ def run_pipeline(
     batch_size: Optional[int],
     run_fp32_backend: bool,
     run_torch_cuda_backend: bool,
-    run_with_x86_quantizer: bool,
     subset_size: Optional[int],
     run_benchmark_app: bool,
     capsys: pytest.CaptureFixture,
@@ -222,7 +214,7 @@ def run_pipeline(
         msg = f"{test_case_name} does not exist in 'reference_data.yaml'"
         raise nncf.ValidationError(msg)
     test_model_param = test_cases[test_case_name]
-    maybe_skip_test_case(test_model_param, run_fp32_backend, run_torch_cuda_backend, run_with_x86_quantizer, batch_size)
+    maybe_skip_test_case(test_model_param, run_fp32_backend, run_torch_cuda_backend, batch_size)
     pipeline_cls = test_model_param["pipeline_cls"]
     pipeline_kwargs = create_pipeline_kwargs(test_model_param, subset_size, test_case_name, reference_data)
     pipeline_kwargs.update(
@@ -280,7 +272,6 @@ def test_ptq_quantization(
     batch_size: Optional[int],
     run_fp32_backend: bool,
     run_torch_cuda_backend: bool,
-    run_with_x86_quantizer: bool,
     subset_size: Optional[int],
     run_benchmark_app: bool,
     capsys: pytest.CaptureFixture,
@@ -298,7 +289,6 @@ def test_ptq_quantization(
         batch_size,
         run_fp32_backend,
         run_torch_cuda_backend,
-        run_with_x86_quantizer,
         subset_size,
         run_benchmark_app,
         capsys,
@@ -335,7 +325,6 @@ def test_weight_compression(
         batch_size,
         run_fp32_backend,
         run_torch_cuda_backend,
-        False,  # Do not run with the X86Quantizer
         subset_size,
         run_benchmark_app,
         capsys,
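For context, the run_with_x86_quantizer fixture and the --x86quantizer option that this diff drops were presumably wired up in the suite's conftest.py. The sketch below shows how such a pytest option-to-fixture bridge is typically defined; the option and fixture names are assumed from the removed code and skip message, not taken from this change.

import pytest


def pytest_addoption(parser):
    # Assumed CLI flag, inferred from the skip message removed above.
    parser.addoption(
        "--x86quantizer",
        action="store_true",
        default=False,
        help="Run test cases that use the X86Quantizer backends",
    )


@pytest.fixture(scope="session")
def run_with_x86_quantizer(pytestconfig):
    # Exposes the command-line flag to tests as a boolean fixture.
    return pytestconfig.getoption("x86quantizer")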