|
25 | 25 | import modelopt.onnx.autocast.utils as utils |
26 | 26 | import modelopt.onnx.utils as onnx_utils |
27 | 27 | from modelopt.onnx.autocast import convert_to_mixed_precision |
| 28 | +from modelopt.onnx.autocast.__main__ import get_parser, main |
28 | 29 | from modelopt.onnx.autocast.logging_config import configure_logging |
29 | 30 |
|
30 | 31 | configure_logging("DEBUG") |
@@ -187,3 +188,112 @@ def test_conv_isinf_conversion(tmp_path, opset_version): |
187 | 188 | opset_version = onnx_utils.get_opset_version(converted_model) |
188 | 189 | supported_dtype = "float32" if opset_version < 20 else "float16" |
189 | 190 | assert assert_input_precision(isinf_nodes, dtype=supported_dtype) |
| 191 | + |
| 192 | + |
@pytest.mark.parametrize("target_opset", [13, 17, 19, 21])
def test_opset_parameter(temp_model_path, target_opset):
    """Verify that the ``opset`` argument controls the converted model's opset version."""
    result = convert_to_mixed_precision(
        onnx_path=temp_model_path,
        low_precision_type="fp16",
        opset=target_opset,
    )

    # The converter may bump the opset higher, but must never fall below the request.
    output_opset = onnx_utils.get_opset_version(result)
    assert output_opset >= target_opset, f"Expected opset >= {target_opset}, got {output_opset}"

    # The converted model must still be structurally valid ONNX.
    onnx.checker.check_model(result)
| 207 | + |
| 208 | + |
def test_opset_fp16_warning(temp_model_path, caplog):
    """An fp16 conversion targeting opset < 13 should emit a support warning."""
    model = convert_to_mixed_precision(
        onnx_path=temp_model_path, low_precision_type="fp16", opset=11
    )

    # Both parts of the expected warning message must appear in the captured log.
    assert "limited FP16 support" in caplog.text, (
        "Expected warning about FP16 support with low opset"
    )
    assert "Recommended minimum opset is 13" in caplog.text

    # The warning is non-fatal: conversion still yields a model.
    assert isinstance(model, onnx.ModelProto)
| 224 | + |
| 225 | + |
def test_opset_bf16_warning(temp_model_path, caplog):
    """A bf16 conversion targeting opset < 22 should emit a support warning."""
    model = convert_to_mixed_precision(
        onnx_path=temp_model_path, low_precision_type="bf16", opset=13
    )

    # Both parts of the expected warning message must appear in the captured log.
    assert "limited BF16 support" in caplog.text, (
        "Expected warning about BF16 support with low opset"
    )
    assert "Recommended minimum opset is 22" in caplog.text

    # The warning is non-fatal: conversion still yields a model.
    assert isinstance(model, onnx.ModelProto)
| 241 | + |
| 242 | + |
def test_opset_downgrade_warning(temp_model_path, caplog):
    """Requesting an opset below the source model's opset should log a downgrade warning."""
    # The temp_model_path fixture produces a model at opset 20; ask for 13.
    model = convert_to_mixed_precision(
        onnx_path=temp_model_path, low_precision_type="fp16", opset=13
    )

    # The downgrade must be surfaced to the user via the log.
    assert "lower than the original model's opset" in caplog.text, (
        "Expected warning about downgrading opset"
    )

    # The warning is non-fatal: conversion still yields a model.
    assert isinstance(model, onnx.ModelProto)
| 258 | + |
| 259 | + |
def test_opset_cli_argument(temp_model_path, tmp_path):
    """End-to-end check that the ``--opset`` CLI flag is parsed and honored."""
    output_path = tmp_path / "test_output.onnx"
    cli_args = [
        "--onnx_path", temp_model_path,
        "--output_path", str(output_path),
        "--opset", "21",
        "--low_precision_type", "fp16",
    ]

    converted = main(cli_args)

    # The in-memory result must meet the requested minimum opset.
    output_opset = onnx_utils.get_opset_version(converted)
    assert output_opset >= 21, f"Expected opset >= 21, got {output_opset}"

    # The CLI should also have persisted the model to disk.
    assert output_path.exists()

    # The persisted copy must round-trip and pass the ONNX checker.
    reloaded = onnx.load(str(output_path))
    onnx.checker.check_model(reloaded)
| 287 | + |
| 288 | + |
def test_opset_parser_argument():
    """The argument parser should accept ``--opset`` and default it to None."""
    parser = get_parser()

    # Explicitly supplied value is parsed as an integer.
    parsed = parser.parse_args(["--onnx_path", "test.onnx", "--opset", "19"])
    assert parsed.opset == 19

    # Omitting the flag leaves the attribute unset (None).
    parsed = parser.parse_args(["--onnx_path", "test.onnx"])
    assert parsed.opset is None
0 commit comments