|
4 | 4 | import numpy as np |
5 | 5 | import pytest |
6 | 6 | from qkeras.qconv2d_batchnorm import QConv2DBatchnorm |
7 | | -from qkeras.qconvolutional import QDepthwiseConv2D |
| 7 | +from qkeras.qconvolutional import QDepthwiseConv2D, QSeparableConv1D, QSeparableConv2D |
8 | 8 | from qkeras.qlayers import QActivation, QDense |
9 | 9 | from qkeras.quantizers import ( |
10 | 10 | binary, |
@@ -478,3 +478,95 @@ def test_quantised_po2_bit_width(backend, io_type, strategy): |
478 | 478 | y_hls = hls_model.predict(np.ascontiguousarray(X)) |
479 | 479 |
|
480 | 480 | np.testing.assert_allclose(y_hls.flatten(), y_keras.flatten(), rtol=2e-2) |
| 481 | + |
| 482 | + |
@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
@pytest.mark.parametrize('io_type', ['io_stream'])
def test_qseparableconv1d(backend, io_type):
    '''
    Test proper handling of QSeparableConv1D.

    Builds a single-layer QKeras model with 8-bit depthwise/pointwise/bias
    quantizers, converts it with hls4ml, and checks that the HLS emulation
    matches the QKeras prediction bit-exactly on quantized inputs.
    '''
    x_in = Input((13, 20), name='input_layer')
    x = QSeparableConv1D(
        5,
        3,
        depthwise_quantizer=quantized_bits(8, 3, alpha=1),
        pointwise_quantizer=quantized_bits(8, 3, alpha=1),
        bias_quantizer=quantized_bits(8, 3, alpha=1),
        name='qsepconv_1',
    )(x_in)
    model = Model(inputs=x_in, outputs=x)

    config = hls4ml.utils.config_from_keras_model(
        model, granularity='name', backend=backend, default_precision='fixed<23,7>'
    )

    # Use 8 bits for input
    config['LayerName']['input_layer']['Precision']['result'] = 'fixed<8,1>'
    # default_precision will be used for accum_t and result_t of the conv layer, so we don't need to set them here.
    # We need <15,4> for the result of the depthwise step.
    config['LayerName']['qsepconv_1']['Precision']['dw_output'] = 'fixed<15,4>'

    output_dir = str(test_root_path / f'hls4mlprj_qsepconv1d_{backend}_{io_type}')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir=output_dir,
        io_type=io_type,
    )
    hls_model.compile()

    # Seed the generator so a failure is reproducible across runs.
    data = np.random.default_rng(42).random((100, 13, 20))
    # Quantize the input to the same 8-bit format the HLS model expects.
    input_quantizer = quantized_bits(8, 0, alpha=1)
    dataq = input_quantizer(data).numpy()

    y_qkeras = model.predict(dataq)
    y_hls4ml = hls_model.predict(dataq)

    # Fully quantized network on quantized inputs: results must match bit-exactly.
    np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=0, atol=0)
| 527 | + |
| 528 | + |
@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
@pytest.mark.parametrize('io_type', ['io_stream'])
def test_qseparableconv2d(backend, io_type):
    '''
    Test proper handling of QSeparableConv2D.

    Builds a single-layer QKeras model with 8-bit depthwise/pointwise/bias
    quantizers, converts it with hls4ml, and checks that the HLS emulation
    matches the QKeras prediction bit-exactly on quantized inputs.
    '''
    x_in = Input((13, 21, 20), name='input_layer')
    x = QSeparableConv2D(
        5,
        3,
        depthwise_quantizer=quantized_bits(8, 3, alpha=1),
        pointwise_quantizer=quantized_bits(8, 3, alpha=1),
        bias_quantizer=quantized_bits(8, 3, alpha=1),
        name='qsepconv_1',
    )(x_in)
    model = Model(inputs=x_in, outputs=x)

    config = hls4ml.utils.config_from_keras_model(
        model, granularity='name', backend=backend, default_precision='fixed<23,7>'
    )

    # Use 8 bits for input
    config['LayerName']['input_layer']['Precision']['result'] = 'fixed<8,1>'
    # default_precision will be used for accum_t and result_t of the conv layer, so we don't need to set them here.
    # We need <15,4> for the result of the depthwise step.
    config['LayerName']['qsepconv_1']['Precision']['dw_output'] = 'fixed<15,4>'

    output_dir = str(test_root_path / f'hls4mlprj_qsepconv2d_{backend}_{io_type}')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir=output_dir,
        io_type=io_type,
    )
    hls_model.compile()

    # Seed the generator so a failure is reproducible across runs.
    data = np.random.default_rng(42).random((100, 13, 21, 20))
    # Quantize the input to the same 8-bit format the HLS model expects.
    input_quantizer = quantized_bits(8, 0, alpha=1)
    dataq = input_quantizer(data).numpy()

    y_qkeras = model.predict(dataq)
    y_hls4ml = hls_model.predict(dataq)

    # Fully quantized network on quantized inputs: results must match bit-exactly.
    np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=0, atol=0)
0 commit comments