@@ -113,7 +113,8 @@ def randX_100_16():
 #@pytest.mark.parametrize('bits', [4, 6, 8])
 @pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
 @pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
-def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend):
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend, io_type):
     '''
     Test a single Dense -> Activation layer topology for
     bit exactness with number of bits parameter
@@ -128,11 +129,12 @@ def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend):
 
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['relu1'], rounding_mode='AP_RND_CONV', saturation_mode='AP_SAT')
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    output_dir = str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}_{}'.format(bits, alpha, backend))
+    output_dir = str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}_{}_{}'.format(bits, alpha, backend, io_type))
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
                                                            output_dir=output_dir,
-                                                           backend=backend)
+                                                           backend=backend,
+                                                           io_type=io_type)
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
 
@@ -168,12 +170,13 @@ def randX_100_10():
                          (6, 10, ternary(alpha='auto'), quantized_bits(5,2), ternary(threshold=0.8), True, False),
                          (7, 10, binary(), quantized_bits(5,2), binary(), False, True)])
 @pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
-def test_btnn(make_btnn, randX_100_10, backend):
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+def test_btnn(make_btnn, randX_100_10, backend, io_type):
     model, is_xnor, test_no = make_btnn
     X = randX_100_10
     cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    output_dir = str(test_root_path / 'hls4mlprj_btnn_{}_{}'.format(test_no, backend))
-    hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir=output_dir, hls_config=cfg, backend=backend)
+    output_dir = str(test_root_path / 'hls4mlprj_btnn_{}_{}_{}'.format(test_no, backend, io_type))
+    hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir=output_dir, hls_config=cfg, backend=backend, io_type=io_type)
     hls_model.compile()
     y_hls = hls_model.predict(X)
     # hls4ml may return XNOR binary
@@ -201,7 +204,8 @@ def randX_1000_1():
                                        (quantized_relu(10)),
                                        (quantized_relu(10,5))])
 @pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
-def test_quantizer(randX_1000_1, quantizer, backend):
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+def test_quantizer(randX_1000_1, quantizer, backend, io_type):
     '''
     Test a single quantizer as an Activation function.
     Checks the type inference through the conversion is correct without just
@@ -215,12 +219,13 @@ def test_quantizer(randX_1000_1, quantizer, backend):
 
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['quantizer'], rounding_mode='AP_RND_CONV', saturation_mode='AP_SAT')
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    output_dir = str(test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}_{}'.format(quantizer.__class__.__name__,
-                                                                                      quantizer.bits, quantizer.integer, backend))
+    output_dir = str(test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}_{}_{}'.format(quantizer.__class__.__name__,
+                                                                                         quantizer.bits, quantizer.integer, backend, io_type))
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
                                                            output_dir=output_dir,
-                                                           backend=backend)
+                                                           backend=backend,
+                                                           io_type=io_type)
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
 
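Every hunk above routes the new `io_type` parameter into `hls4ml.converters.convert_from_keras_model`. Below is a minimal standalone sketch of that call pattern, assuming QKeras and hls4ml are installed; the model layout, bit widths, and output directory are illustrative and not taken from the test file.

# Minimal sketch (illustrative, not part of the diff): convert a small QKeras
# Dense -> Activation model with an explicit io_type, as the tests above do.
import hls4ml
from tensorflow.keras.models import Sequential
from qkeras import QDense, QActivation, quantized_bits, quantized_relu

model = Sequential([
    QDense(16, input_shape=(16,), name='fc1',
           kernel_quantizer=quantized_bits(4, 0, alpha=1),
           bias_quantizer=quantized_bits(4, 0, alpha=1)),
    QActivation(activation=quantized_relu(4, 0), name='relu1'),
])

config = hls4ml.utils.config_from_keras_model(model, granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                       hls_config=config,
                                                       output_dir='hls4mlprj_io_type_sketch',  # illustrative path
                                                       backend='Vivado',
                                                       io_type='io_stream')  # or 'io_parallel'
hls_model.compile()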