 
 def _build_model_with_quantization_holder(act_layer, quant_activation_holder, input_shape, model_name):
     inputs = tf.keras.layers.Input(shape=input_shape)
-    x = tf.keras.layers.Conv2D(filters=3, kernel_size=4)(inputs)
+    # If all conv outputs were negative, the ReLU output would be all zeros and the quantizer
+    # validation would fail. Force positive conv weights so that the half-positive /
+    # half-negative input (see build_and_save_model) yields conv outputs of both signs.
+    conv = tf.keras.layers.Conv2D(filters=3, kernel_size=4)
+    conv.build(input_shape=(None, *input_shape))
+    kernel, bias = conv.get_weights()
+    conv.set_weights([np.abs(kernel), bias])
+    x = conv(inputs)
     act_output = act_layer(x)
     quant_output = quant_activation_holder(act_output)
     return tf.keras.Model(inputs=inputs, outputs=[quant_output, act_output], name=model_name)
 
 
 def _build_model_with_operator_quantization_holder(act_layer, quant_activation_holder, input_shape, model_name):
     inputs = tf.keras.layers.Input(shape=input_shape)
-    x = tf.keras.layers.Conv2D(filters=3, kernel_size=4)(inputs)
-    y = tf.keras.layers.Conv2D(filters=3, kernel_size=4)(inputs)
+    # As above, force positive conv weights so the convs produce outputs of both signs
+    # and the downstream ReLU does not collapse to all zeros.
+    convs = []
+    for _ in range(2):
+        conv = tf.keras.layers.Conv2D(filters=3, kernel_size=4)
+        conv.build(input_shape=(None, *input_shape))
+        kernel, bias = conv.get_weights()
+        conv.set_weights([np.abs(kernel), bias])
+        convs.append(conv)
+    x = convs[0](inputs)
+    y = convs[1](inputs)
     act_output = act_layer([x, y])
     quant_output = quant_activation_holder(act_output)
     return tf.keras.Model(inputs=inputs, outputs=[quant_output, act_output], name=model_name)
@@ -98,7 +116,14 @@ def build_and_save_model(self, quantizer, quantizer_params, layer, model_name, i
         self.assertEqual(len(quant_holder_layer), 1)
 
         # Verifying activation quantization after holder
-        output = model(np.random.randn(1, *input_shape))
+        # Feed an input whose upper half is positive and lower half is negative, so the
+        # positive-weight convs above produce outputs of both signs (an all-negative conv
+        # output would zero out the ReLU and trigger a quantizer validation error).
+        rand_inp = np.random.randn(1, *input_shape)
+        sign = np.ones((1, *input_shape))
+        sign[:, input_shape[0] // 2:, :, :] = -1
+        rand_inp = np.abs(rand_inp) * sign
+        output = model(rand_inp)
         self.assertTrue(np.any(output[0] != output[1]), "Expecting activation layer output to be different "
                                                         "from the activation holder layer output, which should be "
                                                         "quantized.")
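
For reference, a minimal standalone sketch (not part of the diff) of why the trick works: with non-negative conv weights, an input whose upper half is positive and lower half is negative produces conv outputs of both signs, so a following ReLU cannot collapse to all zeros. The shape and layer sizes below are illustrative, not taken from the test.

```python
import numpy as np
import tensorflow as tf

input_shape = (8, 8, 3)  # illustrative shape, not the one used by the tests

# Conv with weights forced positive, as in the builders above.
conv = tf.keras.layers.Conv2D(filters=3, kernel_size=4)
conv.build(input_shape=(None, *input_shape))
kernel, bias = conv.get_weights()
conv.set_weights([np.abs(kernel), bias])

# Input: positive upper half, negative lower half.
rand_inp = np.abs(np.random.randn(1, *input_shape)).astype(np.float32)
rand_inp[:, input_shape[0] // 2:, :, :] *= -1

out = conv(rand_inp).numpy()
print((out > 0).any(), (out < 0).any())   # expected: True True
print(tf.nn.relu(out).numpy().max() > 0)  # expected: True -> ReLU output is not all zeros
```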