@@ -57,16 +57,34 @@

 def _build_model_with_quantization_holder(act_layer, quant_activation_holder, input_shape, model_name):
     inputs = tf.keras.layers.Input(shape=input_shape)
-    x = tf.keras.layers.Conv2D(filters=3, kernel_size=4)(inputs)
+    # If all conv outputs are negative, the ReLU output will be 0, causing a quantizer validation error.
+    # To include positive and negative values in the conv outputs, we set the conv weights to positive values.
+    # Also set the upper half of the input tensor to positive and the lower half to negative.
+    conv = tf.keras.layers.Conv2D(filters=3, kernel_size=4)
+    conv.build(input_shape=input_shape)
+    kernel, bias = conv.get_weights()
+    conv.set_weights([np.abs(kernel), bias])
+    x = conv(inputs)
     act_output = act_layer(x)
     quant_output = quant_activation_holder(act_output)
     return tf.keras.Model(inputs=inputs, outputs=[quant_output, act_output], name=model_name)


 def _build_model_with_operator_quantization_holder(act_layer, quant_activation_holder, input_shape, model_name):
     inputs = tf.keras.layers.Input(shape=input_shape)
-    x = tf.keras.layers.Conv2D(filters=3, kernel_size=4)(inputs)
-    y = tf.keras.layers.Conv2D(filters=3, kernel_size=4)(inputs)
+    # If all conv outputs are negative, the ReLU output will be 0, causing a quantizer validation error.
+    # To include positive and negative values in the conv outputs, we set the conv weights to positive values.
+    # Also set the upper half of the input tensor to positive and the lower half to negative.
+    conv1 = tf.keras.layers.Conv2D(filters=3, kernel_size=4)
+    conv1.build(input_shape=input_shape)
+    kernel1, bias1 = conv1.get_weights()
+    conv1.set_weights([np.abs(kernel1), bias1])
+    conv2 = tf.keras.layers.Conv2D(filters=3, kernel_size=4)
+    conv2.build(input_shape=input_shape)
+    kernel2, bias2 = conv2.get_weights()
+    conv2.set_weights([np.abs(kernel2), bias2])
+    x = conv1(inputs)
+    y = conv2(inputs)
     act_output = act_layer([x, y])
     quant_output = quant_activation_holder(act_output)
     return tf.keras.Model(inputs=inputs, outputs=[quant_output, act_output], name=model_name)
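Both Keras builders rely on the argument spelled out in the comments: with all-positive conv kernels and a test input whose upper spatial half is positive and lower half negative, the conv output is guaranteed to contain both positive and negative values, so the ReLU that follows is neither all-zero nor constant and the activation quantizer has a valid range to calibrate. A minimal NumPy sketch of that argument (the shapes and names below are illustrative only, not the test's actual parameters):

import numpy as np

# Illustrative shapes only: 8x8x3 input, 4x4 kernel, single output filter.
h, w, c, k = 8, 8, 3, 4
rng = np.random.default_rng(0)

inp = np.abs(rng.standard_normal((h, w, c)))
inp[h // 2:, :, :] *= -1                          # upper half positive, lower half negative
kernel = np.abs(rng.standard_normal((k, k, c)))   # all-positive weights, as in the builders above

out = np.empty((h - k + 1, w - k + 1))
for i in range(out.shape[0]):
    for j in range(out.shape[1]):
        # valid convolution: a window lying entirely in one half keeps that half's sign
        out[i, j] = np.sum(inp[i:i + k, j:j + k, :] * kernel)

assert (out > 0).any() and (out < 0).any()        # conv output carries both signs
assert np.maximum(out, 0).max() > 0               # so the ReLU output is not identically zero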
@@ -98,7 +116,14 @@ def build_and_save_model(self, quantizer, quantizer_params, layer, model_name, i
         self.assertEqual(len(quant_holder_layer), 1)

         # Verifying activation quantization after holder
-        output = model(np.random.randn(1, *input_shape))
+        # If all conv outputs are negative, the ReLU output will be 0, causing a quantizer validation error.
+        # To include positive and negative values in the conv outputs, we set the conv weights to positive values.
+        # Also set the upper half of the input tensor to positive and the lower half to negative.
+        rand_inp = np.random.randn(1, *input_shape)
+        sign = np.ones((1, *input_shape))
+        sign[:, rand_inp.shape[1]//2:, :, :] = -1
+        rand_inp = rand_inp * sign
+        output = model(rand_inp)
         self.assertTrue(np.any(output[0] != output[1]), "Expecting activation layer output to be different "
                                                         "from the activation holder layer output, which should be "
                                                         "quantized.")
@@ -57,7 +57,13 @@ def _build_model_with_quantization_holder(act_layer, quant_activation_holder, in
     class Model(torch.nn.Module):
         def __init__(self):
             super(Model, self).__init__()
+            # If all conv outputs are negative, the ReLU output will be 0, causing a quantizer validation error.
+            # To include positive and negative values in the conv outputs, we set the conv weights to positive values.
+            # Also set the upper half of the input tensor to positive and the lower half to negative.
             self.conv = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
+            with torch.no_grad():
+                weight = self.conv.weight.abs()
+                self.conv.weight.copy_(weight)
             self.act_layer = act_layer
             self.quant_activation_holder = quant_activation_holder

@@ -74,8 +80,16 @@ def _build_model_with_operator_quantization_holder(act_layer, quant_activation_h
     class Model(torch.nn.Module):
         def __init__(self):
             super(Model, self).__init__()
+            # If all conv outputs are negative, the ReLU output will be 0, causing a quantizer validation error.
+            # To include positive and negative values in the conv outputs, we set the conv weights to positive values.
+            # Also set the upper half of the input tensor to positive and the lower half to negative.
             self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
             self.conv2 = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
+            with torch.no_grad():
+                weight1 = self.conv1.weight.abs()
+                self.conv1.weight.copy_(weight1)
+                weight2 = self.conv2.weight.abs()
+                self.conv2.weight.copy_(weight2)
             self.act_layer = act_layer
             self.quant_activation_holder = quant_activation_holder

@@ -115,7 +129,13 @@ def build_and_save_model(self, quantizer, quantizer_params, layer, model_name, i
         quant_holder_layer = [_l for _, _l in model.named_modules() if isinstance(_l, PytorchActivationQuantizationHolder)]
         self.assertEqual(len(quant_holder_layer), 1)

+        # If all conv outputs are negative, the ReLU output will be 0, causing a quantizer validation error.
+        # To include positive and negative values in the conv outputs, we set the conv weights to positive values.
+        # Also set the upper half of the input tensor to positive and the lower half to negative.
         rand_inp = torch.rand(1, *input_shape).to(BaseActivationQuantizerBuildAndSaveTest.device)
+        sign = torch.ones(1, *input_shape)
+        sign[:, :, rand_inp.shape[2]//2:, :] = -1
+        rand_inp = rand_inp * sign
         model = model.to(BaseActivationQuantizerBuildAndSaveTest.device)

         # Verifying activation quantization after holder
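The PyTorch changes apply the same trick as the Keras side: force the conv weights positive in place under torch.no_grad(), then flip the sign of the lower spatial half of the (non-negative) torch.rand input. A self-contained sketch of that flow outside the test harness (the 3x8x8 shape and the omitted device handling are simplifications, not what the test actually uses):

import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=3, kernel_size=4)
with torch.no_grad():
    conv.weight.copy_(conv.weight.abs())      # all-positive weights, as in the test models

inp = torch.rand(1, 3, 8, 8)                  # torch.rand is non-negative
sign = torch.ones_like(inp)
sign[:, :, inp.shape[2] // 2:, :] = -1        # lower spatial half becomes negative
inp = inp * sign

pre_act = conv(inp)
print(pre_act.min().item(), pre_act.max().item())   # expect both signs, so ReLU(pre_act) is not all zeros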