backends/vulkan/test/test_vulkan_delegate.py (99 additions, 0 deletions)
@@ -46,6 +46,9 @@ def lower_module(
if dynamic_shapes is not None:
compile_options["require_dynamic_shapes"] = True

# Enable downcast_64_bit by default to handle float64/int64 tensors
compile_options["downcast_64_bit"] = True

edge_compile_config = EdgeCompileConfig(
_skip_dim_order=False, # TODO(T182928844): Delegate dim order op to backend.
)
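
Aside, not part of this diff: per the comment above, `downcast_64_bit` exists to handle float64/int64 tensors, presumably by narrowing them to their 32-bit counterparts before the graph reaches the Vulkan backend. A minimal eager sketch of the idea only; the real narrowing happens during lowering, not as an eager cast like this:

import torch

# Hypothetical illustration: the downcast amounts to narrowing 64-bit
# tensors to 32-bit equivalents (assumption based on the option's comment).
x = torch.arange(4, dtype=torch.int64)
y = torch.rand(4, dtype=torch.float64)
print(x.to(torch.int32).dtype, y.to(torch.float32).dtype)  # torch.int32 torch.float32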
@@ -1964,3 +1967,99 @@ def forward(self, x):
GroupNormModule(num_groups, num_channels),
sample_inputs,
)

def test_vulkan_backend_full_quantization_workflow(self):
class FullQuantizationWorkflowModule(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, x):
# Step 1: Choose quantization parameters per tensor
scale, zero_point = (
torch.ops.quantized_decomposed.choose_qparams.tensor(
x,
quant_min=-2147483648, # int32 min
quant_max=2147483647, # int32 max
eps=1e-5,
dtype=torch.int32,
)
)

# Step 2: Quantize using the calculated parameters
quantized = torch.ops.quantized_decomposed.quantize_per_tensor.tensor(
x,
scale,
zero_point,
quant_min=-2147483648, # int32 min
quant_max=2147483647, # int32 max
dtype=torch.int32,
)

# Step 3: Dequantize back to float
dequantized = (
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor(
quantized,
scale,
zero_point,
quant_min=-2147483648, # int32 min
quant_max=2147483647, # int32 max
dtype=torch.int32,
)
)

return dequantized

full_workflow_module = FullQuantizationWorkflowModule()
sample_inputs = (torch.rand(size=(2, 3, 4), dtype=torch.float32),)

# Use higher tolerance since quantization introduces some error
self.lower_module_and_test_output(
full_workflow_module, sample_inputs, atol=5e-3, rtol=5e-3
)
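
Aside, not part of this diff: the same three ops can be run eagerly to see why an atol/rtol of 5e-3 is comfortably loose, since an int32 quantization grid over a [0, 1) input leaves only a tiny round-trip error. A minimal sketch, assuming stock PyTorch where importing torch.ao.quantization.fx._decomposed registers the quantized_decomposed ops (inside the ExecuTorch test suite they are already available):

import torch
import torch.ao.quantization.fx._decomposed  # noqa: F401  # registers the ops (assumption)

x = torch.rand(2, 3, 4)
scale, zp = torch.ops.quantized_decomposed.choose_qparams.tensor(
    x, quant_min=-2147483648, quant_max=2147483647, eps=1e-5, dtype=torch.int32
)
q = torch.ops.quantized_decomposed.quantize_per_tensor.tensor(
    x, scale, zp, quant_min=-2147483648, quant_max=2147483647, dtype=torch.int32
)
dq = torch.ops.quantized_decomposed.dequantize_per_tensor.tensor(
    q, scale, zp, quant_min=-2147483648, quant_max=2147483647, dtype=torch.int32
)
# Round-trip error is on the order of scale / 2, far below the test tolerance.
assert torch.allclose(x, dq, atol=5e-3, rtol=5e-3)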

def test_vulkan_backend_full_per_token_quantization_workflow(self):
class FullPerTokenQuantizationWorkflowModule(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, x):
# Step 1: Choose quantization parameters per token
scale, zero_point = (
torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric.default(
x,
dtype=torch.int32,
)
)

# Step 2: Quantize using the calculated parameters per token
quantized = torch.ops.quantized_decomposed.quantize_per_token.default(
x,
scale,
zero_point,
quant_min=-2147483648, # int32 min
quant_max=2147483647, # int32 max
dtype=torch.int32,
)

# Step 3: Dequantize back to float per token
dequantized = (
torch.ops.quantized_decomposed.dequantize_per_token.default(
quantized,
scale,
zero_point,
quant_min=-2147483648, # int32 min
quant_max=2147483647, # int32 max
dtype=torch.int32,
output_dtype=torch.float32,
)
)

return dequantized

full_per_token_workflow_module = FullPerTokenQuantizationWorkflowModule()
sample_inputs = (torch.rand(size=(6, 4), dtype=torch.float32),)

# Use higher tolerance since quantization introduces some error
self.lower_module_and_test_output(
full_per_token_workflow_module, sample_inputs, atol=5e-3, rtol=5e-3
)
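
Aside, not part of this diff: "per token" here means one (scale, zero_point) pair per slice along the last dimension, so the (6, 4) input above yields six pairs rather than one. A quick shape check under the same op-registration assumption as before:

import torch
import torch.ao.quantization.fx._decomposed  # noqa: F401  # registers the ops (assumption)

x = torch.rand(6, 4)
scale, zp = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric.default(
    x, torch.int32
)
# One parameter pair per token, i.e. per row of x: shape (6, 1) for each.
print(scale.shape, zp.shape)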