Skip to content

Commit 6712804

Browse files
committed
Comment, remove debug prints
1 parent 1e4907e commit 6712804

File tree

1 file changed

+7
-16
lines changed

1 file changed

+7
-16
lines changed

test/test_microxcaling.py

Lines changed: 7 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,10 @@ def test_mx(
4040
mx_etype: ElemFormat,
4141
gf_etype: FormatInfo,
4242
) -> None:
43+
# Input tensor
4344
A = torch.arange(32) / 2 - 5
4445

46+
# Compute MX quantization
4547
mx_specs = dict(
4648
block_size=32,
4749
scale_bits=8,
@@ -52,28 +54,17 @@ def test_mx(
5254

5355
mx_dq = quantize_mx_op(A, mx_specs, mx_etype, axes=0, round=mx_round)
5456

57+
# Compute GFloat quantization
5558
fi = BlockFormatInfo("test", gf_etype, 32, format_info_ocp_e8m0)
5659

60+
# Compute scale - this is not considered GFloat's job
5761
amax = A.abs().max()
5862
q_log2scale = torch.floor(torch.log2(amax)).item() - fi.etype.emax
5963
q_scale = 2**q_log2scale
6064

61-
print(f"{q_scale=}")
62-
63-
enc = list(encode_block(fi, q_scale, (a.item() for a in A), gf_round))
64-
print(f"{enc=}")
65-
print("decoded_scale=", decode_float(fi.stype, enc[0]).fval)
66-
print("decoded_vals=", list(decode_float(fi.etype, e).fval for e in enc[1:]))
67-
print(
68-
"all_vals=",
69-
*(
70-
str(decode_float(fi.etype, i).fval) + ("" if i & 1 else "e")
71-
for i in range(fi.etype.code_of_max + 1)
72-
),
73-
)
65+
# Apply scale to encode and decode
66+
enc = encode_block(fi, q_scale, (a.item() for a in A), gf_round)
7467
gf_dq = list(decode_block(fi, enc))
75-
print("input=", *(str(v.item()) for v in A))
76-
print("mx_dq=", *(str(v.item()) for v in mx_dq))
77-
print("gf_dq=", *(str(v) for v in gf_dq))
7868

69+
# Compare
7970
np.testing.assert_allclose(gf_dq, mx_dq)

0 commit comments

Comments (0)