Skip to content

Commit c7808e4

Browse files
committed
fix bug: correct element order of the comparison tuple in test_double_quant_params — compare against ("nf4", 6, 256) so it matches the unpacked (dtype, double_quant_bits, double_quant_group_size) order
Signed-off-by: xin3he <[email protected]>
1 parent 33bb948 commit c7808e4

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

test/3x/torch/quantization/weight_only/test_rtn.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ def test_double_quant_params(self, dtype, double_quant_bits, double_quant_group_
241241
out = model(self.example_inputs)[0]
242242
atol_true = (out - self.q_label).amax()
243243
# compare atol, this case is an ideal case.
244-
if not (dtype, double_quant_bits, double_quant_group_size) == (256, 6, "nf4"):
244+
if not (dtype, double_quant_bits, double_quant_group_size) == ("nf4", 6, 256):
245245
assert (
246246
atol_false < atol_true
247247
), "asym for double quant should have smaller atol because scales is bigger than zero, please double check."

0 commit comments

Comments
 (0)