Commit ae93e97

Fix incorrect condition in PatchedMatmul (#204)
1 parent 330b970 commit ae93e97

File tree

1 file changed (+1, -1)

neural_compressor/torch/algorithms/fp8_quant/_quant_common/helper_modules.py

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ def __init__(self, mod, parent, mod_extra_config, *args, **kwargs):
         if self.quantization_mode in [QuantMode.QUANTIZE, QuantMode.LOAD]:
             self.quant_input_0 = self._mod_extra_config.inputs[0]
             self.quant_input_1 = self._mod_extra_config.inputs[1]
-            if not self.use_qdq or self.fake_quant:
+            if not self.use_qdq and not self.fake_quant:
                 self.register_scale("scale_input", mod_extra_config.scale.inputs[0], self.scale_format)
                 self.register_scale("scale_other", mod_extra_config.scale.inputs[1], self.scale_format)
                 self.matmul_fp8 = get_quantized_func_wrapper(OP_TYPE.MATMUL_GEMM, self.scale_format)
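For context, a minimal sketch (not part of the commit) contrasting the old and new guard. The helpers old_guard and new_guard are hypothetical names used only for illustration; the assumption, based on the diff, is that scale_input, scale_other, and matmul_fp8 are only needed on the real quantized-matmul path, so the guard should be False whenever QDQ or fake quantization is active.

# Minimal sketch, not from the repository: old_guard/new_guard are
# hypothetical helpers that mirror the condition before and after the fix.
def old_guard(use_qdq, fake_quant):
    # Buggy condition: evaluates True whenever fake_quant is set,
    # even though the fake-quant path does not use the real FP8 matmul.
    return not use_qdq or fake_quant

def new_guard(use_qdq, fake_quant):
    # Fixed condition: True only when neither QDQ nor fake quantization
    # is active, i.e. only on the real quantized-matmul path.
    return not use_qdq and not fake_quant

# The two guards differ exactly when fake_quant is True.
for use_qdq in (False, True):
    for fake_quant in (False, True):
        print(f"use_qdq={use_qdq!s:5} fake_quant={fake_quant!s:5} "
              f"old={old_guard(use_qdq, fake_quant)!s:5} "
              f"new={new_guard(use_qdq, fake_quant)}")

The two conditions diverge exactly when fake_quant is True: the old guard would still register the input scales and the FP8 matmul wrapper in fake-quant mode, while the corrected guard skips them in that case.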
