Skip to content

Commit 4595971

Browse files
Adapt to the new behavior of `_convert_weight_to_int4pack`
1 parent e5df48e commit 4595971

File tree

1 file changed

+2
-0
lines changed

1 file changed

+2
-0
lines changed

torchao/quantization/GPTQ.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -720,6 +720,8 @@ def _create_quantized_state_dict(
720720
self.precision, # dtype for scales_and_zeros
721721
)
722722
# TODO: just get the device from mod.weight.device?
723+
w_cpu = w_int4x8.cpu()
724+
w_int4x8 = (w_cpu[::, ::2] << 4 | w_cpu[::, 1::2]).to(torch.uint8)
723725
weight_int4pack = torch.ops.aten._convert_weight_to_int4pack(w_int4x8.to(self.device), self.inner_k_tiles)
724726
cur_state_dict[f"{fqn}.weight"] = weight_int4pack.to(self.device)
725727
cur_state_dict[f"{fqn}.scales_and_zeros"] = scales_and_zeros.to(self.device)

0 commit comments

Comments (0)