1 parent bac5d4a commit 40cabd1
torchao/quantization/qat/linear.py
@@ -365,6 +365,7 @@ def _convert_qat_linear_4w(self, module: torch.nn.Module):
                 inner_k_tiles=inner_k_tiles,
                 precision=child.weight.dtype,
                 scales_precision=config.scale_precision,
+                device=next(child.parameters()).device,
             )
             setattr(module, name, quantized_linear)