We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 53b6b78 commit bf49f41 — Copy full SHA for bf49f41
torchao/float8/float8_utils.py
@@ -99,7 +99,7 @@ def amax_history_to_scale_stack(
99
100
@torch.no_grad()
101
def tensor_to_amax(x: torch.Tensor, reduce_amax: bool = False) -> torch.Tensor:
102
- amax = torch.max(torch.abs(x))
+ amax = torch.linalg.vector_norm(x, ord=float("inf"))
103
104
# If the user asked for distributed reduction, do it.
105
# If the user did not ask for it, assume that it will
0 commit comments