
Commit 2e2e7ea

Revert "pin nightly to 2.5.0.dev20240709+cu121 (#505)"
This reverts commit cc871c5.
1 parent 6e7cf71 commit 2e2e7ea

2 files changed: 6 additions & 2 deletions


.github/workflows/regression_test.yml

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@ jobs:
           gpu-arch-version: "12.1"
         - name: CUDA Nightly
           runs-on: linux.g5.12xlarge.nvidia.gpu
-          torch-spec: '--pre torch==2.5.0.dev20240709+cu121 --index-url https://download.pytorch.org/whl/nightly/cu121'
+          torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cu121'
           gpu-arch-type: "cuda"
           gpu-arch-version: "12.1"
         - name: CPU 2.2.2
@@ -48,7 +48,7 @@ jobs:
           gpu-arch-version: ""
         - name: CPU Nightly
           runs-on: linux.4xlarge
-          torch-spec: '--pre torch==2.5.0.dev20240709+cpu --index-url https://download.pytorch.org/whl/nightly/cpu'
+          torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cpu'
           gpu-arch-type: "cpu"
           gpu-arch-version: ""
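
With the pin reverted, the two nightly jobs install whichever torch nightly wheel is current at run time instead of the fixed 2.5.0.dev20240709 build. As a rough check (a sketch, not part of the workflow), the build that the unpinned torch-spec resolved to can be inspected from Python on the runner:

# Minimal sketch: report which nightly build the unpinned torch-spec installed.
import torch

print(torch.__version__)          # e.g. "2.5.0.dev20240709+cu121" on the CUDA nightly job
print(torch.version.cuda)         # e.g. "12.1"; None on CPU-only wheels
print(torch.cuda.is_available())  # False on the linux.4xlarge CPU runner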

test/integration/test_integration.py

Lines changed: 4 additions & 0 deletions
@@ -631,6 +631,7 @@ def test_dequantize_int8_weight_only_quant_subclass(self, device, dtype):

     @parameterized.expand(COMMON_DEVICE_DTYPE)
     @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "int4 requires torch nightly.")
+    @unittest.skipIf(TORCH_VERSION_AFTER_2_5, "int4 skipping 2.5+ for now")
     def test_dequantize_int4_weight_only_quant_subclass(self, device, dtype):
         if dtype != torch.bfloat16:
             self.skipTest("Currently only supports bfloat16.")
@@ -641,6 +642,7 @@ def test_dequantize_int4_weight_only_quant_subclass(self, device, dtype):

     @parameterized.expand(COMMON_DEVICE_DTYPE)
     @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "int4 requires torch nightly.")
+    @unittest.skipIf(TORCH_VERSION_AFTER_2_5, "int4 skipping 2.5+ for now")
     def test_dequantize_int4_weight_only_quant_subclass_grouped(self, device, dtype):
         if dtype != torch.bfloat16:
             self.skipTest("Currently only supports bfloat16.")
@@ -821,6 +823,7 @@ def test_int8_weight_only_quant_with_freeze(self, device, dtype):

     @parameterized.expand(COMMON_DEVICE_DTYPE)
     @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "int4 requires torch nightly.")
+    @unittest.skipIf(TORCH_VERSION_AFTER_2_5, "int4 skipping 2.5+ for now")
     def test_int4_weight_only_quant_subclass_api(self, device, dtype):
         if dtype != torch.bfloat16:
             self.skipTest(f"Fails for {dtype}")
@@ -835,6 +838,7 @@ def test_int4_weight_only_quant_subclass_api(self, device, dtype):

     @parameterized.expand(COMMON_DEVICE_DTYPE)
     @unittest.skipIf(not TORCH_VERSION_AFTER_2_3, "int4 requires torch nightly.")
+    @unittest.skipIf(TORCH_VERSION_AFTER_2_5, "int4 skipping 2.5+ for now")
     def test_int4_weight_only_quant_subclass_api_grouped(self, device, dtype):
         if dtype != torch.bfloat16:
             self.skipTest(f"Fails for {dtype}")
