
Commit 2e86a3f

[Tests] skip nan lora tests on PyTorch 2.5.1 CPU. (#9975)
* skip nan lora tests on PyTorch 2.5.1 CPU.
* cog
* use xfail
* correct xfail
* add condition
* tests
1 parent cd6ca9d commit 2e86a3f
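For readers unfamiliar with the marker this commit adds, below is a minimal, self-contained sketch of how a conditional, strict `pytest.mark.xfail` behaves. Everything in it is illustrative: the helper `_is_torch_at_least`, the test name, and the fake test body are stand-ins, not the diffusers test suite (the real tests use `is_torch_version` and `torch_device` from `diffusers.utils.testing_utils`, as the diffs below show). The `condition` is evaluated once at collection time, and `strict=True` turns an unexpected pass into a failure, so the marker gets noticed and removed once the underlying PyTorch 2.5 CPU regression is fixed.

```python
# Illustrative sketch only -- names here are hypothetical, not part of this commit.
import pytest
import torch
from packaging import version  # assumed available (pytest already depends on it)


def _is_torch_at_least(v: str) -> bool:
    # Stand-in for diffusers.utils.testing_utils.is_torch_version(">=", v).
    return version.parse(torch.__version__).release >= version.parse(v).release


TORCH_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
AFFECTED = torch.device(TORCH_DEVICE).type == "cpu" and _is_torch_at_least("2.5")


@pytest.mark.xfail(
    condition=AFFECTED,  # evaluated at collection: xfail only applies on CPU + torch >= 2.5
    reason="NaN output after fusing a corrupted LoRA on CPU with PyTorch >= 2.5.",
    strict=True,  # an unexpected pass is reported as a failure, so stale markers get cleaned up
)
def test_fused_output_is_finite():
    # Fake stand-in for test_lora_fuse_nan: pretend the fused output has NaNs
    # only on the affected configuration and finite values everywhere else.
    out = torch.full((4,), float("nan")) if AFFECTED else torch.ones(4)
    assert torch.isfinite(out).all()
```

On an unaffected setup the test simply passes; on CPU with PyTorch >= 2.5 it is reported as XFAIL instead of a hard failure, which is exactly the behavior the commit wants for `test_lora_fuse_nan`.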

File tree

tests/lora/test_lora_layers_cogvideox.py
tests/lora/test_lora_layers_mochi.py
tests/lora/utils.py

3 files changed (+21 −0 lines)


tests/lora/test_lora_layers_cogvideox.py

Lines changed: 7 additions & 0 deletions
@@ -16,6 +16,7 @@
 import unittest
 
 import numpy as np
+import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel
 
@@ -29,6 +30,7 @@
 from diffusers.utils.testing_utils import (
     floats_tensor,
     is_peft_available,
+    is_torch_version,
     require_peft_backend,
     skip_mps,
     torch_device,
@@ -126,6 +128,11 @@ def get_dummy_inputs(self, with_generator=True):
         return noise, input_ids, pipeline_inputs
 
     @skip_mps
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
+        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
+        strict=True,
+    )
     def test_lora_fuse_nan(self):
         for scheduler_cls in self.scheduler_classes:
             components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)

tests/lora/test_lora_layers_mochi.py

Lines changed: 7 additions & 0 deletions
@@ -16,13 +16,15 @@
 import unittest
 
 import numpy as np
+import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel
 
 from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
 from diffusers.utils.testing_utils import (
     floats_tensor,
     is_peft_available,
+    is_torch_version,
     require_peft_backend,
     skip_mps,
     torch_device,
@@ -105,6 +107,11 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
+        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
+        strict=True,
+    )
     def test_lora_fuse_nan(self):
         for scheduler_cls in self.scheduler_classes:
             components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)

tests/lora/utils.py

Lines changed: 7 additions & 0 deletions
@@ -19,6 +19,7 @@
 from itertools import product
 
 import numpy as np
+import pytest
 import torch
 
 from diffusers import (
@@ -32,6 +33,7 @@
 from diffusers.utils.testing_utils import (
     CaptureLogger,
     floats_tensor,
+    is_torch_version,
     require_peft_backend,
     require_peft_version_greater,
     require_transformers_version_greater,
@@ -1510,6 +1512,11 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self):
         )
 
     @skip_mps
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
+        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
+        strict=True,
+    )
     def test_lora_fuse_nan(self):
         for scheduler_cls in self.scheduler_classes:
             components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
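To check whether the new marker will actually be active on a given machine, the same condition the tests use can be evaluated directly. A small sketch, assuming a development install of diffusers so the testing utilities are importable:

```python
# Prints whether the xfail condition added in this commit applies locally.
# Assumes a diffusers dev install (diffusers.utils.testing_utils importable).
import torch

from diffusers.utils.testing_utils import is_torch_version, torch_device

on_cpu = torch.device(torch_device).type == "cpu"
torch_2_5_or_newer = is_torch_version(">=", "2.5")

print(f"torch {torch.__version__}, torch_device={torch_device!r}")
print(f"xfail active for test_lora_fuse_nan: {on_cpu and torch_2_5_or_newer}")
```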
