Commit 64b3e0f

Authored by faaany, sayakpaul, and DN6
make pipelines tests device-agnostic (part1) (#9399)
* enable on xpu
* add 1 more
* add one more
* enable more
* add 1 more
* add more
* enable 1
* enable more cases
* enable
* enable
* update comment
* one more
* enable 1
* add more cases
* enable xpu
* add one more caswe
* add more cases
* add 1
* add more
* add more cases
* add case
* enable
* add more
* add more
* add more
* enbale more
* add more
* update code
* update test marker
* add skip back
* update comment
* remove single files
* remove
* style
* add
* revert
* reformat
* update decorator
* update
* update
* update
* Update tests/pipelines/deepfloyd_if/test_if.py (Co-authored-by: Dhruv Nair <[email protected]>)
* Update src/diffusers/utils/testing_utils.py (Co-authored-by: Dhruv Nair <[email protected]>)
* Update tests/pipelines/animatediff/test_animatediff_controlnet.py (Co-authored-by: Dhruv Nair <[email protected]>)
* Update tests/pipelines/animatediff/test_animatediff.py (Co-authored-by: Dhruv Nair <[email protected]>)
* Update tests/pipelines/animatediff/test_animatediff_controlnet.py (Co-authored-by: Dhruv Nair <[email protected]>)
* update float16
* no unitest.skipt
* update
* apply style check
* reapply format

---------

Co-authored-by: Sayak Paul <[email protected]>
Co-authored-by: Dhruv Nair <[email protected]>
1 parent 2e86a3f · commit 64b3e0f

30 files changed: +229 −156 lines

src/diffusers/utils/testing_utils.py

Lines changed: 8 additions & 0 deletions
@@ -373,6 +373,14 @@ def require_note_seq(test_case):
     return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)
 
 
+def require_accelerator(test_case):
+    """
+    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no
+    hardware accelerator available.
+    """
+    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+
+
 def require_torchsde(test_case):
     """
     Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
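The new decorator keys entirely off the module-level `torch_device` string that diffusers' testing utilities resolve at import time. Below is a minimal sketch of how such a device string can be resolved, assuming only standard PyTorch availability checks; `resolve_torch_device` is a hypothetical helper, not the exact diffusers implementation (which also honors an environment-variable override):

```python
import torch


def resolve_torch_device() -> str:
    # Hypothetical helper: pick the first available accelerator backend,
    # falling back to CPU. diffusers resolves its own `torch_device` in
    # testing_utils.py, optionally overridden via an environment variable.
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"


torch_device = resolve_torch_device()
# With the device resolved, `require_accelerator` reduces to a single
# predicate: skip whenever the resolved device is plain "cpu".
```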

tests/pipelines/amused/test_amused.py

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@
 from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -129,7 +129,7 @@ def test_inference_batch_single_identical(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-256")
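This `require_torch_gpu` → `require_torch_accelerator` swap, repeated across the slow-test classes below, widens the skip condition from "CUDA only" to "any non-CPU torch device". A sketch of the difference, assuming a module-level `torch_device` as above; the decorator names match diffusers' testing utilities, but the bodies here are illustrative:

```python
import unittest

torch_device = "cpu"  # placeholder; resolved from the environment in practice


def require_torch_gpu(test_case):
    # Old marker: the test only runs when the resolved device is CUDA.
    return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)


def require_torch_accelerator(test_case):
    # New marker: any accelerator backend (cuda, xpu, mps, ...) qualifies.
    return unittest.skipUnless(torch_device != "cpu", "test requires an accelerator")(test_case)
```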

tests/pipelines/amused/test_amused_img2img.py

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -131,7 +131,7 @@ def test_inference_batch_single_identical(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256")

tests/pipelines/amused/test_amused_inpaint.py

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -135,7 +135,7 @@ def test_inference_batch_single_identical(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedInpaintPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256")

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 12 additions & 6 deletions
@@ -19,7 +19,13 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    numpy_cosine_similarity_distance,
+    require_accelerator,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -272,7 +278,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -288,14 +294,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()
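The rewritten `test_to_device` above is the pattern the remaining AnimateDiff and ControlNet-XS diffs repeat: round-trip the pipeline between CPU and whatever `torch_device` resolved to, asserting every component actually moved. As a standalone usage sketch, assuming `pipe` is any already-constructed `DiffusionPipeline` and `torch_device` comes from `diffusers.utils.testing_utils`:

```python
from diffusers.utils.testing_utils import torch_device

# Move to CPU and check that every device-bearing component landed there.
pipe.to("cpu")
assert all(
    c.device.type == "cpu" for c in pipe.components.values() if hasattr(c, "device")
)

# Same check against the resolved accelerator instead of a hard-coded "cuda".
pipe.to(torch_device)
assert all(
    c.device.type == torch_device for c in pipe.components.values() if hasattr(c, "device")
)
```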

tests/pipelines/animatediff/test_animatediff_controlnet.py

Lines changed: 6 additions & 6 deletions
@@ -21,7 +21,7 @@
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -281,7 +281,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -297,14 +297,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/animatediff/test_animatediff_sdxl.py

Lines changed: 6 additions & 6 deletions
@@ -14,7 +14,7 @@
     UNetMotionModel,
 )
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -212,7 +212,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -228,14 +228,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/animatediff/test_animatediff_sparsectrl.py

Lines changed: 5 additions & 5 deletions
@@ -20,7 +20,7 @@
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -345,7 +345,7 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -361,13 +361,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
 
     def test_to_dtype(self):

tests/pipelines/animatediff/test_animatediff_video2video.py

Lines changed: 6 additions & 6 deletions
@@ -19,7 +19,7 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -258,7 +258,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -274,14 +274,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py

Lines changed: 5 additions & 5 deletions
@@ -20,7 +20,7 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -274,7 +274,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -290,13 +290,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
 
     def test_to_dtype(self):

tests/pipelines/controlnet/test_controlnet_sdxl.py

Lines changed: 1 addition & 1 deletion
@@ -1019,7 +1019,7 @@ def test_conditioning_channels(self):
         )
 
         controlnet = ControlNetModel.from_unet(unet, conditioning_channels=4)
-        assert type(controlnet.mid_block) == UNetMidBlock2D
+        assert type(controlnet.mid_block) is UNetMidBlock2D
         assert controlnet.conditioning_channels == 4
 
     def get_dummy_components(self, time_cond_proj_dim=None):
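The `==` → `is` change here is a style fix rather than a behavioral one: classes are singletons, so identity comparison is the idiomatic exact-type check (the one flake8's E721 asks for), while `isinstance` remains the right tool when subclasses should also pass. For example:

```python
class Base: ...
class Sub(Base): ...

obj = Sub()
assert type(obj) is Sub          # exact-type check: identity on the class object
assert type(obj) is not Base     # type() does not match parent classes
assert isinstance(obj, Base)     # isinstance() is the subclass-friendly check
```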

tests/pipelines/controlnet_xs/test_controlnetxs.py

Lines changed: 6 additions & 5 deletions
@@ -38,6 +38,7 @@
     is_torch_compile,
     load_image,
     load_numpy,
+    require_accelerator,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -306,7 +307,7 @@ def test_multi_vae(self):
 
         assert out_vae_np.shape == out_np.shape
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -322,14 +323,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
 
 @slow

tests/pipelines/deepfloyd_if/test_if.py

Lines changed: 10 additions & 2 deletions
@@ -23,7 +23,14 @@
 )
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -58,7 +65,8 @@ def get_dummy_inputs(self, device, seed=0):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)

tests/pipelines/deepfloyd_if/test_if_img2img.py

Lines changed: 13 additions & 3 deletions
@@ -22,7 +22,15 @@
 from diffusers import IFImg2ImgPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)
 
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
@@ -70,12 +78,14 @@ def test_save_load_optional_components(self):
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_float16_inference(self):
         super().test_float16_inference(expected_max_diff=1e-1)
