[tests] fix tests for save load components #10977

Merged · 2 commits · Mar 6, 2025

94 changes: 94 additions & 0 deletions tests/pipelines/hunyuandit/test_hunyuan_dit.py
@@ -14,6 +14,7 @@
# limitations under the License.

import gc
import tempfile
import unittest

import numpy as np
@@ -212,6 +213,99 @@ def test_fused_qkv_projections(self):
    def test_encode_prompt_works_in_isolation(self):
        pass

    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

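        # HunyuanDiT encodes the prompt with two text encoders; precompute the
        # embeddings from both so the optional text components can be set to None below.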
        (
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0)

        (
            prompt_embeds_2,
            negative_prompt_embeds_2,
            prompt_attention_mask_2,
            negative_prompt_attention_mask_2,
        ) = pipe.encode_prompt(
            prompt,
            device=torch_device,
            dtype=torch.float32,
            text_encoder_index=1,
        )

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attention_mask,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": negative_prompt_attention_mask,
            "prompt_embeds_2": prompt_embeds_2,
            "prompt_attention_mask_2": prompt_attention_mask_2,
            "negative_prompt_embeds_2": negative_prompt_embeds_2,
            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "use_resolution_binning": False,
        }

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

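        # Save to disk and reload; the None-ed optional components must survive the round trip.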
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

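        # Run the reloaded pipeline with the same precomputed embeddings.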
        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attention_mask,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": negative_prompt_attention_mask,
            "prompt_embeds_2": prompt_embeds_2,
            "prompt_attention_mask_2": prompt_attention_mask_2,
            "negative_prompt_embeds_2": negative_prompt_embeds_2,
            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "use_resolution_binning": False,
        }

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)


@slow
@require_torch_accelerator
70 changes: 69 additions & 1 deletion tests/pipelines/latte/test_latte.py
@@ -15,6 +15,7 @@

import gc
import inspect
import tempfile
import unittest

import numpy as np
@@ -39,7 +40,7 @@
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin
+from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np


enable_full_determinism()
@@ -217,6 +218,73 @@ def test_xformers_attention_forwardGenerator_pass(self):
    def test_encode_prompt_works_in_isolation(self):
        pass

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]

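        # Latte's encode_prompt yields just the positive and negative prompt embeddings here.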
        (
            prompt_embeds,
            negative_prompt_embeds,
        ) = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt": None,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 8,
            "width": 8,
            "video_length": 1,
            "mask_feature": False,
            "output_type": "pt",
            "clean_caption": False,
        }

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
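            # safe_serialization=False writes the weights with torch.save (pickle)
            # instead of safetensors.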
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)

            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()

            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1.0)


@slow
@require_torch_accelerator
98 changes: 95 additions & 3 deletions tests/pipelines/pag/test_pag_hunyuan_dit.py
@@ -14,6 +14,7 @@
# limitations under the License.

import inspect
import tempfile
import unittest

import numpy as np
@@ -27,9 +28,7 @@
    HunyuanDiTPAGPipeline,
    HunyuanDiTPipeline,
)
-from diffusers.utils.testing_utils import (
-    enable_full_determinism,
-)
+from diffusers.utils.testing_utils import enable_full_determinism, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, to_np
@@ -269,3 +268,96 @@ def test_pag_applied_layers(self):
    )
    def test_encode_prompt_works_in_isolation(self):
        pass

    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        (
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0)

        (
            prompt_embeds_2,
            negative_prompt_embeds_2,
            prompt_attention_mask_2,
            negative_prompt_attention_mask_2,
        ) = pipe.encode_prompt(
            prompt,
            device=torch_device,
            dtype=torch.float32,
            text_encoder_index=1,
        )

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attention_mask,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": negative_prompt_attention_mask,
            "prompt_embeds_2": prompt_embeds_2,
            "prompt_attention_mask_2": prompt_attention_mask_2,
            "negative_prompt_embeds_2": negative_prompt_embeds_2,
            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "use_resolution_binning": False,
        }

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attention_mask,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": negative_prompt_attention_mask,
            "prompt_embeds_2": prompt_embeds_2,
            "prompt_attention_mask_2": prompt_attention_mask_2,
            "negative_prompt_embeds_2": negative_prompt_embeds_2,
            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "use_resolution_binning": False,
        }

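        # The reloaded pipeline should reproduce the original output almost exactly.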
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
4 changes: 4 additions & 0 deletions tests/pipelines/pag/test_pag_pixart_sigma.py
@@ -343,3 +343,7 @@ def test_components_function(self):

        self.assertTrue(hasattr(pipe, "components"))
        self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))

    @unittest.skip("Test is already covered through encode_prompt isolation.")
    def test_save_load_optional_components(self):
        pass
4 changes: 4 additions & 0 deletions tests/pipelines/pixart_alpha/test_pixart.py
@@ -144,6 +144,10 @@ def test_inference_non_square_images(self):
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    @unittest.skip("Test is already covered through encode_prompt isolation.")
    def test_save_load_optional_components(self):
        pass

    def test_inference_with_embeddings_and_multiple_images(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
4 changes: 4 additions & 0 deletions tests/pipelines/pixart_sigma/test_pixart.py
@@ -239,6 +239,10 @@ def test_inference_with_multiple_images_per_prompt(self):
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    @unittest.skip("Test is already covered through encode_prompt isolation.")
    def test_save_load_optional_components(self):
        pass

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)
