
Commit 0aefecb

animatediff controlnet community pipeline IP adapter fixed

1 parent: b536f39

File tree: 1 file changed (+3, -4 lines)

examples/community/pipeline_animatediff_controlnet.py

Lines changed: 3 additions & 4 deletions
@@ -23,7 +23,7 @@
 
 from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
 from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
-from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel
+from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel, UNetMotionModel
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.models.unets.unet_motion_model import MotionAdapter
 from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
@@ -400,12 +400,11 @@ def prepare_ip_adapter_image_embeds(
             )
 
         image_embeds = []
-        for single_ip_adapter_image, image_proj_layer in zip(
+        for single_ip_adapter_image, _ in zip(
             ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
         ):
-            output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
             single_image_embeds, single_negative_image_embeds = self.encode_image(
-                single_ip_adapter_image, device, 1, output_hidden_state
+                single_ip_adapter_image, device, 1
             )
             single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
             single_negative_image_embeds = torch.stack(
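
Per the commit message, this fixes IP-Adapter support in the AnimateDiff ControlNet community pipeline: the now-unused ImageProjection import is removed, and the loop in prepare_ip_adapter_image_embeds no longer derives an output_hidden_state flag from each projection layer; encode_image is called with just the image, the device, and a batch size of 1 before the embeds are stacked to num_images_per_prompt. Below is a rough usage sketch of the IP-Adapter path, assuming the loading recipe from the community README; the model IDs, image files, and the conditioning_frames argument name are illustrative assumptions, not part of this commit.

# Sketch: exercising the IP-Adapter path of the fixed pipeline (assumptions noted inline).
import torch
from diffusers import ControlNetModel, DiffusionPipeline, MotionAdapter
from diffusers.utils import load_image

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")

pipe = DiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",              # assumed SD 1.5 base checkpoint
    motion_adapter=adapter,
    controlnet=controlnet,
    custom_pipeline="pipeline_animatediff_controlnet",   # pulls in this community file
).to(device="cuda", dtype=torch.float16)

# The pipeline mixes in IPAdapterMixin (see the imports in the diff), so the
# standard IP-Adapter loader applies.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

style_image = load_image("ip_adapter_reference.png")                 # hypothetical file
pose_frames = [load_image(f"pose_{i:02d}.png") for i in range(16)]   # hypothetical files

output = pipe(
    prompt="a robot dancing in the rain, best quality",
    negative_prompt="low quality, blurry",
    num_frames=16,
    conditioning_frames=pose_frames,   # assumed argument name for the ControlNet frames
    ip_adapter_image=style_image,      # encoded by the loop changed above
)
frames = output.frames[0]

With the flag gone from the call site, encode_image presumably falls back to its own default for hidden states; the changed loop now only supplies the image, the device, and a per-image batch of 1.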

Comments (0)