Commit e461b61

Apply suggestions from code review
1 parent 41375d3 commit e461b61

1 file changed: +4 -4 lines changed


src/diffusers/pipelines/wan/pipeline_wan_i2v.py

Lines changed: 4 additions & 4 deletions
@@ -19,7 +19,7 @@
 import PIL
 import regex as re
 import torch
-from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModelWithProjection, UMT5EncoderModel
+from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel
 
 from ...callbacks import MultiPipelineCallbacks, PipelineCallback
 from ...image_processor import PipelineImageInput
@@ -49,11 +49,11 @@
         >>> import numpy as np
         >>> from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
         >>> from diffusers.utils import export_to_video, load_image
-        >>> from transformers import CLIPVisionModelWithProjection
+        >>> from transformers import CLIPVisionModel
 
         >>> # Available models: Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-14B-720P-Diffusers
         >>> model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
-        >>> image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+        >>> image_encoder = CLIPVisionModel.from_pretrained(
         ...     model_id, subfolder="image_encoder", torch_dtype=torch.float32
         ... )
         >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
@@ -171,7 +171,7 @@ def __init__(
         self,
         tokenizer: AutoTokenizer,
         text_encoder: UMT5EncoderModel,
-        image_encoder: CLIPVisionModelWithProjection,
+        image_encoder: CLIPVisionModel,
         image_processor: CLIPImageProcessor,
         transformer: WanTransformer3DModel,
         vae: AutoencoderKLWan,
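
For context, here is a minimal end-to-end sketch of how the updated docstring example is meant to be used after this change. The pipeline construction mirrors the docstring shown in the diff; the input image URL, prompt, resolution, frame count, and guidance scale below are illustrative assumptions, not part of this commit.

# Usage sketch: load the Wan 2.1 I2V pipeline with the CLIPVisionModel image
# encoder that this commit switches to. Values marked "illustrative" are
# assumptions for demonstration only.
import torch
from transformers import CLIPVisionModel
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
image_encoder = CLIPVisionModel.from_pretrained(
    model_id, subfolder="image_encoder", torch_dtype=torch.float32
)
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanImageToVideoPipeline.from_pretrained(
    model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
)
pipe.to("cuda")

image = load_image("https://example.com/input.jpg")  # hypothetical input image
frames = pipe(
    image=image,
    prompt="An astronaut riding a horse on the beach",  # illustrative prompt
    height=480,   # illustrative 480P resolution
    width=832,
    num_frames=81,
    guidance_scale=5.0,
).frames[0]
export_to_video(frames, "output.mp4", fps=16)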
