@@ -1355,6 +1355,7 @@ def _convert_non_diffusers_wan_lora_to_diffusers(state_dict):
1355
1355
original_state_dict = {k[len("diffusion_model."):]: v for k, v in state_dict.items()}
1356
1356
1357
1357
num_blocks = len({k.split("blocks.")[1].split(".")[0] for k in original_state_dict})
1358
+ is_i2v_lora = any("k_img" in k for k in original_state_dict) and any("v_img" in k for k in original_state_dict)
1358
1359
1359
1360
for i in range(num_blocks):
1360
1361
# Self-attention
@@ -1374,13 +1375,15 @@ def _convert_non_diffusers_wan_lora_to_diffusers(state_dict):
1374
1375
converted_state_dict[f"blocks.{i}.attn2.{c}.lora_B.weight"] = original_state_dict.pop(
1375
1376
f"blocks.{i}.cross_attn.{o}.lora_B.weight"
1376
1377
)
1377
- for o, c in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]):
1378
- converted_state_dict[f"blocks.{i}.attn2.{c}.lora_A.weight"] = original_state_dict.pop(
1379
- f"blocks.{i}.cross_attn.{o}.lora_A.weight"
1380
- )
1381
- converted_state_dict[f"blocks.{i}.attn2.{c}.lora_B.weight"] = original_state_dict.pop(
1382
- f"blocks.{i}.cross_attn.{o}.lora_B.weight"
1383
- )
1378
+
1379
+ if is_i2v_lora:
1380
+ for o, c in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]):
1381
+ converted_state_dict[f"blocks.{i}.attn2.{c}.lora_A.weight"] = original_state_dict.pop(
1382
+ f"blocks.{i}.cross_attn.{o}.lora_A.weight"
1383
+ )
1384
+ converted_state_dict[f"blocks.{i}.attn2.{c}.lora_B.weight"] = original_state_dict.pop(
1385
+ f"blocks.{i}.cross_attn.{o}.lora_B.weight"
1386
+ )
1384
1387
1385
1388
# FFN
1386
1389
for o, c in zip(["ffn.0", "ffn.2"], ["net.0.proj", "net.2"]):
0 commit comments