We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 072a5dd · commit 33b4c02 (Copy full SHA for 33b4c02)
src/diffusers/models/attention_processor.py
@@ -93,8 +93,8 @@ def __init__(
93
self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
94
95
if self.added_kv_proj_dim is not None:
96
- self.add_k_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
97
- self.add_v_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
+ self.add_k_proj = nn.Linear(added_kv_proj_dim, inner_dim)
+ self.add_v_proj = nn.Linear(added_kv_proj_dim, inner_dim)
98
99
self.to_out = nn.ModuleList([])
100
self.to_out.append(nn.Linear(inner_dim, query_dim, bias=out_bias))
0 commit comments