1 parent 7da742e commit f0b2645
modules/LoRA.py
@@ -13,5 +13,6 @@ def add_lora_to_model(lora_name):
         print("Reloading the model to remove the LoRA...")
         shared.model, shared.tokenizer = load_model(shared.model_name)
     else:
+        # Why doesn't this work in 16-bit mode?
         print(f"Adding the LoRA {lora_name} to the model...")
         shared.model = PeftModel.from_pretrained(shared.model, Path(f"loras/{lora_name}"))
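
For context, a minimal sketch of how add_lora_to_model() might read once this hunk is applied. Only the lines shown in the diff come from the commit; the imports, the if-condition, and the "None" sentinel are assumptions about the surrounding module, not part of this change.

from pathlib import Path

from peft import PeftModel

from modules import shared            # assumed: shared module holding model/tokenizer state
from modules.models import load_model  # assumed: helper that (re)loads the base model


def add_lora_to_model(lora_name):
    # Assumed branch condition: a "None" selection means "remove any loaded LoRA".
    if lora_name == "None":
        print("Reloading the model to remove the LoRA...")
        shared.model, shared.tokenizer = load_model(shared.model_name)
    else:
        # Why doesn't this work in 16-bit mode?
        print(f"Adding the LoRA {lora_name} to the model...")
        shared.model = PeftModel.from_pretrained(shared.model, Path(f"loras/{lora_name}"))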