We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 8163687 commit bb7fb66 — Copy full SHA for bb7fb66
docs/source/en/quantization/quanto.md
@@ -85,8 +85,12 @@ from diffusers import FluxTransformer2DModel, QuantoConfig
85
86
model_id = "black-forest-labs/FLUX.1-dev"
87
quantization_config = QuantoConfig(weights_dtype="float8")
88
-transformer = FluxTransformer2DModel.from_pretrained(model_id, quantization_config=quantization_config, torch_dtype=torch.bfloat16)
89
-
+transformer = FluxTransformer2DModel.from_pretrained(
+ model_id,
90
+ subfolder="transformer",
91
+ quantization_config=quantization_config,
92
+ torch_dtype=torch.bfloat16,
93
+)
94
# save quantized model to reuse
95
transformer.save_pretrained("<your quantized model save path>")
96
0 commit comments