neuralvfx commited on
Commit
0ffc2f0
·
verified ·
1 Parent(s): b1c8839

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +45 -1
README.md CHANGED
@@ -28,7 +28,10 @@ This model/pipeline is the product of my [LibreFlux ControlNet training repo](ht
28
  pip install -U diffusers==0.32.0
29
  pip install -U "transformers @ git+https://github.com/huggingface/transformers@e15687fffe5c9d20598a19aeab721ae0a7580f8a"
30
  ```
31
-
 
 
 
32
  # Load Pipeline
33
  ```py
34
  import torch
@@ -76,3 +79,44 @@ out = pipe(
76
  )
77
  out.images[0]
78
  ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  pip install -U diffusers==0.32.0
29
  pip install -U "transformers @ git+https://github.com/huggingface/transformers@e15687fffe5c9d20598a19aeab721ae0a7580f8a"
30
  ```
31
+ Low VRAM:
32
+ ```bash
33
+ pip install optimum-quanto
34
+ ```
35
  # Load Pipeline
36
  ```py
37
  import torch
 
79
  )
80
  out.images[0]
81
  ```
82
+ # Load Pipeline ( Low VRAM )
83
+ ```py
84
+ import torch
85
+ from diffusers import DiffusionPipeline
86
+ from optimum.quanto import freeze, quantize, qint8
87
+
88
+ model_id = "neuralvfx/LibreFlux-ControlNet" # <-- change if you use a different repo
89
+ device = "cuda" if torch.cuda.is_available() else "cpu"
90
+ dtype = torch.bfloat16 if device == "cuda" else torch.float32
91
+
92
+ pipe = DiffusionPipeline.from_pretrained(
93
+ model_id,
94
+ custom_pipeline=model_id,
95
+ trust_remote_code=True, # required for the custom pipeline + modules
96
+ torch_dtype=dtype,
97
+ safety_checker=None # optional; disable if not using a checker
98
+ )
99
+
100
+ quantize(
101
+ pipe.transformer,
102
+ weights=qint8,
103
+ exclude=[
104
+ "*.norm", "*.norm1", "*.norm2", "*.norm2_context",
105
+ "proj_out", "x_embedder", "norm_out", "context_embedder",
106
+ ],
107
+ )
108
+
109
+ quantize(
110
+ pipe.controlnet,
111
+ weights=qint8,
112
+ exclude=[
113
+ "*.norm", "*.norm1", "*.norm2", "*.norm2_context",
114
+ "proj_out", "x_embedder", "norm_out", "context_embedder",
115
+ ],
116
+ )
117
+ freeze(pipe.transformer)
118
+ freeze(pipe.controlnet)
119
+
120
+ pipe.enable_model_cpu_offload()
121
+
122
+ ```