LPX55 committed
Commit f48d60a · verified · 1 Parent(s): 8de7d3d

Update app_v5.py

Files changed (1):
  1. app_v5.py (+6 −6)
app_v5.py CHANGED
@@ -8,7 +8,9 @@ import datetime
 import io
 import moondream as md
 from transformers import T5EncoderModel
-from diffusers import FluxControlNetPipeline
+from diffusers import FluxControlNetPipeline, FluxPipeline
+from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
+from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
 from diffusers.utils import load_image
 from PIL import Image
 from threading import Thread
@@ -18,8 +20,6 @@ from debug import log_params, scheduler, save_image
 logging.set_verbosity_debug()
 from model_loader import safe_model_load
 from huggingface_hub.utils._runtime import dump_environment_info
-from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
-from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
 
 def hello(profile: gr.OAuthProfile | None) -> str:
     if profile is None:
@@ -43,16 +43,16 @@ try:
 except Exception as e:
     print(f"Error setting memory usage: {e}")
 
-quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
+quant_config_5_t5 = TransformersBitsAndBytesConfig(load_in_8bit=True,)
 text_encoder_2_8b = T5EncoderModel.from_pretrained(
     "LPX55/FLUX.1-merged_lightning_v2",
     subfolder="text_encoder_2",
-    quantization_config=quant_config,
+    quantization_config=quant_config_5_t5,
     torch_dtype=torch.float16,
 )
 
 quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True,)
-transformer_8bit = FluxControlNetPipeline.from_pretrained(
+transformer_8bit = FluxPipeline.from_pretrained(
     "LPX55/FLUX.1-merged_lightning_v2",
     subfolder="transformer",
     quantization_config=quant_config,
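
For reference, below is a minimal sketch of how 8-bit components like these are typically wired into a full Flux pipeline. It is not part of the commit: the use of FluxTransformer2DModel for the transformer subfolder (the diff instead calls FluxPipeline.from_pretrained on that subfolder), the device_map choice, and the prompt/step count are all assumptions for illustration.

import torch
from transformers import T5EncoderModel
from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
from diffusers import FluxPipeline, FluxTransformer2DModel
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig

model_id = "LPX55/FLUX.1-merged_lightning_v2"  # checkpoint referenced in the diff

# 8-bit T5 text encoder, quantized with the transformers-side config
# (mirrors the quant_config_5_t5 block in the commit).
text_encoder_2_8bit = T5EncoderModel.from_pretrained(
    model_id,
    subfolder="text_encoder_2",
    quantization_config=TransformersBitsAndBytesConfig(load_in_8bit=True),
    torch_dtype=torch.float16,
)

# 8-bit Flux transformer, quantized with the diffusers-side config.
# Assumption: the transformer subfolder loads as a FluxTransformer2DModel;
# the commit itself routes this through FluxPipeline.from_pretrained.
transformer_8bit = FluxTransformer2DModel.from_pretrained(
    model_id,
    subfolder="transformer",
    quantization_config=DiffusersBitsAndBytesConfig(load_in_8bit=True),
    torch_dtype=torch.float16,
)

# Hand the quantized submodules to the pipeline; the remaining components
# (VAE, CLIP text encoder, tokenizers, scheduler) load at full precision.
pipe = FluxPipeline.from_pretrained(
    model_id,
    text_encoder_2=text_encoder_2_8bit,
    transformer=transformer_8bit,
    torch_dtype=torch.float16,
    device_map="balanced",  # one common placement option; adjust to your hardware
)

# Illustrative only: prompt and step count are not taken from the commit.
image = pipe("a coastal lighthouse at dawn", num_inference_steps=8).images[0]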