LPX55 committed on
Commit
d4b8499
·
verified ·
1 Parent(s): 05a995d

Update raw.py

Browse files
Files changed (1) hide show
  1. raw.py +17 -2
raw.py CHANGED
@@ -3,9 +3,22 @@ import spaces
3
  import os
4
  from diffusers.utils import load_image
5
  from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
 
 
 
 
6
  import gradio as gr
7
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
8
 
 
 
 
 
 
 
 
 
 
9
  good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token).to("cuda")
10
 
11
  # Load pipeline
@@ -18,6 +31,7 @@ pipe = FluxControlNetPipeline.from_pretrained(
18
  controlnet=controlnet,
19
  torch_dtype=torch.bfloat16,
20
  vae=good_vae,
 
21
  token=huggingface_token
22
  )
23
  adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
@@ -29,7 +43,7 @@ pipe.load_lora_weights(adapter_id2, adapter_name="real")
29
  pipe.set_adapters(["turbo", "real"], adapter_weights=[0.9, 0.6])
30
  pipe.fuse_lora(adapter_names=["turbo", "real"], lora_scale=1.0)
31
  pipe.unload_lora_weights()
32
-
33
  # save to the Hub
34
  # pipe.push_to_hub("fused-t-r")
35
 
@@ -53,13 +67,14 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
53
  return image
54
 
55
  # Create Gradio interface with rows and columns
56
- with gr.Blocks(title="FLUX ControlNet Image Generation") as iface:
57
  gr.Markdown("# FLUX ControlNet Image Generation")
58
  gr.Markdown("Generate images using the FluxControlNetPipeline. Upload a control image and enter a prompt to create an image.")
59
 
60
  with gr.Row():
61
  control_image = gr.Image(type="pil", label="Control Image")
62
  generated_image = gr.Image(type="pil", label="Generated Image", format="png")
 
63
  with gr.Column(scale=1):
64
  scale = gr.Slider(1, 3, value=1, label="Scale")
65
  steps = gr.Slider(6, 30, value=8, label="Steps")
 
3
  import os
4
  from diffusers.utils import load_image
5
  from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
6
+ from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
7
+ from transformers import T5EncoderModel
8
+ from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
9
+
10
  import gradio as gr
11
  huggingface_token = os.getenv("HUGGINFACE_TOKEN")
12
 
13
+ quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
14
+ text_encoder_2_8bit = T5EncoderModel.from_pretrained(
15
+ "LPX55/FLUX.1-merged_uncensored",
16
+ subfolder="text_encoder_2",
17
+ quantization_config=quant_config,
18
+ torch_dtype=torch.bfloat16,
19
+ token=huggingface_token
20
+ )
21
+
22
  good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token).to("cuda")
23
 
24
  # Load pipeline
 
31
  controlnet=controlnet,
32
  torch_dtype=torch.bfloat16,
33
  vae=good_vae,
34
+ text_encoder_2=text_encoder_2_8bit,
35
  token=huggingface_token
36
  )
37
  adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
 
43
  pipe.set_adapters(["turbo", "real"], adapter_weights=[0.9, 0.6])
44
  pipe.fuse_lora(adapter_names=["turbo", "real"], lora_scale=1.0)
45
  pipe.unload_lora_weights()
46
+ pipe.enable_xformers_memory_efficient_attention()
47
  # save to the Hub
48
  # pipe.push_to_hub("fused-t-r")
49
 
 
67
  return image
68
 
69
  # Create Gradio interface with rows and columns
70
+ with gr.Blocks(title="FLUX ControlNet Image Generation", fill_height=True) as iface:
71
  gr.Markdown("# FLUX ControlNet Image Generation")
72
  gr.Markdown("Generate images using the FluxControlNetPipeline. Upload a control image and enter a prompt to create an image.")
73
 
74
  with gr.Row():
75
  control_image = gr.Image(type="pil", label="Control Image")
76
  generated_image = gr.Image(type="pil", label="Generated Image", format="png")
77
+ with gr.Row():
78
  with gr.Column(scale=1):
79
  scale = gr.Slider(1, 3, value=1, label="Scale")
80
  steps = gr.Slider(6, 30, value=8, label="Steps")