LPX55 committed
Commit 4a172a5 · verified · 1 Parent(s): d49d1bb

Update raw.py

Files changed (1)
raw.py +24 -27
raw.py CHANGED
@@ -16,44 +16,41 @@ import gradio as gr
 huggingface_token = os.getenv("HUGGINFACE_TOKEN")
 MAX_SEED = 1000000
 
-quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
-text_encoder_2_8bit = T5EncoderModel.from_pretrained(
-    "LPX55/FLUX.1-merged_uncensored",
-    subfolder="text_encoder_2",
-    quantization_config=quant_config,
-    torch_dtype=torch.bfloat16,
-    token=huggingface_token
-)
+# quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
+# text_encoder_2_8bit = T5EncoderModel.from_pretrained(
+# "LPX55/FLUX.1-merged_uncensored",
+# subfolder="text_encoder_2",
+# quantization_config=quant_config,
+# torch_dtype=torch.bfloat16,
+# token=huggingface_token
+# )
 
-good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token).to("cuda")
+# good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token).to("cuda")
 
 # Load pipeline
-controlnet = FluxControlNetModel.from_pretrained(
-    "jasperai/Flux.1-dev-Controlnet-Upscaler",
-    torch_dtype=torch.bfloat16
-)
+# controlnet = FluxControlNetModel.from_pretrained(
+# "jasperai/Flux.1-dev-Controlnet-Upscaler",
+# torch_dtype=torch.bfloat16
+# )
 pipe = FluxControlNetPipeline.from_pretrained(
-    "LPX55/FLUX.1-merged_uncensored",
-    controlnet=controlnet,
+    "LPX55/FLUX.1M-8step_upscaler-cnet",
     torch_dtype=torch.bfloat16,
-    vae=good_vae,
-    text_encoder_2=text_encoder_2_8bit,
     token=huggingface_token
 )
-adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
-adapter_id2 = "XLabs-AI/flux-RealismLora"
-adapter_id3 = "enhanceaiteam/Flux-uncensored-v2"
+# adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
+# adapter_id2 = "XLabs-AI/flux-RealismLora"
+# adapter_id3 = "enhanceaiteam/Flux-uncensored-v2"
 
 pipe.to("cuda")
-pipe.load_lora_weights(adapter_id, adapter_name="turbo")
-pipe.load_lora_weights(adapter_id2, adapter_name="real")
-pipe.load_lora_weights(adapter_id3, weight_name="lora.safetensors", adapter_name="enhance")
-pipe.set_adapters(["turbo", "real", "enhance"], adapter_weights=[0.9, 0.66, 0.6])
-pipe.fuse_lora(adapter_names=["turbo", "real", "enhance"], lora_scale=1.0)
-pipe.unload_lora_weights()
+# pipe.load_lora_weights(adapter_id, adapter_name="turbo")
+# pipe.load_lora_weights(adapter_id2, adapter_name="real")
+# pipe.load_lora_weights(adapter_id3, weight_name="lora.safetensors", adapter_name="enhance")
+# pipe.set_adapters(["turbo", "real", "enhance"], adapter_weights=[0.9, 0.66, 0.6])
+# pipe.fuse_lora(adapter_names=["turbo", "real", "enhance"], lora_scale=1.0)
+# pipe.unload_lora_weights()
 # pipe.enable_xformers_memory_efficient_attention()
 # save to the Hub
-pipe.push_to_hub("FLUX.1M-8step_upscaler-cnet")
+# pipe.push_to_hub("FLUX.1M-8step_upscaler-cnet")
 
 @spaces.GPU
 def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale, seed, guidance_end):
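
For reference, a minimal sketch of how the prefused checkpoint loaded above could be used for ControlNet upscaling after this change. The input URL, prompt, conditioning scale, and guidance scale below are illustrative assumptions, not values taken from this commit; only the repo id, dtype, and the 8-step count come from the code above.

import torch
from diffusers import FluxControlNetPipeline
from diffusers.utils import load_image

# Load the prefused upscaler pipeline from the Hub; the text encoder, VAE,
# ControlNet, and fused LoRAs that the old code assembled at startup are
# already baked into this checkpoint.
pipe = FluxControlNetPipeline.from_pretrained(
    "LPX55/FLUX.1M-8step_upscaler-cnet",
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")

# Illustrative low-resolution input (hypothetical URL), upscaled 2x.
control_image = load_image("https://example.com/low_res.png")
w, h = control_image.size
control_image = control_image.resize((2 * w, 2 * h))

image = pipe(
    prompt="high quality, detailed photo",  # illustrative prompt
    control_image=control_image,
    controlnet_conditioning_scale=0.6,      # assumed value
    num_inference_steps=8,                  # matches the "8step" fusion
    guidance_scale=3.5,                     # assumed value
    height=control_image.size[1],
    width=control_image.size[0],
    generator=torch.Generator(device="cuda").manual_seed(42),
).images[0]
image.save("upscaled.png")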