Gui28F committed
Commit e28b576 · verified · Parent: af74478

Upload 20 files

models/diffusionModel/Flux.py ADDED
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+import torch
+from diffusers import FluxPipeline, DPMSolverMultistepScheduler
+from BeamDiffusionModel.models.diffusionModel.configs.config_loader import CONFIG
+from functools import partial
+from BeamDiffusionModel.models.diffusionModel.Latents_Singleton import Latents
+
+class Flux:
+    def __init__(self):
+        self.device = "cuda" if CONFIG.get("flux", {}).get("use_cuda", True) and torch.cuda.is_available() else "cpu"
+        self.torch_dtype = torch.bfloat16 if CONFIG.get("flux", {}).get("precision") == "bfloat16" else torch.float16
+
+        print(f"Loading model: {CONFIG['flux']['id']} on {self.device}")
+
+        # Load the pipeline in the configured dtype and keep memory pressure low.
+        self.pipe = FluxPipeline.from_pretrained(CONFIG["flux"]["id"], torch_dtype=self.torch_dtype)
+        self.pipe.enable_sequential_cpu_offload()
+        self.pipe.vae.enable_slicing()
+        self.pipe.vae.enable_tiling()
+        self.pipe.tokenizer.truncation_side = 'left'
+
+        print("Model loaded successfully!")
+
+
+    def capture_latents(self, latents_store: Latents, pipe, step, timestep, callback_kwargs):
+        # Runs at the end of every denoising step and stores the intermediate latents.
+        latents = callback_kwargs["latents"]
+        latents_store.add_latents(latents)
+        return callback_kwargs
+
+    def generate_image(self, prompt: str, latent=None, generator=None):
+        latents = Latents()
+        callback = partial(self.capture_latents, latents)
+        img = self.pipe(prompt, latents=latent, callback_on_step_end=callback,
+                        generator=generator, callback_on_step_end_tensor_inputs=["latents"],
+                        height=768,
+                        width=768,
+                        guidance_scale=3.5,
+                        max_sequence_length=512,
+                        num_inference_steps=CONFIG["flux"]["diffusion_settings"]["steps"]).images[0]
+
+        return img, latents.dump_and_clear()
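
The new Flux wrapper returns both the final image and the intermediate latents collected through the callback_on_step_end hook. A minimal usage sketch, assuming Latents.dump_and_clear() returns the list of captured latent tensors (the Latents_Singleton implementation is not part of this commit):

from BeamDiffusionModel.models.diffusionModel.Flux import Flux

# Instantiating Flux loads black-forest-labs/FLUX.1-dev with sequential CPU offload.
flux = Flux()

# One call produces the image plus the latents captured at every denoising step.
img, step_latents = flux.generate_image("a watercolor painting of a lighthouse at dawn")
img.save("lighthouse.png")
print(f"Captured latents from {len(step_latents)} steps")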
models/diffusionModel/configs/diffusionModel.yml CHANGED
@@ -4,3 +4,10 @@ stable_diffusion:
   id: stabilityai/stable-diffusion-2-1
   diffusion_settings:
     steps: 50
+
+flux:
+  use_cuda: True
+  precision: bfloat16
+  id: black-forest-labs/FLUX.1-dev
+  diffusion_settings:
+    steps: 100
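
The added flux block mirrors the existing stable_diffusion entry, so model id, precision, and step count resolve the same way. A minimal sketch of reading it directly with PyYAML, assuming the file path below; the repo's own config_loader.CONFIG is assumed to expose the same nested mapping:

import yaml

# Hypothetical direct read of the config file shown above.
with open("models/diffusionModel/configs/diffusionModel.yml") as f:
    cfg = yaml.safe_load(f)

print(cfg["flux"]["id"])                           # black-forest-labs/FLUX.1-dev
print(cfg["flux"]["precision"])                    # bfloat16
print(cfg["flux"]["diffusion_settings"]["steps"])  # 100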
utils/utils.py CHANGED
@@ -1,12 +1,10 @@
 import PIL
-from BeamDiffusionModel.models.diffusionModel.StableDiffusion import StableDiffusion
 
 from BeamDiffusionModel.models.diffusionModel.configs.config_loader import CONFIG
 from BeamDiffusionModel.models.clip.clip import Clip
 import torch
 import json
 
-sd = StableDiffusion()
 clip = Clip()
 
 
@@ -31,7 +29,7 @@ def clip_score(step, imgs_path):
 
     return clip.similarity(step, imgs)
 
-def gen_img(caption, latent= None, seed=None):
+def gen_img(sd, caption, latent=None, seed=None):
     system = "cuda" if CONFIG.get("diffusion_model", {}).get("use_cuda", True) and torch.cuda.is_available() else "cpu"
     if seed:
         generator = torch.Generator(system).manual_seed(seed)
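
With the module-level StableDiffusion instance removed, gen_img now takes the diffusion model as its first argument, so the caller decides which backend to use. A minimal call sketch, assuming the utils module is importable as BeamDiffusionModel.utils.utils and that gen_img forwards the prompt to the passed model's generate_image (the rest of the function body is outside this hunk):

from BeamDiffusionModel.models.diffusionModel.Flux import Flux
from BeamDiffusionModel.utils.utils import gen_img

# The caller now owns the model instance instead of utils.py creating one at import time.
flux = Flux()
result = gen_img(flux, "a bowl of ramen on a wooden table", seed=42)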