John6666 committed
Commit 534feb5 · verified · 1 Parent(s): 4aea187

Upload 4 files

Files changed (4)
  1. README.md +1 -1
  2. app.py +23 -2
  3. optimization.py +42 -0
  4. requirements.txt +5 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🖼
 colorFrom: purple
 colorTo: red
 sdk: gradio
-sdk_version: 5.23.2
+sdk_version: 5.47.1
 app_file: app.py
 license: apache-2.0
 pinned: false
app.py CHANGED
@@ -4,6 +4,7 @@ import random
 import torch
 import spaces
 import re
+import os
 from diffusers import (
     DiffusionPipeline,
     AutoencoderTiny,
@@ -11,6 +12,15 @@ from diffusers import (
 from huggingface_hub import hf_hub_download
 #from feifeilib.feifeichat import feifeichat
 
+IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
+IS_GPU_MODE = IS_ZERO_GPU or torch.cuda.is_available()
+if IS_ZERO_GPU:
+    import subprocess
+    subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
+torch.set_float32_matmul_precision("high")
+torch.backends.cuda.matmul.allow_tf32 = True
+IS_COMPILE = False
+
 import config
 styles_name = [style["name"] for style in config.style_list]
 MAX_SEED = np.iinfo(np.int32).max
@@ -49,7 +59,19 @@ def feifeimodload():
 
 pipe = feifeimodload()
 
-@spaces.GPU()
+if IS_ZERO_GPU:
+    os.environ["DIFFUSERS_ENABLE_HUB_KERNELS"] = "yes"
+    pipe.transformer.set_attention_backend("flash_hub")
+if IS_COMPILE:
+    from optimization import optimize_pipeline_
+    optimize_pipeline_(pipe, "prompt")
+
+def get_duration(prompt, styles_Radio, feife_select, bigboobs_select, seed, randomize_seed, width, height, num_inference_steps, guidancescale, num_feifei, nsfw_select, nsfw_slider, progress):
+    def_duration = 15.
+    def_steps = 4.
+    return int(def_duration * float(num_inference_steps) / def_steps)
+
+@spaces.GPU(duration=get_duration)
 def infer(prompt="", styles_Radio="(None)", feife_select = False, bigboobs_select = True, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidancescale=3.5, num_feifei=0.35, nsfw_select=False, nsfw_slider=1, progress=gr.Progress(track_tqdm=True)):
 
     Duke86Syl_lora_name=[]
@@ -232,7 +254,6 @@ with gr.Blocks(css=css) as demo:
         step=0.05,
        value=0.35,
     )
-
 
     gr.Examples(
         examples = examples,
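For context on this diff: SPACES_ZERO_GPU is the environment variable set inside ZeroGPU Spaces, so IS_ZERO_GPU gates all of the Spaces-specific setup, and spaces.GPU accepts a callable for duration, which is invoked with the same arguments as the decorated function and returns the requested GPU window in seconds; here a 15 s baseline measured at 4 steps is scaled linearly with num_inference_steps. A minimal sketch of the dynamic-duration pattern (function names are illustrative, not from this repo):

    import spaces

    def my_duration(prompt, num_inference_steps=4, **kwargs):
        # Called with the same arguments as the wrapped function;
        # returns the GPU window to request, in seconds.
        return int(15 * num_inference_steps / 4)

    @spaces.GPU(duration=my_duration)
    def generate(prompt, num_inference_steps=4, **kwargs):
        ...  # run the diffusion pipeline on the allocated GPU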
optimization.py ADDED
@@ -0,0 +1,42 @@
+"""
+"""
+
+from typing import Any
+from typing import Callable
+from typing import ParamSpec
+import spaces
+import torch
+
+
+P = ParamSpec('P')
+
+
+INDUCTOR_CONFIGS = {
+    'conv_1x1_as_mm': True,
+    'epilogue_fusion': False,
+    'coordinate_descent_tuning': True,
+    'coordinate_descent_check_all_directions': True,
+    'max_autotune': True,
+    'triton.cudagraphs': True,
+}
+
+
+def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kwargs):
+
+    @spaces.GPU(duration=1500)
+    def compile_transformer():
+
+        with spaces.aoti_capture(pipeline.transformer) as call:
+            pipeline(*args, **kwargs)
+
+        exported = torch.export.export(
+            mod=pipeline.transformer,
+            args=call.args,
+            kwargs=call.kwargs,
+        )
+
+        return spaces.aoti_compile(exported, INDUCTOR_CONFIGS)
+
+    pipeline.transformer.fuse_qkv_projections()
+    pipeline.transformer.set_attention_backend("flash_hub")
+    spaces.aoti_apply(compile_transformer(), pipeline.transformer)
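This new module is the ZeroGPU ahead-of-time compilation flow: spaces.aoti_capture records the args/kwargs of one real transformer call, torch.export.export traces the module with those inputs, spaces.aoti_compile builds the inductor artifact using INDUCTOR_CONFIGS, and spaces.aoti_apply swaps the compiled graph into the pipeline. duration=1500 reserves a 25-minute GPU window for the compile step. A usage sketch matching the IS_COMPILE-guarded call in app.py (the warmup prompt is arbitrary):

    # Compile once at startup, before serving requests; assumes `pipe` is the
    # DiffusionPipeline returned by feifeimodload() in app.py.
    from optimization import optimize_pipeline_

    optimize_pipeline_(pipe, "prompt")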
requirements.txt CHANGED
@@ -3,7 +3,7 @@ gradio
 mistralai
 requests
 accelerate
-git+https://github.com/huggingface/diffusers.git
+git+https://github.com/huggingface/diffusers@fa-hub
 invisible_watermark
 torch
 xformers
@@ -13,4 +13,7 @@ peft
 psutil
 gradio_client
 spaces
-openai
+openai
+kernels
+huggingface_hub[hf_xet]
+pydantic==2.10.6
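The switch to the fa-hub diffusers branch, together with the new kernels dependency, presumably backs the "flash_hub" attention backend used in app.py: the FlashAttention kernel is fetched prebuilt from the Hugging Face Hub via kernels rather than compiled locally (the pydantic pin looks like a gradio compatibility workaround). A minimal sketch of the backend switch; the model id here is illustrative, since the Space loads its own checkpoint in feifeimodload():

    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
    ).to("cuda")
    # Requires the fa-hub branch of diffusers plus the `kernels` package;
    # downloads the attention kernel from the Hub on first use.
    pipe.transformer.set_attention_backend("flash_hub")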