Update app.py
app.py
CHANGED
@@ -40,9 +40,14 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
 
+# Load the FLUX model only once
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
+
+# VAE setup
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-
+
+# Image2Image pipeline setup
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     base_model,
     vae=good_vae,
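
This hunk loads the base FLUX pipeline a single time and then reuses its text encoders and tokenizers when the image-to-image pipeline is built, so the large encoder weights are not duplicated on the GPU. A minimal sketch of that component-sharing pattern, assuming the imports at the top of app.py; the middle of the call falls between the two hunks, so the transformer/text_encoder lines below are assumptions:

import torch
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

# Load the heavy weights exactly once.
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)

# Reuse the already-loaded components instead of allocating second copies.
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
    base_model,
    vae=good_vae,
    transformer=pipe.transformer,    # assumption: shared in the elided lines
    text_encoder=pipe.text_encoder,  # assumption: shared in the elided lines
    tokenizer=pipe.tokenizer,        # assumption: shared in the elided lines
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    torch_dtype=dtype,
).to(device)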
@@ -52,12 +57,27 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     text_encoder_2=pipe.text_encoder_2,
     tokenizer_2=pipe.tokenizer_2,
     torch_dtype=dtype
-)
+).to(device)
 
 MAX_SEED = 2**32 - 1
 
 pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
+# ControlNet model and pipeline (loaded only when needed)
+controlnet = None
+pipe_controlnet = None
+
+def load_controlnet():
+    global controlnet, pipe_controlnet
+    if controlnet is None:
+        controlnet = FluxControlNetModel.from_pretrained(
+            "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
+        ).to(device)
+    if pipe_controlnet is None:
+        pipe_controlnet = FluxControlNetPipeline.from_pretrained(
+            base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
+        ).to(device)
+
 class calculateDuration:
     def __init__(self, activity_name=""):
         self.activity_name = activity_name
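
The new load_controlnet() helper keeps app startup light: the jasperai/Flux.1-dev-Controlnet-Upscaler weights are only fetched when the first upscale request arrives. Because Gradio can run queued handlers concurrently, a lock-guarded variant may be safer; this is a hypothetical hardening of the commit's helper, not part of it:

import threading

import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

controlnet = None
pipe_controlnet = None
_controlnet_lock = threading.Lock()

def load_controlnet():
    # Thread-safe lazy initialization: only the first caller loads weights.
    global controlnet, pipe_controlnet
    with _controlnet_lock:
        if controlnet is None:
            controlnet = FluxControlNetModel.from_pretrained(
                "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
            ).to(device)
        if pipe_controlnet is None:
            pipe_controlnet = FluxControlNetPipeline.from_pretrained(
                base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
            ).to(device)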
@@ -488,8 +508,6 @@ css = '''
 footer {visibility: hidden;}
 '''
 
-
-
 huggingface_token = os.getenv("HF_TOKEN")
 
 model_path = snapshot_download(
@@ -497,25 +515,11 @@ model_path = snapshot_download(
     repo_type="model",
     ignore_patterns=["*.md", "*.gitattributes"],
     local_dir="FLUX.1-dev",
-    token=huggingface_token,
+    token=huggingface_token,
 )
 
-# Load pipeline
-controlnet = FluxControlNetModel.from_pretrained(
-    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-).to(device)
-pipe_controlnet = FluxControlNetPipeline.from_pretrained(
-    model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
-)
-pipe_controlnet.to(device)
-
-
-
 MAX_SEED = 1000000
 
-
-
-
 def process_input(input_image, upscale_factor):
     w, h = input_image.size
     w_original, h_original = w, h
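
Note that the lazy loader added earlier builds FluxControlNetPipeline from base_model, whereas the eager code deleted here pointed at the local model_path snapshot; the snapshot itself is still downloaded. For reference, the surviving call assembled as a runnable snippet (the repo_id argument sits above this hunk, so the base model is an assumption):

import os

from huggingface_hub import snapshot_download

huggingface_token = os.getenv("HF_TOKEN")

model_path = snapshot_download(
    repo_id="black-forest-labs/FLUX.1-dev",  # assumed; not visible in the hunk
    repo_type="model",
    ignore_patterns=["*.md", "*.gitattributes"],  # skip docs and git metadata
    local_dir="FLUX.1-dev",
    token=huggingface_token,  # required for the gated FLUX.1-dev repo
)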
@@ -547,12 +551,13 @@ def process_input(input_image, upscale_factor):
 
 MAX_PIXEL_BUDGET = 1024 * 1024
 
-
 @spaces.GPU
 def upscale(input_image, progress=gr.Progress(track_tqdm=True)):
     if input_image is None:
         raise gr.Error("No image to upscale. Please generate an image first.")
 
+    load_controlnet()  # Load ControlNet on demand
+
     # Process the input image
     input_image, w_original, h_original, was_resized = process_input(input_image, 4)
 
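
process_input() clamps requests so that a 4x upscale stays within MAX_PIXEL_BUDGET; its body lies between the hunks, so only its contract (the four return values above) is visible. A sketch of the usual shape of such a helper, as an illustration rather than the app's actual implementation:

MAX_PIXEL_BUDGET = 1024 * 1024

def process_input(input_image, upscale_factor):
    # Shrink the input if upscaling it would blow the pixel budget.
    w, h = input_image.size
    w_original, h_original = w, h
    was_resized = False
    if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
        scale = (MAX_PIXEL_BUDGET / (w * h * upscale_factor**2)) ** 0.5
        input_image = input_image.resize((int(w * scale), int(h * scale)))
        was_resized = True
    return input_image, w_original, h_original, was_resized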
@@ -575,8 +580,6 @@ def upscale(input_image, progress=gr.Progress(track_tqdm=True)):
 
     return upscaled_image  # Return only the upscaled image
 
-
-
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
 
     loras_state = gr.State(loras)
@@ -690,8 +693,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
         outputs=history_gallery,
     )
 
-
-
     upscale_button.click(
         upscale,
         inputs=[result],
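
upscale_button.click follows the standard Gradio wiring: the result component feeds upscale(), and the return value lands in whatever component the elided outputs= line names. A minimal self-contained analogue with illustrative component names (not the app's):

import gradio as gr

def upscale_stub(image):
    return image  # stand-in for the real upscale() handler

with gr.Blocks() as demo:
    result = gr.Image(label="Generated image")
    upscaled = gr.Image(label="Upscaled image")  # hypothetical output slot
    upscale_button = gr.Button("Upscale")
    upscale_button.click(upscale_stub, inputs=[result], outputs=[upscaled])

demo.queue()
demo.launch()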
@@ -699,4 +700,4 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
     )
 
 app.queue()
-app.launch()
+app.launch()