import random

import numpy as np
import torch

import spaces
import gradio as gr
from diffusers import FluxFillPipeline

MAX_SEED = np.iinfo(np.int32).max

# Load the Fill pipeline once at import time; the LoRA adds the
# "Super Realism" style, activated by its trigger word in the prompt.
pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
)
lora_repo = "strangerzonehf/Flux-Super-Realism-LoRA"
trigger_word = "Super Realism"
pipe.load_lora_weights(lora_repo)
pipe.to("cuda")


# reference https://huggingface.co/spaces/black-forest-labs/FLUX.1-Fill-dev/blob/main/app.py
def calculate_optimal_dimensions(image):
    """Choose generation dimensions for *image*.

    The longer side is fixed at 1024 px, the other side follows the
    image's aspect ratio; both are snapped down to multiples of 8 and
    the overall ratio is clamped to the 9:16 .. 16:9 range.

    Args:
        image: a PIL image (anything exposing ``.size`` as (w, h)).

    Returns:
        (width, height) as ints suitable for the diffusion pipeline.
    """
    original_width, original_height = image.size

    MIN_ASPECT_RATIO = 9 / 16
    MAX_ASPECT_RATIO = 16 / 9
    FIXED_DIMENSION = 1024

    original_aspect_ratio = original_width / original_height

    # Fix the longer dimension at FIXED_DIMENSION; derive the other.
    if original_aspect_ratio > 1:  # wider than tall
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / original_aspect_ratio)
    else:  # taller than wide (or square)
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * original_aspect_ratio)

    # Diffusion models require dimensions that are multiples of 8.
    width = (width // 8) * 8
    height = (height // 8) * 8

    # Clamp extreme aspect ratios.
    # BUG FIX: the original arithmetic returned floats here
    # (e.g. `height * (16 / 9) // 8 * 8`); the pipeline expects integer
    # dimensions, so cast to int before snapping back to a multiple of 8.
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO // 8) * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO // 8) * 8

    # NOTE(review): these two lines are no-ops (when a side equals
    # FIXED_DIMENSION=1024 it is already >= 576). The same code appears in
    # the upstream reference app; kept as-is to avoid guessing the intent.
    width = max(width, 576) if width == FIXED_DIMENSION else width
    height = max(height, 576) if height == FIXED_DIMENSION else height

    return width, height


@spaces.GPU(duration=30)
def inpaint(
    image,
    mask,
    prompt="",
    seed=0,
    num_inference_steps=28,
    guidance_scale=50,
):
    """Inpaint the masked region of *image* with the FLUX.1-Fill pipeline.

    Args:
        image: PIL image to edit (converted to RGB).
        mask: PIL mask image; white areas are regenerated (converted to L).
        prompt: text prompt; the LoRA trigger word is prepended.
        seed: RNG seed; 0 or negative means "pick a random seed".
        num_inference_steps: denoising steps for the pipeline.
        guidance_scale: classifier-free guidance strength.

    Returns:
        (result, seed): the inpainted RGBA image and the seed actually used.
    """
    image = image.convert("RGB")
    mask = mask.convert("L")
    width, height = calculate_optimal_dimensions(image)

    if trigger_word:
        prompt = f"{trigger_word} {prompt}"

    # BUG FIX: Gradio may deliver the slider value as a float, so the
    # original `not isinstance(seed, int)` test re-randomized even when
    # the user supplied an explicit seed. Coerce first, then apply the
    # documented "0 = Random" rule.
    try:
        seed = int(seed)
    except (TypeError, ValueError):
        seed = 0
    if seed <= 0:
        seed = random.randint(0, MAX_SEED)

    result = pipe(
        image=image,
        mask_image=mask,
        prompt=prompt,
        width=width,
        height=height,
        # gr.Number yields floats; the scheduler needs an integer step count.
        num_inference_steps=int(num_inference_steps),
        guidance_scale=float(guidance_scale),
        generator=torch.Generator().manual_seed(seed),
    ).images[0]

    result = result.convert("RGBA")
    return result, seed


demo = gr.Interface(
    fn=inpaint,
    inputs=[
        gr.Image(label="image", type="pil"),
        gr.Image(label="mask", type="pil"),
        gr.Text(label="prompt", lines=4),
        gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
            info="(0 = Random)",
        ),
        gr.Number(value=40, label="num_inference_steps"),
        gr.Number(value=28, label="guidance_scale"),
    ],
    outputs=[
        gr.Image(label="Result"),
        gr.Number(label="Seed"),
    ],
    api_name="inpaint",
    examples=[["./assets/rocket.png", "./assets/Inpainting mask.png"]],
    cache_examples=False,
)

demo.launch()