import gradio as gr
import numpy as np
import random
import torch
import spaces

from PIL import Image
from diffusers import FlowMatchEulerDiscreteScheduler
from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

import math

# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

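# FlowMatch Euler config for few-step (Lightning) sampling: base_shift == max_shift == log(3),
# so dynamic shifting resolves to a constant exponential time shift of 3.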
scheduler_config = {
    "base_image_seq_len": 256,
    "base_shift": math.log(3),
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
}

scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)

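# Base Qwen-Image-Edit-2509 pipeline, using the Lightning-friendly scheduler above.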
pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", scheduler=scheduler, torch_dtype=dtype)

# Load the texture and Lightning LoRAs, fuse both into the base weights, then drop the adapter modules
pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509", 
        weight_name="apply_texture_qwen_image_edit_2509.safetensors", adapter_name="texture")
pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
        weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors", adapter_name="lightning")
pipe.set_adapters(["texture", "lightning"], adapter_weights=[1., 1.])
pipe.fuse_lora(adapter_names=["texture", "lightning"], lora_scale=1)
pipe.unload_lora_weights()

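# Swap in the customized transformer class and its FlashAttention-3 double-stream attention processor.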
pipe.transformer.__class__ = QwenImageTransformer2DModel
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

pipe.to(device)

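# Warm up (and presumably compile) the pipeline on representative dummy inputs; see optimization.py.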
optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")

MAX_SEED = np.iinfo(np.int32).max

def calculate_dimensions(image):
    """Calculate output dimensions based on content image, keeping largest side at 1024."""
    if image is None:
        return 1024, 1024
    
    original_width, original_height = image.size
    
    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)
    
    # Ensure dimensions are multiples of 8
    new_width = (new_width // 8) * 8
    new_height = (new_height // 8) * 8
    
    return new_width, new_height

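# On ZeroGPU Spaces, @spaces.GPU allocates a GPU only for the duration of each call.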
@spaces.GPU
def apply_texture(
    content_image,
    texture_image,
    prompt,
    seed=42,
    randomize_seed=False,
    true_guidance_scale=1.0,
    num_inference_steps=4,
    progress=gr.Progress(track_tqdm=True)
):
    if content_image is None:
        raise gr.Error("Please upload a content image.")
    if texture_image is None:
        raise gr.Error("Please upload a texture image.")
    if not prompt or not prompt.strip():
        raise gr.Error("Please provide a description.")
    
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    
    # Calculate dimensions based on content image
    width, height = calculate_dimensions(content_image)
    
    # Prepare images
    content_pil = content_image.convert("RGB") if isinstance(content_image, Image.Image) else Image.open(content_image.name).convert("RGB")
    texture_pil = texture_image.convert("RGB") if isinstance(texture_image, Image.Image) else Image.open(texture_image.name).convert("RGB")
    
    pil_images = [content_pil, texture_pil]
    
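    # Content image first, texture reference second. With no negative prompt and
    # true_cfg_scale at 1.0, classifier-free guidance is effectively disabled,
    # which suits the fused 4-step Lightning LoRA.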
    result = pipe(
        image=pil_images,
        prompt=prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=1,
    ).images[0]
    
    return result, seed

# --- UI ---
css = '''
#col-container { max-width: 800px; margin: 0 auto; }
.dark .progress-text{color: white !important}
#examples{max-width: 800px; margin: 0 auto; }
'''

with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Apply Texture — Qwen Image Edit")
        gr.Markdown("""
            Using [tarn59's Apply-Texture-Qwen-Image-Edit-2509 LoRA](https://huggingface.co/tarn59/apply_texture_qwen_image_edit_2509) 
            and [lightx2v/Qwen-Image-Lightning](https://huggingface.co/lightx2v/Qwen-Image-Lightning) for 4-step inference 💨
        """)
        
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    content_image = gr.Image(label="Content", type="pil")
                    texture_image = gr.Image(label="Texture", type="pil")
                
                prompt = gr.Textbox(
                    label="Describe",
                    info="Apply ... texture to ...",
                    placeholder="Apply wood siding texture to building walls."
                )
                
                button = gr.Button("✨ Generate", variant="primary")
                
                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    true_guidance_scale = gr.Slider(
                        label="True Guidance Scale", 
                        minimum=1.0, 
                        maximum=10.0, 
                        step=0.1, 
                        value=1.0
                    )
                    num_inference_steps = gr.Slider(
                        label="Inference Steps", 
                        minimum=1, 
                        maximum=40, 
                        step=1, 
                        value=4
                    )
            
            with gr.Column():
                output = gr.Image(label="Output", interactive=False)
                seed_output = gr.Number(label="Used Seed", visible=False)
        
        # Event handlers
        button.click(
            fn=apply_texture,
            inputs=[
                content_image,
                texture_image,
                prompt,
                seed,
                randomize_seed,
                true_guidance_scale,
                num_inference_steps
            ],
            outputs=[output, seed_output]
        )
        
        # Examples
        gr.Examples(
            examples=[
                ["coffee_mug.png", "wood_boxes.png", "Apply wood texture to mug"],
                ["leaf.webp", "salmon.webp", "Apply salmon texture to leaves and stems"],
            ],
            inputs=[
                content_image,
                texture_image,
                prompt,
            ],
            outputs=[output, seed_output],
            fn=apply_texture,
            cache_examples="lazy",
            elem_id="examples"
        )

if __name__ == "__main__":
    demo.launch()