import torch
# Allow TF32 matmuls for faster inference on Ampere+ GPUs
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

import os
import gradio as gr
import spaces
from diffusers import FlowMatchEulerDiscreteScheduler
from lakonlab.ui.gradio.create_text_to_img import create_interface_text_to_img
from lakonlab.pipelines.piqwen_pipeline import PiQwenImagePipeline
from huggingface_hub import login

# Authenticate with the Hugging Face Hub (token provided via the HF_TOKEN env var)
login(token=os.getenv('HF_TOKEN'))

DEFAULT_PROMPT = ('Photo of a coffee shop entrance featuring a chalkboard sign reading "π-Qwen Coffee 😊 $2 per cup," '
                  'with a neon light beside it displaying "π-通义千问". Next to it hangs a poster showing a beautiful '
                  'Chinese woman, and beneath the poster is written "e≈2.71828-18284-59045-23536-02874-71352".')

# Load the Qwen-Image base model and attach the pi-Flow adapter (GMFlow policy, 4-step)
pipe = PiQwenImagePipeline.from_pretrained(
    'Qwen/Qwen-Image', torch_dtype=torch.bfloat16)
pipe.load_piflow_adapter(
    'Lakonik/pi-Qwen-Image',
    subfolder='gmqwen_k8_piid_4step',
    target_module_name='transformer')
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(  # use fixed shift=3.2
    pipe.scheduler.config,
    shift=3.2,
    shift_terminal=None,
    use_dynamic_shifting=False)
pipe = pipe.to('cuda')


@spaces.GPU
def generate(
        seed, prompt, width, height, steps,
        progress=gr.Progress(track_tqdm=True)):
    return pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=steps,
        generator=torch.Generator().manual_seed(seed),
    ).images[0]


with gr.Blocks(analytics_enabled=False,
               title='pi-Qwen Demo',
               css='lakonlab/ui/gradio/style.css'
               ) as demo:
    md_txt = '# pi-Qwen Demo\n\n' \
             'Official demo of the paper [pi-Flow: Policy-Based Few-Step Generation via Imitation Distillation](https://arxiv.org/abs/2510.14974). ' \
             '**Base model:** [Qwen-Image](https://huggingface.co/Qwen/Qwen-Image). **Fast policy:** GMFlow. **Code:** [https://github.com/Lakonik/piFlow](https://github.com/Lakonik/piFlow).'
    gr.Markdown(md_txt)
    create_interface_text_to_img(
        generate,
        prompt=DEFAULT_PROMPT,
        steps=4,
        guidance_scale=None,
        args=['last_seed', 'prompt', 'width', 'height', 'steps'])

demo.queue().launch()