multimodalart (HF Staff) committed
Commit 97a289e · verified · 1 parent: 0720854

Update app.py

Files changed (1): app.py (+20 -1)
app.py CHANGED
@@ -27,7 +27,26 @@ from huggingface_hub import InferenceClient
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", torch_dtype=dtype).to(device)
+scheduler_config = {
+    "base_image_seq_len": 256,
+    "base_shift": math.log(3),
+    "invert_sigmas": False,
+    "max_image_seq_len": 8192,
+    "max_shift": math.log(3),
+    "num_train_timesteps": 1000,
+    "shift": 1.0,
+    "shift_terminal": None,
+    "stochastic_sampling": False,
+    "time_shift_type": "exponential",
+    "use_beta_sigmas": False,
+    "use_dynamic_shifting": True,
+    "use_exponential_sigmas": False,
+    "use_karras_sigmas": False,
+}
+
+scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
+
+pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", scheduler=scheduler, torch_dtype=dtype).to(device)
 
 # Load the relight LoRA
 pipe.load_lora_weights(
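
For context (not part of this commit): the added lines reference `math`, `torch`, `FlowMatchEulerDiscreteScheduler`, and `QwenImageEditPlusPipeline`, which are assumed to be imported near the top of app.py, outside this hunk. A minimal import sketch, assuming the usual diffusers locations for these classes:

```python
# Minimal import sketch, assuming the scheduler and pipeline classes come from
# diffusers as usual; the real import block in app.py sits outside the hunk above.
import math

import torch
from diffusers import FlowMatchEulerDiscreteScheduler, QwenImageEditPlusPipeline
```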
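
A hypothetical usage sketch of the resulting pipeline once the relight LoRA is loaded; the prompt, file names, and step count are illustrative placeholders, not taken from this commit:

```python
# Hypothetical usage sketch (prompt, paths, and settings are placeholders, not
# from app.py): run the edited pipeline on a single input image.
from PIL import Image

image = Image.open("input.png").convert("RGB")
result = pipe(
    image=[image],  # the 2509 edit pipeline accepts a list of input images
    prompt="Relight the scene with warm golden-hour sunlight",
    num_inference_steps=28,
).images[0]
result.save("output.png")
```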