prithivMLmods committed on
Commit
e84ad0c
·
verified ·
1 Parent(s): 9bd8809

update app

Browse files
Files changed (1) hide show
  1. app.py +1 -27
app.py CHANGED
@@ -147,24 +147,6 @@ model_l = Qwen3VLForConditionalGeneration.from_pretrained(
147
  torch_dtype=torch.float16
148
  ).to(device).eval()
149
 
150
- # Load Qwen3-VL-2B-Thinking
151
- MODEL_ID_J = "Qwen/Qwen3-VL-2B-Thinking"
152
- processor_j = AutoProcessor.from_pretrained(MODEL_ID_J, trust_remote_code=True)
153
- model_j = Qwen3VLForConditionalGeneration.from_pretrained(
154
- MODEL_ID_J,
155
- trust_remote_code=True,
156
- torch_dtype=torch.float16
157
- ).to(device).eval()
158
-
159
- # Load Qwen3-VL-4B-Thinking
160
- MODEL_ID_T = "Qwen/Qwen3-VL-4B-Thinking"
161
- processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True)
162
- model_t = Qwen3VLForConditionalGeneration.from_pretrained(
163
- MODEL_ID_T,
164
- trust_remote_code=True,
165
- torch_dtype=torch.float16
166
- ).to(device).eval()
167
-
168
  def downsample_video(video_path):
169
  """
170
  Downsamples the video to evenly spaced frames.
@@ -205,12 +187,8 @@ def generate_image(model_name: str, text: str, image: Image.Image,
205
  processor, model = processor_q, model_q
206
  elif model_name == "Qwen3-VL-8B-Instruct":
207
  processor, model = processor_y, model_y
208
- elif model_name == "Qwen3-VL-4B-Thinking":
209
- processor, model = processor_t, model_t
210
  elif model_name == "Qwen3-VL-2B-Instruct":
211
  processor, model = processor_l, model_l
212
- elif model_name == "Qwen3-VL-2B-Thinking":
213
- processor, model = processor_j, model_j
214
  else:
215
  yield "Invalid model selected.", "Invalid model selected."
216
  return
@@ -251,12 +229,8 @@ def generate_video(model_name: str, text: str, video_path: str,
251
  processor, model = processor_q, model_q
252
  elif model_name == "Qwen3-VL-8B-Instruct":
253
  processor, model = processor_y, model_y
254
- elif model_name == "Qwen3-VL-4B-Thinking":
255
- processor, model = processor_t, model_t
256
  elif model_name == "Qwen3-VL-2B-Instruct":
257
  processor, model = processor_l, model_l
258
- elif model_name == "Qwen3-VL-2B-Thinking":
259
- processor, model = processor_j, model_j
260
  else:
261
  yield "Invalid model selected.", "Invalid model selected."
262
  return
@@ -351,7 +325,7 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
351
  markdown_output = gr.Markdown()
352
 
353
  model_choice = gr.Radio(
354
- choices=["Qwen3-VL-4B-Instruct", "Qwen3-VL-8B-Instruct", "Qwen3-VL-2B-Instruct", "Qwen3-VL-2B-Thinking", "Qwen3-VL-4B-Thinking", "Qwen2.5-VL-3B-Instruct", "Qwen2.5-VL-7B-Instruct"],
355
  label="Select Model",
356
  value="Qwen3-VL-4B-Instruct"
357
  )
 
147
  torch_dtype=torch.float16
148
  ).to(device).eval()
149
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
150
  def downsample_video(video_path):
151
  """
152
  Downsamples the video to evenly spaced frames.
 
187
  processor, model = processor_q, model_q
188
  elif model_name == "Qwen3-VL-8B-Instruct":
189
  processor, model = processor_y, model_y
 
 
190
  elif model_name == "Qwen3-VL-2B-Instruct":
191
  processor, model = processor_l, model_l
 
 
192
  else:
193
  yield "Invalid model selected.", "Invalid model selected."
194
  return
 
229
  processor, model = processor_q, model_q
230
  elif model_name == "Qwen3-VL-8B-Instruct":
231
  processor, model = processor_y, model_y
 
 
232
  elif model_name == "Qwen3-VL-2B-Instruct":
233
  processor, model = processor_l, model_l
 
 
234
  else:
235
  yield "Invalid model selected.", "Invalid model selected."
236
  return
 
325
  markdown_output = gr.Markdown()
326
 
327
  model_choice = gr.Radio(
328
+ choices=["Qwen3-VL-4B-Instruct", "Qwen3-VL-8B-Instruct", "Qwen3-VL-2B-Instruct", "Qwen2.5-VL-3B-Instruct", "Qwen2.5-VL-7B-Instruct"],
329
  label="Select Model",
330
  value="Qwen3-VL-4B-Instruct"
331
  )