fffiloni committed on
Commit
581674d
Β·
verified Β·
1 Parent(s): 596f2d8

prepare for nudge duplication

Browse files
Files changed (1) hide show
  1. app.py +125 -24
app.py CHANGED
@@ -12,6 +12,7 @@ import tempfile
12
  from datetime import datetime
13
 
14
  is_shared_ui = True if "fffiloni/Meigen-MultiTalk" in os.environ['SPACE_ID'] else False
 
15
 
16
  def trim_audio_to_5s_temp(audio_path, sample_rate=16000):
17
  max_duration_sec = 5
@@ -85,32 +86,34 @@ shutil.copy2(
85
 
86
  print("Copied MultiTalk files into base model directory.")
87
 
 
88
 
 
 
 
 
 
89
 
90
- # Check if CUDA-compatible GPU is available
91
- if torch.cuda.is_available():
92
- # Get current GPU name
93
- gpu_name = torch.cuda.get_device_name(torch.cuda.current_device())
94
- print(f"Current GPU: {gpu_name}")
95
-
96
- # Enforce GPU requirement
97
- if "A100" not in gpu_name and "L4" not in gpu_name:
98
- raise RuntimeError(f"This notebook requires an A100 or L4 GPU. Found: {gpu_name}")
99
- elif "L4" in gpu_name:
100
- print("Warning: L4 or L40S is supported, but A100 is recommended for faster inference.")
101
- else:
102
- raise RuntimeError("No CUDA-compatible GPU found. An A100, L4 or L40S GPU is required.")
103
 
104
 
105
- GPU_TO_VRAM_PARAMS = {
106
- "NVIDIA A100": 11000000000,
107
- "NVIDIA A100-SXM4-40GB": 11000000000,
108
- "NVIDIA A100-SXM4-80GB": 22000000000,
109
- "NVIDIA L4": 5000000000,
110
- "NVIDIA L40S": 11000000000
111
- }
112
- USED_VRAM_PARAMS = GPU_TO_VRAM_PARAMS[gpu_name]
113
- print("Using", USED_VRAM_PARAMS, "for num_persistent_param_in_dit")
114
 
115
 
116
 
@@ -239,7 +242,65 @@ def load_prerendered_examples(prompt, cond_image_path, cond_audio_path_spk1, con
239
 
240
  return output_video
241
 
242
- with gr.Blocks(title="MultiTalk Inference") as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
  gr.Markdown("## 🎀 Meigen MultiTalk Inference Demo")
244
  gr.Markdown("Let Them Talk: Audio-Driven Multi-Person Conversational Video Generation")
245
  if is_shared_ui:
@@ -290,9 +351,49 @@ with gr.Blocks(title="MultiTalk Inference") as demo:
290
  interactive=False if is_shared_ui else True
291
  )
292
 
293
- submit_btn = gr.Button("Generate")
294
 
295
  with gr.Column(scale=3):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
296
  output_video = gr.Video(label="Generated Video", interactive=False)
297
 
298
  gr.Examples(
 
12
  from datetime import datetime
13
 
14
  is_shared_ui = True if "fffiloni/Meigen-MultiTalk" in os.environ['SPACE_ID'] else False
15
+ is_gpu_associated = torch.cuda.is_available()
16
 
17
  def trim_audio_to_5s_temp(audio_path, sample_rate=16000):
18
  max_duration_sec = 5
 
86
 
87
  print("Copied MultiTalk files into base model directory.")
88
 
89
+ if not is_shared_ui:
90
 
91
+ # Check if CUDA-compatible GPU is available
92
+ if torch.cuda.is_available():
93
+ # Get current GPU name
94
+ gpu_name = torch.cuda.get_device_name(torch.cuda.current_device())
95
+ print(f"Current GPU: {gpu_name}")
96
 
97
+ # Enforce GPU requirement
98
+ if "A100" not in gpu_name and "L4" not in gpu_name:
99
+ #raise RuntimeError(f"This notebook requires an A100 or L4 GPU. Found: {gpu_name}")
100
+ print(f"This notebook requires an A100 or L4 GPU. Found: {gpu_name}")
101
+ elif "L4" in gpu_name:
102
+ print("Warning: L4 or L40S is supported, but A100 is recommended for faster inference.")
103
+ else:
104
+ #raise RuntimeError("No CUDA-compatible GPU found. An A100, L4 or L40S GPU is required.")
105
+ print("No CUDA-compatible GPU found. An A100, L4 or L40S GPU is required.")
 
 
 
 
106
 
107
 
108
+ GPU_TO_VRAM_PARAMS = {
109
+ "NVIDIA A100": 11000000000,
110
+ "NVIDIA A100-SXM4-40GB": 11000000000,
111
+ "NVIDIA A100-SXM4-80GB": 22000000000,
112
+ "NVIDIA L4": 5000000000,
113
+ "NVIDIA L40S": 11000000000
114
+ }
115
+ USED_VRAM_PARAMS = GPU_TO_VRAM_PARAMS[gpu_name]
116
+ print("Using", USED_VRAM_PARAMS, "for num_persistent_param_in_dit")
117
 
118
 
119
 
 
242
 
243
  return output_video
244
 
245
+ css = """
246
+ div#warning-duplicate {
247
+ background-color: #ebf5ff;
248
+ padding: 0 16px 16px;
249
+ margin: 20px 0;
250
+ color: #030303!important;
251
+ }
252
+ div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
253
+ color: #0f4592!important;
254
+ }
255
+ div#warning-duplicate strong {
256
+ color: #0f4592;
257
+ }
258
+ p.actions {
259
+ display: flex;
260
+ align-items: center;
261
+ margin: 20px 0;
262
+ }
263
+ div#warning-duplicate .actions a {
264
+ display: inline-block;
265
+ margin-right: 10px;
266
+ }
267
+ div#warning-setgpu {
268
+ background-color: #fff4eb;
269
+ padding: 0 16px 16px;
270
+ margin: 20px 0;
271
+ color: #030303!important;
272
+ }
273
+ div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
274
+ color: #92220f!important;
275
+ }
276
+ div#warning-setgpu a, div#warning-setgpu b {
277
+ color: #91230f;
278
+ }
279
+ div#warning-setgpu p.actions > a {
280
+ display: inline-block;
281
+ background: #1f1f23;
282
+ border-radius: 40px;
283
+ padding: 6px 24px;
284
+ color: antiquewhite;
285
+ text-decoration: none;
286
+ font-weight: 600;
287
+ font-size: 1.2em;
288
+ }
289
+ div#warning-ready {
290
+ background-color: #ecfdf5;
291
+ padding: 0 16px 16px;
292
+ margin: 20px 0;
293
+ color: #030303!important;
294
+ }
295
+ div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
296
+ color: #057857!important;
297
+ }
298
+ .custom-color {
299
+ color: #030303 !important;
300
+ }
301
+ """
302
+
303
+ with gr.Blocks(title="MultiTalk Inference", css=css) as demo:
304
  gr.Markdown("## 🎀 Meigen MultiTalk Inference Demo")
305
  gr.Markdown("Let Them Talk: Audio-Driven Multi-Person Conversational Video Generation")
306
  if is_shared_ui:
 
351
  interactive=False if is_shared_ui else True
352
  )
353
 
354
+ submit_btn = gr.Button("Generate", interactive=False if is_shared_ui else True)
355
 
356
  with gr.Column(scale=3):
357
+ if is_shared_ui:
358
+ top_description = gr.HTML(f'''
359
+ <div class="gr-prose">
360
+ <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
361
+ Attention: this Space need to be duplicated to work</h2>
362
+ <p class="main-message custom-color">
363
+ To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (L40s recommended).<br />
364
+ A L40s costs <strong>US$1.80/h</strong>.
365
+ </p>
366
+ <p class="actions custom-color">
367
+ <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
368
+ <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
369
+ </a>
370
+ to start experimenting with this demo
371
+ </p>
372
+ </div>
373
+ ''', elem_id="warning-duplicate")
374
+ else:
375
+ if(is_gpu_associated):
376
+ top_description = gr.HTML(f'''
377
+ <div class="gr-prose">
378
+ <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
379
+ You have successfully associated a GPU to this Space πŸŽ‰</h2>
380
+ <p class="custom-color">
381
+ You will be billed by the minute from when you activated the GPU until when it is turned off.
382
+ </p>
383
+ </div>
384
+ ''', elem_id="warning-ready")
385
+ else:
386
+ top_description = gr.HTML(f'''
387
+ <div class="gr-prose">
388
+ <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
389
+ You have successfully duplicated the MimicMotion Space πŸŽ‰</h2>
390
+ <p class="custom-color">There's only one step left before you can properly play with this demo: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a GPU</b> to it (via the Settings tab)</a> and run the app below.
391
+ You will be billed by the minute from when you activate the GPU until when it is turned off.</p>
392
+ <p class="actions custom-color">
393
+ <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">πŸ”₯ &nbsp; Set recommended GPU</a>
394
+ </p>
395
+ </div>
396
+ ''', elem_id="warning-setgpu")
397
  output_video = gr.Video(label="Generated Video", interactive=False)
398
 
399
  gr.Examples(