seokochin committed
Commit 9d7db02 Β· verified Β· 1 Parent(s): da6f71b

Update generate.py

Files changed (1)
  1. generate.py +14 -7
generate.py CHANGED
@@ -1,6 +1,7 @@
 import argparse
 import subprocess
 import os
+import torch
 from huggingface_hub import snapshot_download
 
 # Arguments
@@ -8,28 +9,34 @@ parser = argparse.ArgumentParser()
 parser.add_argument("--task", type=str, default="t2v-14B")
 parser.add_argument("--size", type=str, default="832*480")
 parser.add_argument("--frame_num", type=int, default=60)
-parser.add_argument("--sample_steps", type=int, default=25)
+parser.add_argument("--sample_steps", type=int, default=20)
 parser.add_argument("--ckpt_dir", type=str, default="./Wan2.1-T2V-14B")
 parser.add_argument("--offload_model", type=str, default="True")
-parser.add_argument("--precision", type=str, default="bf16")
 parser.add_argument("--prompt", type=str, required=True)
 args = parser.parse_args()
 
-# Download model if not available
+# Ensure the model is downloaded
 if not os.path.exists(args.ckpt_dir):
-    print("πŸ”„ Downloading WAN 2.1 model...")
+    print("πŸ”„ Downloading WAN 2.1 - 14B model from Hugging Face...")
     snapshot_download(repo_id="Wan-AI/Wan2.1-T2V-14B", local_dir=args.ckpt_dir)
 
-# Run Model
-command = f"python run_model.py --task {args.task} --size {args.size} --frame_num {args.frame_num} --sample_steps {args.sample_steps} --ckpt_dir {args.ckpt_dir} --offload_model {args.offload_model} --precision {args.precision} --prompt \"{args.prompt}\""
+# Free up GPU memory
+if torch.cuda.is_available():
+    torch.cuda.empty_cache()
+    torch.backends.cudnn.benchmark = False
+    torch.backends.cudnn.deterministic = True
+
+# Run WAN 2.1 - 14B Model
+command = f"python run_model.py --task {args.task} --size {args.size} --frame_num {args.frame_num} --sample_steps {args.sample_steps} --ckpt_dir {args.ckpt_dir} --offload_model {args.offload_model} --prompt \"{args.prompt}\""
 
 process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 stdout, stderr = process.communicate()
 
+# Print logs for debugging
 print("πŸ”Ή Output:", stdout.decode())
 print("πŸ”Ί Error:", stderr.decode())
 
-# Check if video was created
+# Verify if video was created
 if os.path.exists("output.mp4"):
     print("βœ… Video generated successfully: output.mp4")
 else:
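
Note: the updated command string still interpolates the user-supplied prompt into a shell=True call, so a prompt containing quotes can break the invocation. Not part of this commit, but a minimal sketch of an argument-list alternative, assuming the same run_model.py flags used above:

import subprocess

def run_wan(args):
    # Build an argv list directly so the prompt needs no shell quoting.
    # run_model.py and its flags are assumed to match the command in the commit.
    cmd = [
        "python", "run_model.py",
        "--task", args.task,
        "--size", args.size,
        "--frame_num", str(args.frame_num),
        "--sample_steps", str(args.sample_steps),
        "--ckpt_dir", args.ckpt_dir,
        "--offload_model", args.offload_model,
        "--prompt", args.prompt,
    ]
    # Capture stdout/stderr as text, as the script does with Popen + communicate()
    result = subprocess.run(cmd, capture_output=True, text=True)
    print("πŸ”Ή Output:", result.stdout)
    print("πŸ”Ί Error:", result.stderr)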