Update generate.py

- generate.py  +14 -7

generate.py  CHANGED
@@ -1,6 +1,7 @@
 import argparse
 import subprocess
 import os
+import torch
 from huggingface_hub import snapshot_download
 
 # Arguments
@@ -8,28 +9,34 @@ parser = argparse.ArgumentParser()
 parser.add_argument("--task", type=str, default="t2v-14B")
 parser.add_argument("--size", type=str, default="832*480")
 parser.add_argument("--frame_num", type=int, default=60)
-parser.add_argument("--sample_steps", type=int, default=
+parser.add_argument("--sample_steps", type=int, default=20)
 parser.add_argument("--ckpt_dir", type=str, default="./Wan2.1-T2V-14B")
 parser.add_argument("--offload_model", type=str, default="True")
-parser.add_argument("--precision", type=str, default="bf16")
 parser.add_argument("--prompt", type=str, required=True)
 args = parser.parse_args()
 
-#
+# Ensure the model is downloaded
 if not os.path.exists(args.ckpt_dir):
-    print("Downloading WAN 2.1 model...")
+    print("Downloading WAN 2.1 - 14B model from Hugging Face...")
     snapshot_download(repo_id="Wan-AI/Wan2.1-T2V-14B", local_dir=args.ckpt_dir)
 
-#
-
+# Free up GPU memory
+if torch.cuda.is_available():
+    torch.cuda.empty_cache()
+    torch.backends.cudnn.benchmark = False
+    torch.backends.cudnn.deterministic = True
+
+# Run WAN 2.1 - 14B Model
+command = f"python run_model.py --task {args.task} --size {args.size} --frame_num {args.frame_num} --sample_steps {args.sample_steps} --ckpt_dir {args.ckpt_dir} --offload_model {args.offload_model} --prompt \"{args.prompt}\""
 
 process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 stdout, stderr = process.communicate()
 
+# Print logs for debugging
 print("🔹 Output:", stdout.decode())
 print("🔺 Error:", stderr.decode())
 
-#
+# Verify if video was created
 if os.path.exists("output.mp4"):
     print("✅ Video generated successfully: output.mp4")
 else:
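
For reference, the updated script is a thin CLI wrapper: it downloads the Wan2.1-T2V-14B checkpoint if the directory is missing, clears CUDA caches, and shells out to run_model.py. Only --prompt is required; the other flags fall back to the argparse defaults shown in the diff. A typical invocation might look like the following sketch (the prompt text and flag values are purely illustrative):

    python generate.py --prompt "A koala surfing a wave at sunset" --sample_steps 20 --frame_num 60

Note that the command string is built with an f-string and executed with shell=True, so prompts containing double quotes or shell metacharacters may need extra escaping. torch.cuda.empty_cache() releases cached GPU memory before the subprocess starts, and disabling cudnn benchmarking together with deterministic=True trades autotuning speed for deterministic kernel selection.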