PengWeixuanSZU committed
Commit f4e353b · verified · 1 parent: 19f7978

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -24,7 +24,7 @@ import PIL
 import torch.nn.functional as F
 from torchvision import transforms
 
-#import spaces
+import spaces
 from huggingface_hub import snapshot_download
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@@ -122,7 +122,7 @@ def init_pipe():
 
     return pipe
 
-#@spaces.GPU()#duration=120
+@spaces.GPU()#duration=120
 def inference(source_images,
               target_images,
               text_prompt, negative_prompt,
@@ -133,6 +133,7 @@ def inference(source_images,
     pipe.vae.to(DEVICE)
     pipe.transformer.to(DEVICE)
    pipe.controlnet_transformer.to(DEVICE)
+    print("pipe.vae/transformer/controlnet moved to the GPU")
 
     source_pixel_values = source_images/127.5 - 1.0
     source_pixel_values = source_pixel_values.to(torch.float16).to(DEVICE)
@@ -161,6 +162,7 @@
     image_latents = None
     latents = source_latents
 
+    print("about to call pipe()")
     video = pipe(
         prompt = text_prompt,
         negative_prompt = negative_prompt,
@@ -207,7 +209,7 @@ def process_video(video_file, image_file, positive_prompt, negative_prompt, guid
 
     video:List[PIL.Image.Image]=[]
 
-
+    print("all setup done, ready for inference.")
     for i in progress.tqdm(range(video_shard)):
         if i>0: #first frame guidance
             first_frame=transforms.ToTensor()(video[-1])
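
For context: this commit re-enables ZeroGPU support. On Hugging Face ZeroGPU Spaces, a GPU is attached only for the duration of a call to a function decorated with spaces.GPU, so both the import and the decorator on inference() are required before any CUDA work can run. Below is a minimal sketch of that pattern, assuming a ZeroGPU Space; the function run_on_gpu and its argument are illustrative and not part of app.py, while the duration keyword is the decorator's real parameter for extending the default allocation window, matching the commented-out duration=120 above.

import torch
import spaces  # provided by the `spaces` package on Hugging Face Spaces

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

@spaces.GPU(duration=120)  # request the GPU for up to 120 s per call
def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
    # Illustrative only: under ZeroGPU, CUDA is available inside
    # this decorated call, not necessarily at import time.
    return (x.to(DEVICE) * 2).cpu()

This also explains why app.py moves pipe.vae, pipe.transformer, and pipe.controlnet_transformer to DEVICE inside inference() rather than at module load: under ZeroGPU the device only exists while a decorated call is running.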