Update app.py
app.py
CHANGED
@@ -322,10 +322,10 @@ def motionllm(
     sample = {"instruction": prompt, "input": input_video_path}

     prefix = generate_prompt_mlp(sample)
-    pre = torch.cat((tokenizer.encode(prefix.split('INPUT_VIDEO: ')[0] + "\n", bos=True, eos=False, device=
+    pre = torch.cat((tokenizer.encode(prefix.split('INPUT_VIDEO: ')[0] + "\n", bos=True, eos=False, device=device).view(1, -1), tokenizer.encode("INPUT_VIDEO: ", bos=False, eos=False, device=device).view(1, -1)), dim=1)

     prompt = (pre, ". ASSISTANT: ")
-    encoded = (prompt[0], video_feature[0], tokenizer.encode(prompt[1], bos=False, eos=False, device=
+    encoded = (prompt[0], video_feature[0], tokenizer.encode(prompt[1], bos=False, eos=False, device=device).view(1, -1))

     t0 = time.perf_counter()

@@ -565,7 +565,7 @@ print('Load mlp model again from', mlp_path)
 print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr)

 model.eval()
-model =
+model = model.cuda()
 linear_proj.eval()

 tokenizer = Tokenizer(tokenizer_llm_path)
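Both hunks follow the usual remedy for a CPU/GPU device mismatch at inference time: the tokenized prompt pieces are now created directly on the model's device (and given an explicit batch dimension with .view(1, -1)) before being concatenated with the video feature, and the model itself is moved to the GPU with model.cuda() after loading. The snippet below is a minimal, self-contained sketch of that pattern; the toy token ids and the nn.Embedding layer are stand-ins for app.py's Tokenizer and MotionLLM model, not part of the actual code.

    import torch
    import torch.nn as nn

    # Use one device for both inputs and weights; fall back to CPU when no GPU is present.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Stand-in for tokenizer.encode(..., device=device): token ids created directly
    # on the target device, each given a batch dimension with .view(1, -1).
    prefix_ids = torch.tensor([1, 42, 7], device=device).view(1, -1)
    marker_ids = torch.tensor([99], device=device).view(1, -1)
    pre = torch.cat((prefix_ids, marker_ids), dim=1)  # mirrors torch.cat(..., dim=1) in the first hunk

    # Stand-in for the model: move it to the same device and switch to eval mode
    # before inference, mirroring model.eval() / model = model.cuda() in the second hunk.
    model = nn.Embedding(128, 16).to(device)
    model.eval()

    with torch.no_grad():
        out = model(pre)  # inputs and weights agree, so no "expected all tensors to be on the same device" error
    print(out.shape)      # torch.Size([1, 4, 16])

Note that model.cuda() assumes the Space runs on CUDA-capable hardware; a .to(device) call with a CPU fallback, as sketched above, degrades gracefully when no GPU is available.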