Remove pytorch3d
- app.py +8 -9
- requirements.txt +4 -4
app.py
CHANGED
@@ -5,8 +5,7 @@ import clip
 import gradio as gr
 from gradio_rerun import Rerun
 import numpy as np
-
-from pytorch3d.structures import Meshes
+import trimesh
 import rerun as rr
 import torch
 
@@ -79,13 +78,13 @@ def get_normals(vertices: torch.Tensor, faces: torch.Tensor) -> torch.Tensor:
     num_frames, num_faces = vertices.shape[0], faces.shape[-2]
     faces = faces.expand(num_frames, num_faces, 3)
 
-
-
-
-
-    normals =
+    normals = [
+        trimesh.Trimesh(vertices=v, faces=f, process=False).vertex_normals
+        for v, f in zip(vertices, faces)
+    ]
+    normals = torch.from_numpy(np.stack(normals))
 
-    return normals
+    return normals
 
 
 def generate(
@@ -118,7 +117,7 @@ def generate(
     padded_vertices = out["char_raw"]["char_vertices"][0]
     vertices = padded_vertices[padding_mask]
     faces = out["char_raw"]["char_faces"][0]
-    normals
+    normals = get_normals(vertices, faces)
     fx, fy, cx, cy = out["intrinsics"][0].cpu().numpy()
     K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
     caption = out["caption_raw"][0]
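Pieced together from the two app.py hunks above, the trimesh-based replacement for the pytorch3d normal computation looks roughly like the sketch below. The module imports, the shape comments, and the assumption that vertices arrive as a CPU tensor of shape (num_frames, num_vertices, 3) are inferred from the diff context, not the full file.

import numpy as np
import torch
import trimesh


def get_normals(vertices: torch.Tensor, faces: torch.Tensor) -> torch.Tensor:
    # vertices: (num_frames, num_vertices, 3); faces: (..., num_faces, 3) index triples.
    num_frames, num_faces = vertices.shape[0], faces.shape[-2]
    faces = faces.expand(num_frames, num_faces, 3)

    # One trimesh.Trimesh per frame; process=False keeps the vertex order
    # untouched so the normals stay aligned with the input vertices.
    # (CPU tensors are assumed here so trimesh can view them as numpy arrays.)
    normals = [
        trimesh.Trimesh(vertices=v, faces=f, process=False).vertex_normals
        for v, f in zip(vertices, faces)
    ]

    # Stack the per-frame (num_vertices, 3) arrays back into a single tensor.
    return torch.from_numpy(np.stack(normals))

In the generate() hunk the result is then consumed as normals = get_normals(vertices, faces), with vertices already unpadded via padding_mask.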
requirements.txt
CHANGED
@@ -9,8 +9,8 @@ scikit-learn
 fvcore
 iopath
 ema_pytorch
-ftfy
-regex
+ftfy
+regex
 tqdm
-
-git+https://github.com/
+trimesh
+git+https://github.com/openai/CLIP.git
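The requirements.txt change adds trimesh and installs CLIP straight from GitHub; the two removed lines (truncated above) presumably carried the pytorch3d dependency, matching the commit title. As a quick sanity check that the trimesh path behaves, the get_normals sketch above can be run on a made-up single-frame tetrahedron (get_normals refers to that sketch; the tensors are purely illustrative):

import torch

# One frame, four vertices, four triangular faces.
vertices = torch.tensor(
    [[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]
)
faces = torch.tensor([[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]])

normals = get_normals(vertices, faces)
print(normals.shape)                       # torch.Size([1, 4, 3])
print(torch.linalg.norm(normals, dim=-1))  # every vertex normal has unit length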