Update app.py
app.py CHANGED

@@ -31,7 +31,7 @@ from torchvision.ops import nms
 
 
 from transformers import (AutoTokenizer, CLIPTextModelWithProjection)
-from transformers import (AutoProcessor, CLIPVisionModelWithProjection)
+from transformers import (AutoProcessor, CLIPImageProcessor, CLIPVisionModelWithProjection)
 
 BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator(thickness=2)
 MASK_ANNOTATOR = sv.MaskAnnotator()
@@ -298,7 +298,7 @@ if __name__ == '__main__':
     # init vision encoder
     clip_model = "openai/clip-vit-base-patch32"
     vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_model)
-    processor =
+    processor = CLIPImageProcessor.from_pretrained(clip_model)
     device = 'cuda'
     vision_model.to(device)
 
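Context for the fix: the previous revision left "processor =" as an incomplete assignment, a SyntaxError that crashes the Space at startup, and CLIPImageProcessor was never imported. This commit completes the assignment and adds the import. The sketch below shows how the processor and projection model initialized here are typically paired to embed an image; it is a minimal illustration, not code from app.py — the embed_image helper and the CPU fallback are assumptions.

# Minimal usage sketch (illustrative; embed_image and the CPU fallback are
# assumptions, not part of app.py). Shows how the CLIPImageProcessor /
# CLIPVisionModelWithProjection pair initialized in this commit produces
# image embeddings for the ViT-B/32 checkpoint named in the diff.
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

clip_model = "openai/clip-vit-base-patch32"
vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_model)
processor = CLIPImageProcessor.from_pretrained(clip_model)
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # app.py hardcodes 'cuda'
vision_model.to(device)
vision_model.eval()

def embed_image(image: Image.Image) -> torch.Tensor:
    # Resize, crop, and normalize to CLIP's expected input, then project
    # into the shared CLIP space; image_embeds is [1, 512] for ViT-B/32.
    inputs = processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        embeds = vision_model(**inputs).image_embeds
    return embeds / embeds.norm(dim=-1, keepdim=True)  # L2-normalize

One note on the choice: completing the line with CLIPImageProcessor rather than AutoProcessor (which the import line also pulls in) yields only the image-preprocessing half of the CLIP pipeline, which is all the vision encoder needs; AutoProcessor would return a combined tokenizer-plus-image-processor object.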