initial commit
app.py CHANGED
@@ -1,10 +1,8 @@
 import gradio as gr
-from transformers import
+from transformers import Blip2ForVisualQuestionAnswering, Blip2Config, AutoTokenizer
 from PIL import Image
 import torch
 
-processor = AutoProcessor.from_pretrained("microsoft/git-base-vqav2")
-model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vqav2")
 model_path = "microsoft/git-base-vqav2"
 dataset_name = "Multimodal-Fatima/OK-VQA_train"
 
@@ -19,8 +17,7 @@ questions = ["What can happen the objects shown are thrown on the ground?",
              "Where can that toilet seat be bought?",
              "What do you call the kind of pants that the man on the right is wearing?"]
 
-
-model = AutoModelForVisualQuestionAnswering.from_pretrained(model_path)
+model = Blip2ForVisualQuestionAnswering.from_pretrained(model_path)
 
 
 def main(select_exemple_num):
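For context, the two removed loader lines followed the standard route for this checkpoint: GIT models such as microsoft/git-base-vqav2 are served in transformers through AutoProcessor and AutoModelForCausalLM, with answers produced by generation conditioned on the image and the tokenized question. Below is a minimal, self-contained sketch of that route; it is not part of this commit, and the image path example.jpg and the chosen question are placeholder inputs.

# Minimal sketch (not part of this commit): loading microsoft/git-base-vqav2
# via the AutoProcessor/AutoModelForCausalLM pairing that the removed lines used.
# "example.jpg" is a hypothetical placeholder image path.
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

model_path = "microsoft/git-base-vqav2"
processor = AutoProcessor.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

image = Image.open("example.jpg").convert("RGB")
question = "Where can that toilet seat be bought?"

# GIT conditions text generation on image features plus the tokenized question,
# with a leading CLS token prepended to the question tokens.
pixel_values = processor(images=image, return_tensors="pt").pixel_values
input_ids = processor(text=question, add_special_tokens=False).input_ids
input_ids = torch.tensor([processor.tokenizer.cls_token_id] + input_ids).unsqueeze(0)

with torch.no_grad():
    generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)

# The decoded sequence contains the question followed by the predicted answer,
# since generate() returns the prompt tokens along with the new ones.
print(processor.batch_decode(generated_ids, skip_special_tokens=True))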