Update app.py
app.py CHANGED
@@ -1,34 +1,42 @@
-
 import gradio as gr
 import json
+import random
 from datetime import datetime
 from theme import TufteInspired
-from transformers import AutoTokenizer
-from transformers import pipeline
-import torch
-
+from transformers import AutoTokenizer, AutoModelForCausalLM
 from huggingface_hub import login
+import torch
 import os
+import spaces
+
 HF_TOKEN = os.getenv("HF_TOKEN")
 login(HF_TOKEN)
 
-#
-
-
-
-pipeline = pipeline(
-    "text-generation",
-    model=model_id,
-    model_kwargs={"torch_dtype": torch.bfloat16},
-    device="cuda",
-)
+# List of models to choose from
+model_list = [
+    "meta-llama/Llama-2-7b-chat-hf",
+]
 
+# Function to load a random model
+@spaces.GPU(duration=120)  # Allowing extra time for model loading
+def load_random_model():
+    model_id = random.choice(model_list)
+    tokenizer = AutoTokenizer.from_pretrained(model_id, add_special_tokens=True)
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
+    return model_id, model, tokenizer
 
 
-
+@spaces.GPU
 def generate_blurb():
-
-
+    model_id, model, tokenizer = load_random_model()
+    prompt = "Write a blurb for a made-up book:"
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_length=200, num_return_sequences=1)
+
+    blurb = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return f"Model used: {model_id}\n\nBlurb: {blurb}"
 
 # Function to log blurb and vote
 def log_blurb_and_vote(blurb, vote):
@@ -47,12 +55,12 @@ tufte_theme = TufteInspired()
 # Create Gradio interface
 with gr.Blocks(theme=tufte_theme) as demo:
     gr.Markdown("<h1 style='text-align: center;'>Would you read it?</h1>")
-    gr.Markdown("Click the button to generate a blurb for a made-up book, then vote on its quality.")
+    gr.Markdown("Click the button to generate a blurb for a made-up book using a random model, then vote on its quality.")
 
     with gr.Row():
         generate_btn = gr.Button("Write a Blurb", variant="primary")
 
-    blurb_output = gr.Textbox(label="Generated Blurb", lines=
+    blurb_output = gr.Textbox(label="Generated Blurb", lines=8, interactive=False)
 
     with gr.Row():
         upvote_btn = gr.Button("π would read")
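The diff cuts off just after the first vote button, so the body of log_blurb_and_vote and the button-to-handler wiring are not visible here. The following is a minimal, hypothetical sketch of how those pieces could fit together: the names generate_blurb, log_blurb_and_vote, blurb_output, generate_btn, and upvote_btn come from the diff, while the downvote button, the votes.jsonl path, the vote_status output, and the .click() calls are assumptions for illustration, not the Space's actual code.

import json
from datetime import datetime

import gradio as gr


def generate_blurb():
    # Stand-in for the model-backed generate_blurb defined in app.py above.
    return "Model used: <model_id>\n\nBlurb: <generated text>"


def log_blurb_and_vote(blurb, vote):
    # Append one JSON record per vote; the votes.jsonl file name is an assumption.
    record = {"timestamp": datetime.now().isoformat(), "blurb": blurb, "vote": vote}
    with open("votes.jsonl", "a") as f:
        f.write(json.dumps(record) + "\n")
    return f"Vote logged: {vote}"


with gr.Blocks() as demo:
    generate_btn = gr.Button("Write a Blurb", variant="primary")
    blurb_output = gr.Textbox(label="Generated Blurb", lines=8, interactive=False)
    with gr.Row():
        upvote_btn = gr.Button("π would read")
        downvote_btn = gr.Button("would not read")  # assumed counterpart, not in the diff
    vote_status = gr.Markdown()

    # Wire the buttons to the handlers; the exact wiring is not shown in the diff.
    generate_btn.click(fn=generate_blurb, outputs=blurb_output)
    upvote_btn.click(fn=lambda b: log_blurb_and_vote(b, "upvote"),
                     inputs=blurb_output, outputs=vote_status)
    downvote_btn.click(fn=lambda b: log_blurb_and_vote(b, "downvote"),
                       inputs=blurb_output, outputs=vote_status)

if __name__ == "__main__":
    demo.launch()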