import glob
import json
import os
import random
import uuid
from datetime import datetime
from pathlib import Path

import gradio as gr
from huggingface_hub import CommitScheduler, InferenceClient, get_token, hf_hub_download, login
from openai import OpenAI

from prompts import detailed_genre_description_prompt, basic_prompt
from theme import TufteInspired

# TODOs
# 1. Add a login button
# 2. Expand the prompt library
# 3. Log the user if logged in

# OpenAI-compatible client pointed at the Hugging Face Inference API endpoint for Llama 3 70B Instruct
client = OpenAI(
    base_url="https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct/v1",
    api_key=get_token(),
)
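
# NOTE (sketch): InferenceClient is imported above but not used. Assuming the same model
# and token, an equivalent streaming chat call could also be made directly through
# huggingface_hub, without the OpenAI compatibility layer, e.g.:
#
#     hf_client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct", token=get_token())
#     for chunk in hf_client.chat_completion(
#         messages=[{"role": "user", "content": "Write a blurb for a sci-fi novel."}],
#         max_tokens=500,
#         stream=True,
#     ):
#         print(chunk.choices[0].delta.content or "", end="")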


def generate_prompt():
    if random.choice([True, False]):
        return detailed_genre_description_prompt()
    else:
        return basic_prompt()


def generate_blurb():
    # Stream a book blurb from the model, using a randomised length budget
    max_tokens = random.randint(100, 1000)
    prompt = generate_prompt()
    print(prompt)
    chat_completion = client.chat.completions.create(
        model="tgi",  # placeholder model name accepted by TGI's OpenAI-compatible endpoint
        messages=[
            {"role": "user", "content": prompt},
        ],
        stream=True,
        max_tokens=max_tokens,
    )
    full_text = ""
    for message in chat_completion:
        # The final streamed chunk can carry `content=None`, so guard against it
        full_text += message.choices[0].delta.content or ""
        yield full_text


# Function to log blurb and vote
def log_blurb_and_vote(blurb, vote):
    log_entry = {"timestamp": datetime.now().isoformat(), "blurb": blurb, "vote": vote}
    with open("blurb_log.jsonl", "a") as f:
        f.write(json.dumps(log_entry) + "\n")
    gr.Info("Thank you for voting!")
    return f"Logged: {vote}"


# Create custom theme
tufte_theme = TufteInspired()

# Create Gradio interface
with gr.Blocks(theme=tufte_theme) as demo:
    gr.Markdown("<h1 style='text-align: center;'>Would you read this book?</h1>")
    gr.Markdown(
        """<p style='text-align: center;'>Looking for your next summer read?
        Would you read a book based on this LLM-generated blurb? <br> Your vote will be added to <a href="https://example.com">this</a> Hugging Face dataset</p>"""
    )
    # gr.LoginButton(size="sm")
    user_name = gr.Textbox(label="User Name", placeholder="Enter your name")
    with gr.Row():
        generate_btn = gr.Button("Create a book", variant="primary")
    blurb_output = gr.Markdown(label="Book blurb")
    with gr.Row(visible=False) as voting_row:
        upvote_btn = gr.Button("👍 would read")
        downvote_btn = gr.Button("👎 wouldn't read")
    vote_output = gr.Textbox(label="Vote Status", interactive=False, visible=False)

    def show_voting_buttons(blurb):
        return blurb, gr.Row(visible=True)

    generate_btn.click(generate_blurb, outputs=blurb_output).then(
        show_voting_buttons, inputs=blurb_output, outputs=[blurb_output, voting_row]
    )

    upvote_btn.click(
        lambda x: log_blurb_and_vote(x, "upvote"),
        inputs=blurb_output,
        outputs=vote_output,
    )
    downvote_btn.click(
        lambda x: log_blurb_and_vote(x, "downvote"),
        inputs=blurb_output,
        outputs=vote_output,
    )

if __name__ == "__main__":
    demo.launch()