# safe-playground / app.py
# Hugging Face Space by pratyushmaini (commit f53a587, 10.9 kB).
# The lines above were page chrome from the HF "raw/history/blame" view;
# converted to comments so this file parses as Python.
import gradio as gr
from huggingface_hub import InferenceClient
# Display-name -> Hugging Face repo id for the models offered in the UI.
# Every entry currently points at the same zephyr placeholder; swap in the
# real repo ids once the actual checkpoints are published.
_ZEPHYR_REPO = "HuggingFaceH4/zephyr-7b-beta"
model_list = {
    name: _ZEPHYR_REPO
    for name in ("Safe LM", "Baseline 1", "Another Model")
}
def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
    """Stream a chat completion for *message*, yielding the growing reply text.

    Parameters
    ----------
    message : str
        The new user message.
    history : list of (user, assistant) pairs
        Prior turns; empty entries on either side are skipped.
    system_message : str
        System prompt prepended to the conversation.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to the inference endpoint.
    selected_model : str
        Key into ``model_list``; unknown keys fall back to the zephyr baseline.

    Yields the accumulated response string after each streamed token.  On any
    failure, yields a single user-facing error string instead of raising.
    """
    try:
        repo_id = model_list.get(selected_model, "HuggingFaceH4/zephyr-7b-beta")
        client = InferenceClient(repo_id)

        # Assemble the chat-completion message list: system prompt, then the
        # prior turns (non-empty sides only), then the new user message.
        messages = [{"role": "system", "content": system_message}]
        for past_user, past_assistant in history:
            if past_user:
                messages.append({"role": "user", "content": past_user})
            if past_assistant:
                messages.append({"role": "assistant", "content": past_assistant})
        messages.append({"role": "user", "content": message})

        accumulated = ""
        for token_message in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # Chunks of unexpected shape are logged and skipped rather than
            # aborting the whole stream.
            try:
                token = token_message.choices[0].delta.content
            except (AttributeError, IndexError) as e:
                print(f"Error extracting token: {e}")
                continue
            if token is not None:
                accumulated += token
                yield accumulated
    except Exception as e:
        # Top-level boundary: surface the failure to the UI as text.
        print(f"Error calling model API: {e}")
        yield f"Sorry, there was an error: {str(e)}"
# Custom CSS for branding with consistent styling and hiding footer.
# Passed verbatim to gr.Blocks(css=...).  Selectors target Gradio's generated
# DOM (button aria-labels, .chatbot message classes, footer wrappers), so they
# may need updating when the Gradio version changes.
css = """
@import url('https://fonts.googleapis.com/css2?family=Crimson+Pro:wght@400;500;600;700&display=swap');
body {
background-color: #f0f5fb; /* Light pastel blue background */
font-family: 'Crimson Pro', 'Palatino', serif;
max-width: 100%;
overflow-x: hidden;
}
.gradio-container {
background-color: #FFFFFF;
border-radius: 16px;
box-shadow: 0 3px 10px rgba(0,0,0,0.05);
max-width: 90%;
margin: 10px auto;
}
.app-header {
background: linear-gradient(135deg, #4a90e2, #75c6ef);
padding: 15px;
border-radius: 16px 16px 0 0;
position: relative;
color: white;
overflow: hidden;
border-bottom: 3px solid #e6c200;
}
.app-header::before {
content: "🛡️";
position: absolute;
font-size: 100px;
opacity: 0.1;
right: -20px;
top: -30px;
transform: rotate(15deg);
}
.app-title {
font-size: 24px;
font-weight: 600;
margin: 0;
display: flex;
align-items: center;
font-family: 'Crimson Pro', serif;
}
.app-title .safe {
font-weight: 700;
margin-right: 5px;
}
.app-title .lm {
font-weight: 400;
letter-spacing: 1px;
}
.app-title .shield {
margin-right: 10px;
font-size: 22px;
}
.app-subtitle {
font-size: 14px;
opacity: 0.9;
margin-top: 5px;
}
/* Send and Clear Chat buttons with gold accents */
button[aria-label="Send"], button[aria-label="Clear Chat"] {
background: white !important;
color: #333 !important;
border: 2px solid #e6c200 !important;
border-radius: 8px !important;
padding: 8px 16px !important;
font-weight: 600 !important;
transition: all 0.3s ease !important;
font-family: 'Crimson Pro', serif !important;
}
button[aria-label="Send"]:hover, button[aria-label="Clear Chat"]:hover {
background: #fff9e6 !important;
transform: translateY(-1px) !important;
box-shadow: 0 3px 6px rgba(230, 194, 0, 0.2) !important;
}
/* Other buttons remain with blue gradient */
button:not([aria-label="Send"]):not([aria-label="Clear Chat"]), .gradio-button:not([aria-label="Send"]):not([aria-label="Clear Chat"]) {
background: linear-gradient(to right, #4a90e2, #75c6ef) !important;
color: white !important;
border: none !important;
border-radius: 8px !important;
padding: 8px 16px !important;
font-weight: 600 !important;
transition: all 0.3s ease !important;
font-family: 'Crimson Pro', serif !important;
}
button:not([aria-label="Send"]):not([aria-label="Clear Chat"]):hover, .gradio-button:not([aria-label="Send"]):not([aria-label="Clear Chat"]):hover {
transform: translateY(-1px) !important;
box-shadow: 0 3px 6px rgba(74, 144, 226, 0.2) !important;
}
.gradio-dropdown, .gradio-slider, .gradio-textbox, .gradio-checkbox {
border-radius: 8px !important;
border: 1px solid #e1e5eb !important;
font-family: 'Crimson Pro', serif !important;
}
.model-select {
border-left: 3px solid #e6c200 !important;
padding-left: 8px !important;
}
/* Clean chat styling without background colors */
.chatbot .user-message::before {
content: "👤 ";
font-size: 16px;
}
.chatbot .assistant-message::before {
content: "🛡️ ";
font-size: 16px;
}
.chatbot .user-message, .chatbot .assistant-message {
background-color: transparent !important;
border-radius: 0 !important;
padding: 10px !important;
border-bottom: 1px solid #f0f0f0 !important;
font-family: 'Crimson Pro', serif !important;
font-size: 16px !important;
}
/* Remove the blue background behind message text */
.chatbot .user-message p, .chatbot .assistant-message p {
background-color: transparent !important;
}
.chatbot .user-message {
color: #333333 !important;
}
.chatbot .assistant-message {
color: #1a73e8 !important;
}
#chatbot {
height: 400px !important;
overflow-y: auto !important;
border: 1px solid #e1e5eb !important;
border-radius: 8px !important;
background-color: #ffffff !important;
}
/* Hide Gradio branding and footer */
footer, .footer {
display: none !important;
}
.gradio-footer {
display: none !important;
}
.gradio-container .footer {
display: none !important;
}
.gradio-container .footer-logo {
display: none !important;
}
div.gradio-footer {
display: none !important;
}
.footer-message {
display: none !important;
}
button[aria-label="Use via API"] {
display: none !important;
}
"""
# Build the UI: branded header, a sidebar with model/generation settings, and
# the chat column, then wire the submit/click/clear events.
with gr.Blocks(css=css) as demo:
    # Custom header with branding - removed the hr element
    gr.HTML("""
    <div class="app-header">
        <h1 class="app-title">
            <span class="shield">🛡️</span>
            <span class="safe">Safe</span>
            <span class="lm">Playground</span>
        </h1>
        <p class="app-subtitle">Responsible AI for everyone</p>
    </div>
    """)
    with gr.Row():
        # Left sidebar: Model selector
        with gr.Column(scale=1):
            gr.Markdown("## Models")
            model_dropdown = gr.Dropdown(
                choices=list(model_list.keys()),
                label="Select Model",
                value="Safe LM",
                elem_classes=["model-select"]
            )
            # Settings (sampling parameters forwarded to `respond`)
            gr.Markdown("### Settings")
            system_message = gr.Textbox(
                value="You are a friendly and safe assistant.",
                label="System Message",
                lines=2
            )
            max_tokens_slider = gr.Slider(
                minimum=1, maximum=2048, value=512, step=1,
                label="Max New Tokens"
            )
            temperature_slider = gr.Slider(
                minimum=0.1, maximum=4.0, value=0.7, step=0.1,
                label="Temperature"
            )
            top_p_slider = gr.Slider(
                minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                label="Top-p (nucleus sampling)"
            )
        # Main area: Chat interface
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Conversation",
                show_label=True,
                # Removed avatar_images which may not be supported in some Gradio versions
                elem_id="chatbot",
                height=500
            )
            with gr.Row():
                user_input = gr.Textbox(
                    placeholder="Type your message here...",
                    label="Your Message",
                    show_label=False,
                    scale=9
                )
                send_button = gr.Button(
                    "Send",
                    scale=1,
                    variant="primary"
                )
            with gr.Row():
                clear_button = gr.Button("Clear Chat")
    # No footer - removed; spacer keeps some bottom padding
    gr.HTML('<div style="height: 20px;"></div>')

    # Fix 1: Correct event handling for the chatbot interface
    def user(user_message, history):
        """Append the submitted message to history (assistant slot empty) and clear the input box."""
        # Return the user's message and add it to history
        return "", history + [[user_message, None]]

    def bot(history, system_message, max_tokens, temperature, top_p, selected_model):
        """Stream the model's reply into the last history entry, yielding the updated history."""
        # Get the last user message from history (with error checking)
        # NOTE(review): bare `return` value in a generator is discarded, so on
        # empty history nothing is yielded and the chatbot stays unchanged —
        # confirm that is the intended behavior.
        if not history or len(history) == 0:
            return history
        user_message = history[-1][0]
        # Call respond function with the message
        response_generator = respond(
            user_message,
            history[:-1],  # Pass history without the current message
            system_message,
            max_tokens,
            temperature,
            top_p,
            selected_model
        )
        # Update history as responses come in; mutates the last pair in place.
        for response in response_generator:
            history[-1][1] = response
            yield history

    # Wire up the event chain - use queue=True for the bot responses
    # (`user` runs unqueued for instant echo; `bot` is queued because it streams)
    user_input.submit(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    )
    send_button.click(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    )

    # Clear the chat history - use a proper function instead of lambda
    def clear_history():
        """Return an empty list to reset the chatbot component."""
        return []

    clear_button.click(clear_history, None, chatbot, queue=False)
if __name__ == "__main__":
    # Simple launch without parameters that might not be supported
    # across Gradio versions (e.g. share/server kwargs).
    demo.launch()