# Hugging Face Spaces page header ("Spaces: Sleeping") — scrape artifact, not code.
| import gradio as gr | |
| from huggingface_hub import InferenceClient | |
# Friendly display names mapped to Hugging Face model repo IDs.
# NOTE(review): all three entries currently point at the same public Zephyr
# checkpoint — replace with the real model IDs before release.
_ZEPHYR_ID = "HuggingFaceH4/zephyr-7b-beta"
model_list = {
    "Safe LM": _ZEPHYR_ID,        # TODO: swap in the actual Safe LM model ID
    "Baseline 1": _ZEPHYR_ID,
    "Another Model": _ZEPHYR_ID,
}
def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
    """Stream a chat completion for *message* from the selected hosted model.

    Yields the accumulated response text as tokens arrive so Gradio can render
    the reply incrementally. On any API failure, yields a single error string
    instead of raising.
    """
    try:
        # Resolve the friendly model name to a repo ID, falling back to Zephyr.
        model_id = model_list.get(selected_model, "HuggingFaceH4/zephyr-7b-beta")
        client = InferenceClient(model_id)

        # Assemble the OpenAI-style message list: system prompt, prior turns,
        # then the new user message. Empty turns are skipped.
        messages = [{"role": "system", "content": system_message}]
        for prior_user, prior_assistant in history:
            if prior_user:
                messages.append({"role": "user", "content": prior_user})
            if prior_assistant:
                messages.append({"role": "assistant", "content": prior_assistant})
        messages.append({"role": "user", "content": message})

        partial = ""
        stream = client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for chunk in stream:
            try:
                piece = chunk.choices[0].delta.content
            except (AttributeError, IndexError) as exc:
                # Unexpected chunk shape — log and keep streaming.
                print(f"Error extracting token: {exc}")
                continue
            if piece is not None:  # some stream chunks carry no text
                partial += piece
                yield partial
    except Exception as exc:
        # Top-level boundary: surface the failure to the UI rather than crash.
        print(f"Error calling model API: {exc}")
        yield f"Sorry, there was an error: {str(exc)}"
# Custom CSS for styling
# Injected into the Gradio page: pastel page background, white rounded app
# card, a faint oversized shield emoji behind the header, gold-bordered
# Send/Clear buttons, and hidden Gradio footer elements.
css = """
body {
    background-color: #f0f5fb; /* Light pastel blue background */
}
.gradio-container {
    background-color: white;
    border-radius: 16px;
    box-shadow: 0 2px 10px rgba(0,0,0,0.05);
    max-width: 90%;
    margin: 15px auto;
    padding-bottom: 20px;
}
/* Header styling with diagonal shield */
.app-header {
    position: relative;
    overflow: hidden;
}
.app-header::before {
    content: "🛡️";
    position: absolute;
    font-size: 100px;
    opacity: 0.1;
    right: -20px;
    top: -30px;
    transform: rotate(15deg);
    pointer-events: none;
}
/* Simple styling for buttons */
#send-btn {
    background-color: white !important;
    color: #333 !important;
    border: 2px solid #e6c200 !important;
}
#send-btn:hover {
    background-color: #fff9e6 !important;
}
#clear-btn {
    background-color: white !important;
    color: #333 !important;
    border: 2px solid #e6c200 !important;
}
#clear-btn:hover {
    background-color: #fff9e6 !important;
}
/* Hide elements */
footer {
    display: none !important;
}
.footer {
    display: none !important;
}
"""
with gr.Blocks(css=css) as demo:
    # Custom header with branding (shield logo + app name).
    gr.HTML("""
    <div class="app-header" style="background: linear-gradient(135deg, #4a90e2, #75c6ef); padding: 15px; border-radius: 16px 16px 0 0; color: white; border-bottom: 3px solid #e6c200;">
        <h1 style="font-size: 24px; font-weight: 600; margin: 0; display: flex; align-items: center; font-family: 'Palatino', serif;">
            <span style="margin-right: 10px; font-size: 22px;">🛡️</span>
            <span style="font-weight: 700; margin-right: 5px;">Safe</span>
            <span style="font-weight: 400; letter-spacing: 1px;">Playground</span>
        </h1>
        <p style="font-size: 14px; opacity: 0.9; margin-top: 5px;">Responsible AI for everyone</p>
    </div>
    """)

    with gr.Row():
        # Left sidebar: model selector and generation settings.
        with gr.Column(scale=1):
            gr.Markdown("## Models")
            model_dropdown = gr.Dropdown(
                choices=list(model_list.keys()),
                label="Select Model",
                value="Safe LM",
                elem_classes=["model-select"]
            )
            gr.Markdown("### Settings")
            system_message = gr.Textbox(
                value="You are a friendly and safe assistant.",
                label="System Message",
                lines=2
            )
            max_tokens_slider = gr.Slider(
                minimum=1, maximum=2048, value=512, step=1,
                label="Max New Tokens"
            )
            temperature_slider = gr.Slider(
                minimum=0.1, maximum=4.0, value=0.7, step=0.1,
                label="Temperature"
            )
            top_p_slider = gr.Slider(
                minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                label="Top-p (nucleus sampling)"
            )

        # Main area: chat window plus input row.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Conversation",
                show_label=True,
                height=400
            )
            with gr.Row():
                user_input = gr.Textbox(
                    placeholder="Type your message here...",
                    label="Your Message",
                    show_label=False,
                    scale=9
                )
                send_button = gr.Button(
                    "Send",
                    scale=1,
                    elem_id="send-btn"
                )
            with gr.Row():
                clear_button = gr.Button("Clear Chat", elem_id="clear-btn")

    # Display-only role prefixes added by display_with_emojis. Because the
    # prefixed text is written back into the shared chatbot state, it must be
    # stripped again before anything is sent to the model.
    USER_PREFIX = "👤 "
    ASSISTANT_PREFIX = "🛡️ "

    def _strip_prefix(text, prefix):
        # Remove a display prefix if present; tolerates None/empty messages.
        if text and text.startswith(prefix):
            return text[len(prefix):]
        return text

    def user(user_message, history):
        """Append the new user turn (assistant slot empty) and clear the input box."""
        return "", history + [[user_message, None]]

    def bot(history, system_message, max_tokens, temperature, top_p, selected_model):
        """Stream the model's reply into the last history slot, yielding updates."""
        if not history:
            # FIX: this is a generator, so `return history` would discard the
            # value and the chatbot output would never update — yield instead.
            yield history
            return

        # FIX: strip the display emojis so the model never sees them; the
        # chatbot state may already contain prefixed text from earlier turns.
        user_message = _strip_prefix(history[-1][0], USER_PREFIX)
        clean_history = [
            [_strip_prefix(u, USER_PREFIX), _strip_prefix(a, ASSISTANT_PREFIX)]
            for u, a in history[:-1]  # prior turns only, without the pending message
        ]

        response_generator = respond(
            user_message,
            clean_history,
            system_message,
            max_tokens,
            temperature,
            top_p,
            selected_model
        )
        # Re-yield the growing history so Gradio re-renders each partial reply.
        for response in response_generator:
            history[-1][1] = response
            yield history

    def display_with_emojis(history):
        """Return a copy of *history* with role emojis prefixed for display."""
        if not history:
            return []
        history_with_emojis = []
        for user_msg, assistant_msg in history:
            # Only prefix non-empty messages that are not already prefixed.
            if user_msg and not user_msg.startswith(USER_PREFIX):
                user_msg = f"{USER_PREFIX}{user_msg}"
            if assistant_msg and not assistant_msg.startswith(ASSISTANT_PREFIX):
                assistant_msg = f"{ASSISTANT_PREFIX}{assistant_msg}"
            history_with_emojis.append([user_msg, assistant_msg])
        return history_with_emojis

    # Enter-to-submit and the Send button share the same three-step chain:
    # record the user turn -> stream the bot reply -> add display emojis.
    user_input.submit(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    ).then(
        display_with_emojis,
        [chatbot],
        [chatbot]
    )

    send_button.click(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    ).then(
        display_with_emojis,
        [chatbot],
        [chatbot]
    )

    def clear_history():
        """Reset the conversation to an empty list."""
        return []

    clear_button.click(clear_history, None, chatbot, queue=False)
# Script entry point: launch the Gradio app when run directly.
if __name__ == "__main__":
    demo.launch()