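"""Safe LM Playground: a Gradio chat app that streams responses from
Hugging Face Inference API models chosen from a sidebar model selector."""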
import gradio as gr
from huggingface_hub import InferenceClient

# Define available models (update with your actual model IDs)
model_list = {
    "Safe LM": "HuggingFaceH4/zephyr-7b-beta",  # Replace with your Safe LM model ID
    "Baseline 1": "HuggingFaceH4/zephyr-7b-beta",
    "Another Model": "HuggingFaceH4/zephyr-7b-beta"
}

def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
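    """Stream a chat completion for `message` from the selected model."""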
    try:
        # Create an InferenceClient for the selected model
        client = InferenceClient(model_list.get(selected_model, "HuggingFaceH4/zephyr-7b-beta"))
        
        # Build conversation messages for the client
        messages = [{"role": "system", "content": system_message}]
        for user_msg, assistant_msg in history:
            if user_msg:  # Only add non-empty messages
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:  # Only add non-empty messages
                messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": message})
        
        response = ""
        
        # Stream the response from the client
        for token_message in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # Safe extraction of token with error handling
            try:
                token = token_message.choices[0].delta.content
                if token is not None:  # Handle potential None values
                    response += token
                    yield response
            except (AttributeError, IndexError) as e:
                # Handle cases where token structure might be different
                print(f"Error extracting token: {e}")
                continue
    except Exception as e:
        # Return error message if the model call fails
        print(f"Error calling model API: {e}")
        yield f"Sorry, there was an error: {str(e)}"

# Custom CSS for branding with consistent styling and hiding footer
css = """
@import url('https://fonts.googleapis.com/css2?family=Crimson+Pro:wght@400;500;600;700&display=swap');

body { 
    background-color: #f0f5fb; /* Light pastel blue background */
    font-family: 'Crimson Pro', 'Palatino', serif;
    max-width: 100%;
    overflow-x: hidden;
}
.gradio-container { 
    background-color: #FFFFFF; 
    border-radius: 16px; 
    box-shadow: 0 3px 10px rgba(0,0,0,0.05); 
    max-width: 90%;
    margin: 10px auto;
}
.app-header {
    background: linear-gradient(135deg, #4a90e2, #75c6ef);
    padding: 15px;
    border-radius: 16px 16px 0 0;
    position: relative;
    color: white;
    overflow: hidden;
    border-bottom: 3px solid #e6c200;
}
.app-header::before {
    content: "🛡️";
    position: absolute;
    font-size: 100px;
    opacity: 0.1;
    right: -20px;
    top: -30px;
    transform: rotate(15deg);
}
.app-title {
    font-size: 24px;
    font-weight: 600;
    margin: 0;
    display: flex;
    align-items: center;
    font-family: 'Crimson Pro', serif;
}
.app-title .safe {
    font-weight: 700;
    margin-right: 5px;
}
.app-title .lm {
    font-weight: 400;
    letter-spacing: 1px;
}
.app-title .shield {
    margin-right: 10px;
    font-size: 22px;
}
.app-subtitle {
    font-size: 14px;
    opacity: 0.9;
    margin-top: 5px;
}
/* Send and Clear Chat buttons with gold accents */
button[aria-label="Send"], button[aria-label="Clear Chat"] {
    background: white !important;
    color: #333 !important;
    border: 2px solid #e6c200 !important;
    border-radius: 8px !important;
    padding: 8px 16px !important;
    font-weight: 600 !important;
    transition: all 0.3s ease !important;
    font-family: 'Crimson Pro', serif !important;
}
button[aria-label="Send"]:hover, button[aria-label="Clear Chat"]:hover {
    background: #fff9e6 !important;
    transform: translateY(-1px) !important;
    box-shadow: 0 3px 6px rgba(230, 194, 0, 0.2) !important;
}
/* Other buttons remain with blue gradient */
button:not([aria-label="Send"]):not([aria-label="Clear Chat"]), .gradio-button:not([aria-label="Send"]):not([aria-label="Clear Chat"]) {
    background: linear-gradient(to right, #4a90e2, #75c6ef) !important;
    color: white !important;
    border: none !important;
    border-radius: 8px !important;
    padding: 8px 16px !important;
    font-weight: 600 !important;
    transition: all 0.3s ease !important;
    font-family: 'Crimson Pro', serif !important;
}
button:not([aria-label="Send"]):not([aria-label="Clear Chat"]):hover, .gradio-button:not([aria-label="Send"]):not([aria-label="Clear Chat"]):hover {
    transform: translateY(-1px) !important;
    box-shadow: 0 3px 6px rgba(74, 144, 226, 0.2) !important;
}
.gradio-dropdown, .gradio-slider, .gradio-textbox, .gradio-checkbox {
    border-radius: 8px !important;
    border: 1px solid #e1e5eb !important;
    font-family: 'Crimson Pro', serif !important;
}
.model-select {
    border-left: 3px solid #e6c200 !important;
    padding-left: 8px !important;
}
/* Clean chat styling without background colors */
.chatbot .user-message::before {
    content: "👤 ";
    font-size: 16px;
}
.chatbot .assistant-message::before {
    content: "🛡️ ";
    font-size: 16px;
}
.chatbot .user-message, .chatbot .assistant-message {
    background-color: transparent !important;
    border-radius: 0 !important;
    padding: 10px !important;
    border-bottom: 1px solid #f0f0f0 !important;
    font-family: 'Crimson Pro', serif !important;
    font-size: 16px !important;
}
/* Remove the blue background behind message text */
.chatbot .user-message p, .chatbot .assistant-message p {
    background-color: transparent !important;
}
.chatbot .user-message {
    color: #333333 !important;
}
.chatbot .assistant-message {
    color: #1a73e8 !important;
}
#chatbot {
    height: 500px !important; /* keep in sync with the Chatbot height parameter */
    overflow-y: auto !important;
    border: 1px solid #e1e5eb !important;
    border-radius: 8px !important;
    background-color: #ffffff !important;
}
/* Hide Gradio branding and footer */
footer, .footer {
    display: none !important;
}
.gradio-footer {
    display: none !important;
}
.gradio-container .footer {
    display: none !important;
}
.gradio-container .footer-logo {
    display: none !important;
}
div.gradio-footer {
    display: none !important;
}
.footer-message {
    display: none !important;
}
button[aria-label="Use via API"] {
    display: none !important;
}
"""

with gr.Blocks(css=css) as demo:
    # Custom header with branding
    gr.HTML("""
    <div class="app-header">
        <h1 class="app-title">
            <span class="shield">🛡️</span>
            <span class="safe">Safe</span>
            <span class="lm">Playground</span>
        </h1>
        <p class="app-subtitle">Responsible AI for everyone</p>
    </div>
    """)
        
    with gr.Row():
        # Left sidebar: Model selector
        with gr.Column(scale=1):
            gr.Markdown("## Models")
            model_dropdown = gr.Dropdown(
                choices=list(model_list.keys()),
                label="Select Model",
                value="Safe LM",
                elem_classes=["model-select"]
            )
            
            # Settings
            gr.Markdown("### Settings")
            system_message = gr.Textbox(
                value="You are a friendly and safe assistant.",
                label="System Message",
                lines=2
            )
            max_tokens_slider = gr.Slider(
                minimum=1, maximum=2048, value=512, step=1, 
                label="Max New Tokens"
            )
            temperature_slider = gr.Slider(
                minimum=0.1, maximum=4.0, value=0.7, step=0.1, 
                label="Temperature"
            )
            top_p_slider = gr.Slider(
                minimum=0.1, maximum=1.0, value=0.95, step=0.05, 
                label="Top-p (nucleus sampling)"
            )
            
        # Main area: Chat interface
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Conversation", 
                show_label=True,
                # avatar_images omitted; it may not be supported in some Gradio versions
                elem_id="chatbot",
                height=500
            )
            with gr.Row():
                user_input = gr.Textbox(
                    placeholder="Type your message here...", 
                    label="Your Message",
                    show_label=False,
                    scale=9
                )
                send_button = gr.Button(
                    "Send", 
                    scale=1,
                    variant="primary"
                )
            
            with gr.Row():
                clear_button = gr.Button("Clear Chat")
    
    # Spacer in place of the removed footer
    gr.HTML('<div style="height: 20px;"></div>')
    
    # Event handlers for the chatbot interface
    def user(user_message, history):
        # Clear the textbox and append the user's message to the history
        return "", history + [[user_message, None]]
    
    def bot(history, system_message, max_tokens, temperature, top_p, selected_model):
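        """Stream the assistant's reply into the last history entry."""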
        # Get the last user message from history (with error checking)
        if not history:
            return
        user_message = history[-1][0]
        # Call respond function with the message
        response_generator = respond(
            user_message, 
            history[:-1],  # Pass history without the current message
            system_message, 
            max_tokens, 
            temperature, 
            top_p, 
            selected_model
        )
        # Update history as responses come in
        for response in response_generator:
            history[-1][1] = response
            yield history
    
    # Wire up the event chain - use queue=True for the bot responses
    user_input.submit(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    )
    
    send_button.click(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    )
    
    # Clear the chat history (a named function instead of a lambda)
    def clear_history():
        return []
    
    clear_button.click(clear_history, None, chatbot, queue=False)

if __name__ == "__main__":
    # Enable the queue so the streaming (generator) handlers can run,
    # then launch without extra parameters that might not be supported
    demo.queue()
    demo.launch()