import gradio as gr
from huggingface_hub import InferenceClient

# Define available models (update with your actual model IDs; the first three
# entries are placeholders that all point at the same public model)
model_list = {
    "Safe LM": "HuggingFaceH4/zephyr-7b-beta",
    "Baseline 1": "HuggingFaceH4/zephyr-7b-beta",
    "Another Model": "HuggingFaceH4/zephyr-7b-beta",
    "LLaMA3.2-1B": "meta-llama/Llama-3.2-1B-Instruct",
    "Mix IFT V2 - Score0 Rephrased": "locuslab/mix_ift_v2-smollm2-360m-smollm2-360m-score0_mix_rephrased_from_beginning-300B",
    "Mix IFT V2 - Score0 Only": "locuslab/mix_ift_v2-smollm2-360m-smollm2-360m-score0_only-300B",
    "Mix IFT V2 - All Raw Folders Metadata": "locuslab/mix_ift_v2-smollm2-360m-smollm2-360m-all_raw_folders_metadata-300B",
    "Mix IFT V2 - All Raw Folders Baseline": "locuslab/mix_ift_v2-smollm2-360m-smollm2-360m-all_raw_folders_baseline-300B",
    "Mix IFT V2 - Score0 Only MBS16 GBS1024": "locuslab/mix_ift_v2-smollm2-360m-smollm2-360m-score0_only-300B-mbs16-gbs1024-16feb-lr2e-05-gbs16"
}

# Dictionary to track which models support chat completion vs. text generation.
# Note: respond() below currently routes by a "locuslab" substring check; this
# mapping documents the intended task for each model.
model_tasks = {
    "HuggingFaceH4/zephyr-7b-beta": "chat-completion",  # This model supports chat completion
    # Add other models that support chat completion here
}
# Models not listed above default to text generation.
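
# Minimal helper sketch for consulting model_tasks (an assumption about how the
# mapping is meant to be used; respond() below does not call it yet):
def get_task(model_id):
    """Return the inference task for a model, defaulting to text generation."""
    return model_tasks.get(model_id, "text-generation")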


def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
    try:
        # Get the model ID for the selected model
        model_id = model_list.get(selected_model, "HuggingFaceH4/zephyr-7b-beta")
        
        # Create an InferenceClient for the selected model
        client = InferenceClient(model_id)
        
        # Always use text generation for locuslab models
        if "locuslab" in model_id:
            # Format the prompt manually for text generation
            # Simple formatting that works with most models
            formatted_prompt = ""
            
            # Add minimal formatting for better results with research models
            if len(history) > 0:
                # Include minimal context from history
                last_exchanges = history[-1:]  # Just use the last exchange
                for user_msg, assistant_msg in last_exchanges:
                    if user_msg:
                        formatted_prompt += f"{user_msg}\n"
            
            # Add current message - keep it simple
            formatted_prompt += f"{message}"
            
            response = ""
            
            # Use text generation instead of chat completion
            print(f"Using text generation with prompt: {formatted_prompt}")
            for token in client.text_generation(
                formatted_prompt,
                max_new_tokens=max_tokens,
                stream=True,
                temperature=temperature,
                top_p=top_p,
                do_sample=True  # Enable sampling for more creative responses
            ):
                response += token
                yield response
        else:
            # Try chat completion for standard models
            try:
                messages = [{"role": "system", "content": system_message}]
                for user_msg, assistant_msg in history:
                    if user_msg:  # Only add non-empty messages
                        messages.append({"role": "user", "content": user_msg})
                    if assistant_msg:  # Only add non-empty messages
                        messages.append({"role": "assistant", "content": assistant_msg})
                messages.append({"role": "user", "content": message})
                
                response = ""
                
                # Stream the response from the client
                for token_message in client.chat_completion(
                    messages,
                    max_tokens=max_tokens,
                    stream=True,
                    temperature=temperature,
                    top_p=top_p,
                ):
                    # Safe extraction of token with error handling
                    try:
                        token = token_message.choices[0].delta.content
                        if token is not None:  # Handle potential None values
                            response += token
                            yield response
                    except (AttributeError, IndexError) as e:
                        # Handle cases where token structure might be different
                        print(f"Error extracting token: {e}")
                        continue
            except Exception as e:
                # If chat completion fails, fall back to text generation
                print(f"Chat completion failed: {e}. Falling back to text generation.")
                formatted_prompt = f"{system_message}\n\n"
                
                for user_msg, assistant_msg in history:
                    if user_msg:
                        formatted_prompt += f"User: {user_msg}\n"
                    if assistant_msg:
                        formatted_prompt += f"Assistant: {assistant_msg}\n"
                
                formatted_prompt += f"User: {message}\nAssistant:"
                
                response = ""
                
                # Use text generation instead of chat completion
                for token in client.text_generation(
                    formatted_prompt,
                    max_new_tokens=max_tokens,
                    stream=True,
                    temperature=temperature,
                    top_p=top_p,
                    do_sample=True,  # keep sampling consistent with the primary path
                ):
                    response += token
                    yield response
                
    except Exception as e:
        # Return detailed error message if the model call fails
        error_message = str(e)
        print(f"Error calling model API: {error_message}")
        yield f"Error: {error_message}. Please try a different model or adjust parameters."


# Custom CSS for styling
css = """
body { 
    background-color: #f0f5fb; /* Light pastel blue background */
}
.gradio-container { 
    background-color: white;
    border-radius: 16px;
    box-shadow: 0 2px 10px rgba(0,0,0,0.05);
    max-width: 90%;
    margin: 15px auto;
    padding-bottom: 20px;
}
/* Header styling with diagonal shield */
.app-header {
    position: relative;
    overflow: hidden;
}
.app-header::before {
    content: "🛡️";
    position: absolute;
    font-size: 100px;
    opacity: 0.1;
    right: -20px;
    top: -30px;
    transform: rotate(15deg);
    pointer-events: none;
}
/* Simple styling for buttons */
#send-btn {
    background-color: white !important;
    color: #333 !important;
    border: 2px solid #e6c200 !important;
}
#send-btn:hover {
    background-color: #fff9e6 !important;
}
#clear-btn {
    background-color: white !important;
    color: #333 !important;
    border: 2px solid #e6c200 !important;
}
#clear-btn:hover {
    background-color: #fff9e6 !important;
}
/* Hide elements */
footer {
    display: none !important;
}
.footer {
    display: none !important;
}
"""

with gr.Blocks(css=css) as demo:
    # Custom header with branding
    gr.HTML("""
    <div class="app-header" style="background: linear-gradient(135deg, #4a90e2, #75c6ef); padding: 15px; border-radius: 16px 16px 0 0; color: white; border-bottom: 3px solid #e6c200;">
        <h1 style="font-size: 32px; font-weight: 600; margin: 0; display: flex; align-items: center; font-family: 'Palatino', serif;">
            <span style="margin-right: 10px; font-size: 32px;">🛡️</span>
            <span style="font-weight: 700; margin-right: 1px;">Safe</span>
            <span style="font-weight: 400; letter-spacing: 1px;">Playground</span>
        </h1>
    </div>
    """)
    
    # Status message for API errors (placeholder: not currently updated by any event)
    status_message = gr.Markdown("", elem_id="status-message")
        
    with gr.Row():
        # Left sidebar: Model selector
        with gr.Column(scale=1):
            gr.Markdown("## Models")
            model_dropdown = gr.Dropdown(
                choices=list(model_list.keys()),
                label="Select Model",
                value="Safe LM",
                elem_classes=["model-select"]
            )
            
            # Settings
            gr.Markdown("### Settings")
            system_message = gr.Textbox(
                value="You are a friendly and safe assistant.",
                label="System Message",
                lines=2
            )
            max_tokens_slider = gr.Slider(
                minimum=1, maximum=2048, value=100, step=1, 
                label="Max New Tokens"
            )
            temperature_slider = gr.Slider(
                minimum=0.1, maximum=4.0, value=0.7, step=0.1, 
                label="Temperature"
            )
            top_p_slider = gr.Slider(
                minimum=0.1, maximum=1.0, value=0.95, step=0.05, 
                label="Top-p (nucleus sampling)"
            )
            
        # Main area: Chat interface
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Conversation", 
                show_label=True,
                height=400
            )
            with gr.Row():
                user_input = gr.Textbox(
                    placeholder="Type your message here...", 
                    label="Your Message",
                    show_label=False,
                    scale=9
                )
                send_button = gr.Button(
                    "Send", 
                    scale=1,
                    elem_id="send-btn"
                )
            
            with gr.Row():
                clear_button = gr.Button("Clear Chat", elem_id="clear-btn")
    
    # Define functions for chatbot interactions
    def user(user_message, history):
        # Add emoji to user message
        user_message_with_emoji = f"👤 {user_message}"
        return "", history + [[user_message_with_emoji, None]]
    
    def bot(history, system_message, max_tokens, temperature, top_p, selected_model):
        # Ensure there's history
        if not history:
            return history

        # Get the last user message from history
        user_message = history[-1][0]
        # Strip the display emoji before sending text to the model; removeprefix
        # (Python 3.9+) is safer than slicing, since these emoji can span
        # multiple code points.
        user_message = user_message.removeprefix("👤 ")

        # Clean the display emojis out of the earlier history as well
        clean_history = []
        for h_user, h_bot in history[:-1]:
            if h_user:
                h_user = h_user.removeprefix("👤 ")
            if h_bot:
                h_bot = h_bot.removeprefix("🛡️ ")
            clean_history.append([h_user, h_bot])
        
        # Call respond function with the message
        response_generator = respond(
            user_message, 
            clean_history,  # Pass clean history
            system_message, 
            max_tokens, 
            temperature, 
            top_p, 
            selected_model
        )
        
        # Update history as responses come in, adding emoji
        for response in response_generator:
            history[-1][1] = f"🛡️ {response}"
            yield history
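
    # bot() is a generator: Gradio streams each yielded history list to the
    # Chatbot component as a partial update, producing token-by-token output.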
    
    # Wire up the event chain
    user_input.submit(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    )
    
    send_button.click(
        user,
        [user_input, chatbot],
        [user_input, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        [chatbot],
        queue=True
    )
    
    # Clear the chat history
    def clear_history():
        return []
    
    clear_button.click(clear_history, None, chatbot, queue=False)

if __name__ == "__main__":
    # Enable the queue so the streaming (generator) handlers above work on
    # older Gradio versions; on recent versions this is effectively a no-op.
    demo.queue()
    demo.launch()