File size: 3,295 Bytes
623519a
 
fddc5fe
 
623519a
 
 
c3a9a59
623519a
23af0ed
 
 
623519a
23af0ed
623519a
23af0ed
 
 
 
623519a
fddc5fe
 
623519a
 
 
 
 
 
 
 
23af0ed
 
 
e33aa88
623519a
 
 
 
 
 
 
 
 
 
 
6d720ba
623519a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23af0ed
623519a
 
 
 
 
 
 
 
e69bdf3
623519a
 
 
 
c3a9a59
623519a
 
 
 
 
 
 
 
 
 
23af0ed
e69bdf3
623519a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e69bdf3
623519a
 
 
 
e69bdf3
623519a
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import os
import gradio as gr
from huggingface_hub import InferenceClient

def respond(message, history: list[dict[str, str]]):
    """Stream an assistant reply for *message*, given the prior chat *history*.

    Args:
        message: The latest user message.
        history: Previous turns as OpenAI-style ``{"role": ..., "content": ...}``
            dicts (Gradio "messages" format).

    Yields:
        The accumulated assistant response text after each streamed chunk,
        so the Gradio UI can render it incrementally.

    Raises:
        KeyError: If the ``HF_TOKEN`` environment variable (the Hugging Face
            Space secret) is not set.
    """
    # Pull the token from the Hugging Face Space secret.
    client = InferenceClient(token=os.environ["HF_TOKEN"], model="openai/gpt-oss-20b")

    # Friendly system prompt defining the bot's persona.
    system_message = """
You are BitAI (V1), a friendly, curious, and talkative chatbot created by the user 'Sal'.
You can share opinions, answer casual questions, and chat about personal-style topics in a safe and friendly way.
Avoid repeating the same phrases, and always try to keep the conversation engaging and natural.
Politely refuse only things that are truly harmful, illegal, or unsafe.
If someone asks what you are, clarify politely that you are BitAI, an AI chatbot.
"""

    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: the loop variable is named `chunk` (not `message`) so the user's
    # message parameter is never shadowed/clobbered by the streamed objects.
    for chunk in client.chat_completion(
        messages,
        max_tokens=2048,
        stream=True,
        temperature=0.7,
        top_p=0.95,
    ):
        choices = chunk.choices
        # Some stream chunks carry no content delta (e.g. role/finish chunks);
        # append only when a non-empty delta is present.
        if choices and choices[0].delta.content:
            response += choices[0].delta.content
        yield response

# Build the app UI: a Blocks layout with custom dark-theme CSS wrapping a
# ChatInterface backed by `respond`. The CSS string is passed verbatim to
# Gradio and injected into the page.
with gr.Blocks(css="""
/* Chat arredondado */
.gr-chat-interface {
    border-radius: 20px !important;
    overflow: hidden !important;
    border: 2px solid #333 !important;
    background-color: #1a1a1a !important;
    color: white;
}

/* Botões grandes escuros com cantos muito arredondados */
.gr-button, .gr-chat-send-button {
    border-radius: 50px;
    padding: 12px 20px;
    background-color: #111;
    color: white;
    font-weight: bold;
    cursor: pointer;
    height: 50px;
    transition: background-color 0.5s;
}
.gr-button:active, .gr-chat-send-button:active {
    background-color: white !important;
    color: #111 !important;
    transition: background-color 0.5s;
}

/* Outros botões menores */
button:not(.gr-chat-send-button) {
    border-radius: 30px;
    padding: 6px 12px;
    background-color: #222;
    color: white;
    height: 40px;
    transition: background-color 0.5s;
}
button:not(.gr-chat-send-button):active {
    background-color: white !important;
    color: #111 !important;
    transition: background-color 0.5s;
}

/* Textbox menor */
textarea {
    height: 40px !important;
    border-radius: 20px !important;
    border: 1px solid #444 !important;
    padding: 8px !important;
    background-color: #111;
    color: white;
    resize: none !important;
}

/* Loader animado embaixo da mensagem da IA */
#bitai-loader {
    width: 20px;
    height: 20px;
    margin: 10px auto 0 auto;
    border-radius: 50%;
    background: linear-gradient(45deg, #ff6, #f06);
    animation: moveLoader 1s infinite alternate;
}

@keyframes moveLoader {
    0% { transform: translateY(0px); }
    50% { transform: translateY(5px); }
    100% { transform: translateY(0px); }
}
""") as demo:

    with gr.Column():
        # Page title, streaming chat widget, and a decorative CSS-animated dot.
        gr.HTML("<h2 style='text-align:center; color:white'>BitAI</h2>")
        # type="messages" makes Gradio pass history as role/content dicts,
        # matching the signature of `respond`.
        chatbot = gr.ChatInterface(respond, type="messages")
        gr.HTML("<div id='bitai-loader'></div>")  # animated loader icon

# Launch the server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()