# app.py
import gradio as gr
import asyncio
from itertools import cycle
from core.utils import generate_first_question
from core.mbti_analyzer import analyze_mbti
from core.interviewer import generate_question
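# Assumed interfaces of the core.* helpers, inferred from how they are used
# below (the actual signatures live in the core package, not in this file):
#   generate_first_question() -> str          # opening question shown on page load
#   analyze_mbti(text: str) -> Iterator[str]  # yields analysis text; each chunk replaces the previous one
#   generate_question() -> str                # next interview question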

# --------------------------------------------------------------
# 🌀 Async "Thinking..." spinner animation
# --------------------------------------------------------------
async def async_loader(update_fn, delay=0.15):
    frames = cycle(["⠋","⠙","⠹","⠸","⠼","⠴","⠦","⠧","⠇","⠏"])
    for frame in frames:
        update_fn(f"💭 Interviewer is thinking... {frame}")
        await asyncio.sleep(delay)
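
# NOTE: async_loader is defined but not wired into the Blocks UI below; the
# "thinking" spinner frames are emitted directly from analyze_and_ask instead.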


# --------------------------------------------------------------
# ⚙️ Main logic
# --------------------------------------------------------------
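# analyze_and_ask is a generator: each yielded (analysis, question, counter)
# tuple is streamed by Gradio to the three output components as it is produced.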
def analyze_and_ask(user_text, prev_count):
    if not user_text.strip():
        yield "⚠️ Please enter your answer.", "", prev_count
        return

    try:
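        # Parse the previous "N/8" progress value to get the next question number.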
        n = int(prev_count.split("/")[0]) + 1
    except Exception:
        n = 1
    counter = f"{n}/8"

    # instant feedback before the heavier calls below
    yield "⏳ Analyzing personality...", "💭 Interviewer is thinking... ⠋", counter

    # MBTI analysis, streamed chunk by chunk
    mbti_gen = analyze_mbti(user_text)
    mbti_text = ""
    for chunk in mbti_gen:
        mbti_text = chunk
        yield mbti_text, "💭 Interviewer is thinking... ⠙", counter

    # generate a question with the new model (no instructions)
    try:
        question = generate_question()
    except Exception as e:
        question = f"⚠️ Question generator error: {e}"

    yield mbti_text, question, counter


# --------------------------------------------------------------
# 🧱 Gradio interface
# --------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Personality Interviewer") as demo:
    gr.Markdown(
        "## 🧠 MBTI Personality Interviewer\n"
        "Определи личностный тип и получи случайные вопросы MBTI категории."
    )

    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(
                label="Ваш ответ",
                placeholder="Например: I enjoy working with people and organizing events.",
                lines=4
            )
            btn = gr.Button("Анализировать и задать новый вопрос", variant="primary")
        with gr.Column(scale=1):
            mbti_out = gr.Textbox(label="📊 Анализ MBTI", lines=4)
            interviewer_out = gr.Textbox(label="💬 Следующий вопрос", lines=3)
            progress = gr.Textbox(label="⏳ Прогресс", value="0/8")

    btn.click(
        analyze_and_ask,
        inputs=[inp, progress],
        outputs=[mbti_out, interviewer_out, progress],
        show_progress=True
    )

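    # Pre-fill the first question and reset the progress counter on page load.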
    demo.load(
        lambda: ("", generate_first_question(), "0/8"),
        inputs=None,
        outputs=[mbti_out, interviewer_out, progress]
    )

demo.queue(max_size=32).launch(server_name="0.0.0.0", server_port=7860)
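
# To run locally (assuming gradio is installed and the core/ package is on the
# Python path): `python app.py`, then open http://localhost:7860 in a browser.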