File size: 2,264 Bytes
909ac92 3462c0f 5cc7e19 909ac92 5cc7e19 3462c0f 909ac92 5cc7e19 909ac92 5cc7e19 909ac92 5cc7e19 909ac92 3462c0f 909ac92 5cc7e19 3462c0f 909ac92 5cc7e19 909ac92 5cc7e19 3462c0f 5cc7e19 3462c0f 5cc7e19 916e292 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
import asyncio
import gradio as gr
from core.utils import generate_first_question
from core.mbti_analyzer import analyze_mbti
from core.interviewer import generate_next_question
# ---- Adapter ----
def analyze_and_ask_sync(user_text, prev_count, user_id="default_user"):
    """Synchronous wrapper so Gradio can invoke the async pipeline.

    Starts a fresh event loop per call via ``asyncio.run``; Gradio runs
    handlers off the main loop, so this is safe in this context.
    """
    coro = analyze_and_ask(user_text, prev_count, user_id)
    return asyncio.run(coro)
# ---- Async logic ----
async def analyze_and_ask(user_text, prev_count, user_id="default_user"):
    """Analyze one interview answer and produce the next question.

    Args:
        user_text: The interviewee's latest free-text answer.
        prev_count: Progress string like ``"3/16"``; malformed values
            restart the count at 1.
        user_id: Session key passed through to the interviewer backend.

    Returns:
        Tuple ``(mbti_text, next_question, counter)`` — the analysis text,
        the next question (or a completion notice), and the updated
        ``"n/16"`` progress string.
    """
    # Guard: blank answer — nag the user and leave the progress untouched.
    if not user_text.strip():
        return "⚠️ Введите ответ.", "", prev_count

    # Parse "n/16"; any malformed value (None, garbage) restarts at 1.
    try:
        n = int(prev_count.split("/")[0]) + 1
    except Exception:
        n = 1
    # Fix: never advance past the 16 MBTI categories (previously the
    # counter could display values like "17/16").
    counter = f"{min(n, 16)}/16"

    # Drain the analyzer, keeping the final chunk.
    # NOTE(review): assumes analyze_mbti yields cumulative text — confirm;
    # if it returned a plain string, this loop would keep only the last char.
    mbti_text = ""
    for part in analyze_mbti(user_text):
        mbti_text = part

    next_q_data = await generate_next_question(user_id, user_text)
    if next_q_data["completed"]:
        next_question = "✅ Interview finished! All 16 personality categories covered."
    else:
        next_question = f"({next_q_data['category']}) {next_q_data['question']}"

    return mbti_text, next_question, counter
# ---- Gradio UI ----
# Top-level wiring: components must be created inside the Blocks context
# manager, so the statement order below is load-bearing.
with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Interviewer AI") as demo:
    gr.Markdown("## 🧠 MBTI Personality Interviewer\nАнализ и вопросы по 16 категориям MBTI.")
    with gr.Row():
        with gr.Column(scale=1):
            # Left column: free-text answer box plus submit button.
            inp = gr.Textbox(label="Ваш ответ", placeholder="Например: I enjoy organizing group projects.", lines=4)
            btn = gr.Button("Отправить", variant="primary")
        with gr.Column(scale=1):
            # Right column: analysis output, next question, "n/16" progress.
            mbti_out = gr.Textbox(label="📊 Анализ MBTI", lines=4)
            interviewer_out = gr.Textbox(label="💬 Следующий вопрос", lines=3)
            progress = gr.Textbox(label="⏳ Прогресс", value="0/16")
    # Route clicks through the synchronous adapter (it wraps the async logic).
    btn.click(analyze_and_ask_sync, inputs=[inp, progress], outputs=[mbti_out, interviewer_out, progress])
    # On page load: clear the analysis, show the opening question, reset progress.
    demo.load(lambda: ("", generate_first_question(), "0/16"),
              inputs=None, outputs=[mbti_out, interviewer_out, progress])

if __name__ == "__main__":
    demo.launch()
|