import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSequenceClassification

# =========================================================
# 1️⃣  Model setup
# =========================================================

# Main MBTI model (the fine-tuned classifier)
MBTI_MODEL = "f3nsmart/MBTIclassifier"

# Lightweight LLM for generating interview questions
INTERVIEWER_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# Load the MBTI classifier
mbti_tok = AutoTokenizer.from_pretrained(MBTI_MODEL)
mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL)
mbti_pipe = pipeline("text-classification", model=mbti_model, tokenizer=mbti_tok, top_k=None)  # top_k=None returns scores for all labels (return_all_scores=True is deprecated)
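# With top_k=None the pipeline returns, per input, a list of {label, score}
# dicts covering every class. Illustrative shape only; the actual labels come
# from the fine-tuned model's config:
#   mbti_pipe("I plan everything weeks ahead")[0]
#   # -> [{"label": "INTJ", "score": 0.41}, {"label": "ISTJ", "score": 0.20}, ...]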

# Load the interviewer model
interviewer_tok = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
interviewer_model = AutoModelForCausalLM.from_pretrained(INTERVIEWER_MODEL)
interviewer_pipe = pipeline("text-generation", model=interviewer_model, tokenizer=interviewer_tok, max_new_tokens=60)
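# Note: TinyLlama-1.1B-Chat was trained on a chat format; a plain string prompt
# works, but quality is often better through the tokenizer's chat template.
# Sketch (assumes a transformers version with apply_chat_template):
#   msgs = [{"role": "user", "content": prompt}]
#   chat_prompt = interviewer_tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)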

# =========================================================
# 2️⃣  Logic
# =========================================================

def analyze_and_ask(user_text, counter_text="0/30"):
    if not user_text.strip():
        return "⚠️ Please enter an answer.", "", counter_text

    # MBTI analysis: rank all label scores and show the top three
    res = mbti_pipe(user_text)[0]
    res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
    mbti_text = "\n".join(f"{r['label']}: {r['score']:.3f}" for r in res_sorted[:3])

    # Generate the next question; return_full_text=False keeps the prompt out
    # of the pipeline output, so no string-splitting heuristics are needed
    prompt = (
        "You are a professional interviewer for MBTI testing. "
        "Generate one short, open-ended, natural question based on this answer: "
        f"'{user_text}'. Avoid yes/no questions."
    )
    gen = interviewer_pipe(prompt, return_full_text=False)[0]["generated_text"].strip()

    # Advance the "n/30" question counter
    try:
        count = int(str(counter_text).split("/")[0]) + 1
    except ValueError:
        count = 1
    return mbti_text, gen, f"{count}/30"


# =========================================================
# 3️⃣  Gradio interface
# =========================================================
with gr.Blocks(theme=gr.themes.Soft(), title="Adaptive MBTI Interviewer") as demo:
    gr.Markdown("## 🧠 Adaptive MBTI Interviewer\nDiscover your personality type and get the interviewer's next question.")

    with gr.Row():
        with gr.Column(scale=2):
            inp = gr.Textbox(label="Your answer", placeholder="For example: I enjoy working with people and organizing events.", lines=4)
            btn = gr.Button("Analyze and ask the next question", variant="primary")

        with gr.Column(scale=2):
            mbti_out = gr.Textbox(label="📊 MBTI analysis", lines=5)
            question_out = gr.Textbox(label="💬 Interviewer's next question", lines=3)
            counter = gr.Textbox(label="Progress", value="0/30")

    btn.click(fn=analyze_and_ask, inputs=[inp, counter], outputs=[mbti_out, question_out, counter])
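    # The progress Textbox doubles as lightweight state: its current "n/30"
    # value goes in as an input and comes back advanced by one.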

# =========================================================
# 4️⃣  Launch
# =========================================================
if __name__ == "__main__":
    demo.launch()