# MBTI / app.py — Hugging Face Space "QAway-to"
# Interviewer model: TinyLlama/TinyLlama-1.1B-Chat-v1.0 (commit a9ecae7)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSequenceClassification
# =========================================================
# 1️⃣ Model setup
# =========================================================
# Primary MBTI model (the author's fine-tuned sequence classifier)
MBTI_MODEL = "f3nsmart/MBTIclassifier"
# Lightweight LLM used to generate the next interview question
INTERVIEWER_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
# Load the MBTI classifier pipeline.
# NOTE(review): `return_all_scores=True` is deprecated in recent transformers
# releases in favour of `top_k=None` — confirm against the pinned version.
mbti_tok = AutoTokenizer.from_pretrained(MBTI_MODEL)
mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL)
mbti_pipe = pipeline("text-classification", model=mbti_model, tokenizer=mbti_tok, return_all_scores=True)
# Load the interviewer (short follow-up questions, capped at 60 new tokens)
interviewer_tok = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
interviewer_model = AutoModelForCausalLM.from_pretrained(INTERVIEWER_MODEL)
interviewer_pipe = pipeline("text-generation", model=interviewer_model, tokenizer=interviewer_tok, max_new_tokens=60)
# =========================================================
# 2️⃣ Логика
# =========================================================
def analyze_and_ask(user_text, question_count=1):
    """Classify the user's answer with the MBTI model and generate a follow-up question.

    Parameters
    ----------
    user_text : str
        The interviewee's free-text answer.
    question_count : int | str, optional
        Either the current question number (int), or the progress string the
        Gradio UI feeds in from the counter textbox (e.g. ``"3/30"``).

    Returns
    -------
    tuple[str, str, str]
        (top-3 MBTI label/score lines, next interviewer question, progress "n/30").
    """
    if not user_text.strip():
        return "⚠️ Введите ответ.", "", "0/30"

    # BUG FIX: the UI wires the counter textbox value (a string like "0/30")
    # into question_count; the old f"{question_count}/30" then produced
    # "0/30/30", "0/30/30/30", ... Parse the leading number and increment it.
    if isinstance(question_count, str):
        head = question_count.split("/", 1)[0].strip()
        current = int(head) + 1 if head.isdigit() else 1
    else:
        current = int(question_count)

    # MBTI analysis: top-3 labels, best first.
    # BUG FIX: a ": " separator is added — label and score used to run together.
    res = mbti_pipe(user_text)[0]
    res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
    mbti_text = "\n".join(f"{r['label']}: {r['score']:.3f}" for r in res_sorted[:3])

    # Ask the lightweight LLM for the next open-ended question.
    prompt = f"You are a professional interviewer for MBTI testing. Generate one short, open-ended, natural question based on this answer: '{user_text}'. Avoid yes/no questions."
    gen = interviewer_pipe(prompt)[0]["generated_text"]

    # The model tends to echo the prompt; keep only the text after the last
    # "question:" marker. BUG FIX: the old code checked case-insensitively but
    # split case-sensitively, so "Question:" silently returned the whole text.
    marker = "question:"
    idx = gen.lower().rfind(marker)
    gen = gen[idx + len(marker):].strip() if idx != -1 else gen.strip()

    counter = f"{current}/30"
    return mbti_text, gen, counter
# =========================================================
# 3️⃣ Gradio interface
# =========================================================
# Two-column layout: the user's answer + submit button on the left,
# the MBTI analysis, next question, and a progress counter on the right.
with gr.Blocks(theme=gr.themes.Soft(), title="Adaptive MBTI Interviewer") as demo:
    gr.Markdown("## 🧠 Adaptive MBTI Interviewer\nОпредели личностный тип и получи следующий вопрос от интервьюера.")
    with gr.Row():
        with gr.Column(scale=2):
            inp = gr.Textbox(label="Ваш ответ", placeholder="Например: I enjoy working with people and organizing events.", lines=4)
            btn = gr.Button("Анализировать и задать новый вопрос", variant="primary")
        with gr.Column(scale=2):
            mbti_out = gr.Textbox(label="📊 Анализ MBTI", lines=5)
            question_out = gr.Textbox(label="💬 Следующий вопрос от интервьюера", lines=3)
            counter = gr.Textbox(label="Прогресс", value="0/30")
    # NOTE(review): `counter` is both an input and an output, so the handler
    # receives its current STRING value (e.g. "0/30") as question_count —
    # the handler must parse it, not treat it as an int. Verify round-trip.
    btn.click(fn=analyze_and_ask, inputs=[inp, counter], outputs=[mbti_out, question_out, counter])
# =========================================================
# 4️⃣ Launch
# =========================================================
# Start the Gradio server only when run as a script (HF Spaces entry point).
if __name__ == "__main__":
    demo.launch()