|
|
import gradio as gr |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSequenceClassification |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face model ids: a fine-tuned MBTI text classifier and a small
# chat LLM used to generate follow-up interview questions.
MBTI_MODEL = "f3nsmart/MBTIclassifier"

INTERVIEWER_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# MBTI classification pipeline (weights are downloaded on first run).
mbti_tok = AutoTokenizer.from_pretrained(MBTI_MODEL)

mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL)

# NOTE(review): return_all_scores=True is deprecated in newer transformers
# releases (top_k=None is the replacement, but it changes the output nesting
# that analyze_and_ask indexes with [0]) — confirm the installed version
# still accepts it before upgrading.
mbti_pipe = pipeline("text-classification", model=mbti_model, tokenizer=mbti_tok, return_all_scores=True)

# Follow-up-question generation pipeline; max_new_tokens keeps replies short.
interviewer_tok = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)

interviewer_model = AutoModelForCausalLM.from_pretrained(INTERVIEWER_MODEL)

interviewer_pipe = pipeline("text-generation", model=interviewer_model, tokenizer=interviewer_tok, max_new_tokens=60)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_and_ask(user_text, question_count=1, *, classifier=None, generator=None):
    """Classify *user_text* with the MBTI model and generate a follow-up question.

    Parameters
    ----------
    user_text : str
        The interviewee's free-text answer.
    question_count : int | str
        Either the current question number (int) or the UI counter string of
        the form "N/30" — the value the Gradio ``counter`` Textbox actually
        passes in.  String form is parsed and incremented (bug fix: the
        original formatted the raw string, producing "0/30/30" and never
        advancing the counter); int form keeps the original "as given"
        semantics for backward compatibility.
    classifier, generator : callable, optional
        Keyword-only pipeline overrides (default: module-level ``mbti_pipe``
        and ``interviewer_pipe``); injectable for testing.

    Returns
    -------
    tuple[str, str, str]
        (top-3 MBTI label scores, next interview question, progress "N/30").
    """
    # Resolve pipelines lazily so module globals stay overridable.
    if classifier is None:
        classifier = mbti_pipe
    if generator is None:
        generator = interviewer_pipe

    if not user_text.strip():
        # Bug fix: don't reset the visible progress on an empty submission —
        # echo the caller's counter string back instead of a hard "0/30".
        prev = question_count if isinstance(question_count, str) else "0/30"
        return "⚠️ Введите ответ.", "", prev

    # Progress counter: parse-and-increment the "N/30" string the UI sends;
    # fall back to 1 if the prefix isn't numeric.
    if isinstance(question_count, str):
        head = question_count.split("/", 1)[0].strip()
        try:
            count = int(head) + 1
        except ValueError:
            count = 1
    else:
        count = int(question_count)
    counter = f"{count}/30"

    # Top-3 MBTI labels by score.  With return_all_scores=True the pipeline
    # returns [[{label, score}, ...]] for a single input, hence the [0].
    scores = classifier(user_text)[0]
    top3 = sorted(scores, key=lambda s: s["score"], reverse=True)[:3]
    mbti_text = "\n".join(f"{s['label']} → {s['score']:.3f}" for s in top3)

    prompt = f"You are a professional interviewer for MBTI testing. Generate one short, open-ended, natural question based on this answer: '{user_text}'. Avoid yes/no questions."
    gen = generator(prompt)[0]["generated_text"]

    # text-generation pipelines echo the prompt; drop it before post-processing.
    if gen.startswith(prompt):
        gen = gen[len(prompt):]

    # Bug fix: the original lower-cased the *check* but split on the raw
    # string, so a model emitting "Question:" was never stripped.  Locate the
    # marker case-insensitively and keep only the text after its last
    # occurrence (matching the original split(...)[-1] intent).
    marker_at = gen.lower().rfind("question:")
    if marker_at != -1:
        gen = gen[marker_at + len("question:"):]
    gen = gen.strip()

    return mbti_text, gen, counter
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Blocks(theme=gr.themes.Soft(), title="Adaptive MBTI Interviewer") as demo:
    # Page header.
    gr.Markdown("## 🧠 Adaptive MBTI Interviewer\nОпредели личностный тип и получи следующий вопрос от интервьюера.")

    with gr.Row():
        # Left column: the interviewee's free-text answer and the trigger.
        with gr.Column(scale=2):
            answer_box = gr.Textbox(
                label="Ваш ответ",
                placeholder="Например: I enjoy working with people and organizing events.",
                lines=4,
            )
            analyze_btn = gr.Button("Анализировать и задать новый вопрос", variant="primary")

        # Right column: classifier output, generated question and progress.
        with gr.Column(scale=2):
            mbti_box = gr.Textbox(label="📊 Анализ MBTI", lines=5)
            next_question_box = gr.Textbox(label="💬 Следующий вопрос от интервьюера", lines=3)
            progress_box = gr.Textbox(label="Прогресс", value="0/30")

    # Wiring: the current progress string is fed back into the handler and
    # replaced by the value it returns.
    analyze_btn.click(
        fn=analyze_and_ask,
        inputs=[answer_box, progress_box],
        outputs=[mbti_box, next_question_box, progress_box],
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server when the file is executed directly.
    demo.launch()
|
|
|