QAway-to committed
Commit · 288e23f
Parent(s): 40e0f05
New version v1.5
app.py CHANGED
@@ -1,79 +1,74 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 
-# === 1️⃣ MBTI
+# === 1️⃣ MBTI analysis model ===
 MODEL_ID = "f3nsmart/MBTIclassifier"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
-
+analyzer = pipeline("text-classification", model=model, tokenizer=tokenizer, return_all_scores=True)
 
 # === 2️⃣ Interviewer (Phi-3-mini) ===
 q_gen = pipeline(
     "text-generation",
-    model="microsoft/Phi-3-mini-4k-instruct",
+    model="microsoft/Phi-3-mini-4k-instruct",
     temperature=0.6,
     top_p=0.9,
     max_new_tokens=60
 )
 
-# === 3️⃣
-
-
-
-
+# === 3️⃣ Function for a single interview step ===
+def mbti_interview(user_input, history):
+    """
+    history — a list of [(question, answer), ...] pairs
+    """
+    # initialization
+    if history is None:
+        history = []
     if not user_input.strip():
-        return "⚠️ Enter an answer.", "Please
+        return history, "⚠️ Enter an answer.", "Please describe yourself.", f"{len(history)}/30"
 
-    # ---
-    results =
+    # --- analysis ---
+    results = analyzer(user_input)[0]
     results = sorted(results, key=lambda x: x["score"], reverse=True)
     result_text = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in results[:3]])
 
-    # ---
-
-
-    progress = f"{
+    # --- update the history ---
+    last_q = history[-1][0] if history else "Initial question"
+    history.append((last_q, user_input))
+    progress = f"{len(history)}/30"
+
+    # --- context of the last 5 pairs ---
+    convo = "\n".join([f"Q: {q}\nA: {a}" for q, a in history[-5:]])
 
-    # ---
-    context = " ".join(session["answers"][-3:])
+    # --- request to the interviewer model ---
     prompt = (
-
-
-
-        f"
+        "You are an HR interviewer conducting an MBTI personality assessment. "
+        "Based on the following recent dialogue, generate ONE new, natural, short question. "
+        "Avoid repeating any previous questions.\n\n"
+        f"{convo}\n\nNext question:"
     )
 
     try:
-
-        next_q =
-        next_q = next_q.strip().capitalize()
-
+        raw = q_gen(prompt)[0]["generated_text"]
+        next_q = raw.split("?")[0].split("\n")[-1].strip().capitalize() + "?"
        if len(next_q.split()) < 4 or len(next_q) > 140:
-            next_q = "
-
+            next_q = "What motivates you the most when you start something new?"
     except Exception as e:
         next_q = f"(⚠️ Question generation error: {e})"
 
-    return result_text, next_q, progress
+    return history, result_text, next_q, progress
+
 
-# ===
+# === 4️⃣ Gradio interface ===
 with gr.Blocks(css="""
 .gradio-container {max-width: 1600px !important; margin: auto;}
-#inp, #out, #question {
-
-    margin-left: auto !important;
-    margin-right: auto !important;
-    font-size: 16px !important;
-}
-#progress {
-    text-align: center;
-    font-weight: bold;
-    color: #4CAF50;
-    font-size: 18px;
-}
+#inp, #out, #question {width: 90% !important; margin: auto; font-size: 16px;}
+#progress {text-align: center; font-weight: bold; color: #4CAF50; font-size: 18px;}
 textarea {height: 100px !important;}
 """) as demo:
-    gr.Markdown("## 🧠 Adaptive MBTI Classifier\
+    gr.Markdown("## 🧠 Adaptive MBTI Classifier\nDynamic conversation with context memory")
+
+    state = gr.State([])  # keeps [(question, answer)] between iterations
 
     question = gr.Textbox(
         label="Question",
@@ -86,6 +81,8 @@ with gr.Blocks(css="""
     progress = gr.Markdown("**0/30**", elem_id="progress")
 
     btn = gr.Button("Answer")
-    btn.click(mbti_interview,
+    btn.click(fn=mbti_interview,
+              inputs=[inp, state],
+              outputs=[state, out, question, progress])
 
 demo.launch()
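
The main change in v1.5 is the state contract: gr.State holds the [(question, answer)] history, which enters mbti_interview as the second input and comes back as the first output. The bookkeeping can be sanity-checked without downloading either model; in this minimal sketch (not part of the commit), fake_analyzer and step are hypothetical stand-ins for analyzer and mbti_interview:

from typing import List, Tuple

def fake_analyzer(text: str):
    # Hypothetical stand-in for the text-classification pipeline: like the
    # real one with return_all_scores=True, it returns one list of
    # {label, score} dicts per input text.
    return [[{"label": "INTJ", "score": 0.71}, {"label": "INTP", "score": 0.18}]]

def step(user_input: str, history: List[Tuple[str, str]]):
    # Mirrors the bookkeeping in mbti_interview: score the answer, pair the
    # previous question with the new answer, then report progress out of 30.
    history = history or []
    results = sorted(fake_analyzer(user_input)[0], key=lambda x: x["score"], reverse=True)
    result_text = "\n".join(f"{r['label']} → {r['score']:.3f}" for r in results[:3])
    last_q = history[-1][0] if history else "Initial question"
    history.append((last_q, user_input))
    return history, result_text, f"{len(history)}/30"

history, scores, progress = step("I enjoy planning everything in advance.", [])
print(progress)     # 1/30
print(history[-1])  # ('Initial question', 'I enjoy planning everything in advance.')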
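
The question-trimming heuristic in the try block can also be checked in isolation: it keeps everything before the first "?", takes the last line of that, re-appends the question mark, and falls back to a stock question when the result is under 4 words or over 140 characters. A sketch with made-up model output (raw below is hypothetical):

raw = (
    "Q: How do you recharge after work\n"
    "A: I read books\n"
    "What motivates you to learn new skills"
)
# The same post-processing app.py applies to generated_text.
next_q = raw.split("?")[0].split("\n")[-1].strip().capitalize() + "?"
if len(next_q.split()) < 4 or len(next_q) > 140:
    next_q = "What motivates you the most when you start something new?"
print(next_q)  # What motivates you to learn new skills?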