QAway-to committed
Commit · dd7e39b · 1 Parent(s): c8cd73e
New version v1.9
app.py
CHANGED
@@ -6,39 +6,28 @@ from transformers import (
     AutoModelForSequenceClassification,
     pipeline,
 )
+
 # =========================================================
 # 1. Model setup
 # =========================================================
-
-# Your fine-tuned MBTI model
 MBTI_MODEL_ID = "f3nsmart/MBTIclassifier"
+LLM_MODEL_ID = "microsoft/Phi-3-mini-4k-instruct"
 
-# Question generator, a lightweight LLM
-LLM_MODEL_ID = "microsoft/Phi-3-mini-4k-instruct"  # can be swapped for another model
-
-# Load the classifier
 mbti_tokenizer = AutoTokenizer.from_pretrained(MBTI_MODEL_ID)
 mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL_ID)
 analyzer = pipeline("text-classification", model=mbti_model, tokenizer=mbti_tokenizer, return_all_scores=True)
 
-# Load the question generator
 q_gen = pipeline("text-generation", model=LLM_MODEL_ID)
 
 # =========================================================
 # 2. Main function
 # =========================================================
-def classify_and_ask(user_input):
-    """
-    Runs in parallel:
-    - MBTI classification of the text,
-    - generation of a new open-ended question.
-    """
+def classify_and_ask(user_input, question_count):
     if not user_input.strip():
-        return "⚠️ Enter some text.", "⚠️ No question was generated."
+        return "⚠️ Enter some text.", "⚠️ No question was generated.", question_count
 
     start_time = time.perf_counter()
 
-    # === Parallel processing ===
     with concurrent.futures.ThreadPoolExecutor() as executor:
         future_analysis = executor.submit(lambda: analyzer(user_input))
         future_question = executor.submit(lambda: q_gen(
@@ -53,23 +42,40 @@ def classify_and_ask(user_input):
     elapsed = time.perf_counter() - start_time
     print(f"⏱ Request processing time: {elapsed:.2f} s")
 
-    # === Process the analysis results ===
     results = sorted(analysis_result[0], key=lambda x: x["score"], reverse=True)
     top = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in results[:3]])
 
-    # === Process the generated question ===
     raw = question_result[0]["generated_text"].replace("\n", " ").strip()
     question = raw.split("?")[0].split("Question:")[-1].strip().capitalize() + "?"
 
-
+    # Increment the question counter
+    question_count += 1
+    progress = f"{question_count}/30"
+
+    return top, question, progress
+
 
 # =========================================================
-# 3.
+# 3. Function for the first question at startup
+# =========================================================
+def generate_first_question():
+    result = q_gen(
+        "You are starting an MBTI interview. Ask the FIRST open-ended question "
+        "to understand the person’s personality, starting with 'How', 'Why', 'What', or 'When'."
+    )[0]["generated_text"]
+    question = result.split("?")[0].split("Question:")[-1].strip().capitalize() + "?"
+    return question, "1/30"
+
+
+# =========================================================
+# 4. Gradio interface
 # =========================================================
 with gr.Blocks(title="MBTI Interactive Interview") as demo:
     gr.Markdown("## 🧠 MBTI Personality Interviewer\n"
                 "Determine the personality type and get the interviewer's next question.")
 
+    question_state = gr.State(1)
+
     with gr.Row():
         with gr.Column(scale=1):
             inp = gr.Textbox(
@@ -78,15 +84,23 @@ with gr.Blocks(title="MBTI Interactive Interview") as demo:
                 lines=4
             )
             btn = gr.Button("Analyze and ask a new question")
+            progress = gr.Markdown("**1/30**", elem_id="progress")
 
         with gr.Column(scale=1):
             out_analysis = gr.Textbox(label="📊 MBTI analysis", lines=6)
             out_question = gr.Textbox(label="💬 The interviewer's next question", lines=3)
 
-    btn.click(
+    btn.click(
+        fn=classify_and_ask,
+        inputs=[inp, question_state],
+        outputs=[out_analysis, out_question, progress],
+    )
+
+    # generate the first question at startup
+    demo.load(generate_first_question, outputs=[out_question, progress])
 
 # =========================================================
-#
+# 5. Launch
 # =========================================================
 if __name__ == "__main__":
     demo.launch()
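
One wiring detail in the counter this version introduces: `btn.click` reads `question_state` as an input but does not list it among its outputs, so the `gr.State` keeps its initial value of 1 on every click and the progress label appears stuck at "2/30". In Gradio, a `gr.State` is updated by including it in `outputs` and returning its new value. Below is a minimal, self-contained sketch of that round trip, not part of this commit; it reuses the commit's names (`question_state`, `progress`, `btn`) but the handler `next_question` is a hypothetical stub, not `classify_and_ask`.

```python
# Sketch of the gr.State round trip the commit omits (assumption: names mirror
# the commit; next_question is a stand-in for the real handler).
import gradio as gr

def next_question(user_input, question_count):
    question_count += 1                      # advance the per-session counter
    progress_text = f"{question_count}/30"   # same format as the commit
    # Returning question_count last writes the new value back into gr.State;
    # if the State is not in outputs, it stays at its initial value forever.
    return progress_text, question_count

with gr.Blocks() as demo:
    question_state = gr.State(1)
    inp = gr.Textbox(label="Answer")
    progress = gr.Markdown("**1/30**")
    btn = gr.Button("Next")
    btn.click(
        fn=next_question,
        inputs=[inp, question_state],
        outputs=[progress, question_state],  # include the State to persist it
    )

if __name__ == "__main__":
    demo.launch()
```

Applied to the commit's own handler, this would mean appending `question_count` to `classify_and_ask`'s return values and `question_state` to the `btn.click` outputs list.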