QAway-to committed on
Commit
288e23f
·
1 Parent(s): 40e0f05

New version v1.5

Browse files
Files changed (1) hide show
  1. app.py +40 -43
app.py CHANGED
@@ -1,79 +1,74 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
3
 
4
- # === 1️⃣ MBTI классификатор ===
5
  MODEL_ID = "f3nsmart/MBTIclassifier"
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
7
  model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
8
- pipe = pipeline("text-classification", model=model, tokenizer=tokenizer, return_all_scores=True)
9
 
10
  # === 2️⃣ Интервьюер (Phi-3-mini) ===
11
  q_gen = pipeline(
12
  "text-generation",
13
- model="microsoft/Phi-3-mini-4k-instruct", # 🔹 Новая модель
14
  temperature=0.6,
15
  top_p=0.9,
16
  max_new_tokens=60
17
  )
18
 
19
- # === 3️⃣ Состояние сессии ===
20
- session = {"answers": [], "count": 0}
21
-
22
- # === 4️⃣ Основная логика ===
23
- def mbti_interview(user_input):
 
 
 
24
  if not user_input.strip():
25
- return "⚠️ Введите ответ.", "Please share something about yourself.", "0/30"
26
 
27
- # --- Анализ личности ---
28
- results = pipe(user_input)[0]
29
  results = sorted(results, key=lambda x: x["score"], reverse=True)
30
  result_text = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in results[:3]])
31
 
32
- # --- Обновляем историю ---
33
- session["answers"].append(user_input)
34
- session["count"] += 1
35
- progress = f"{session['count']}/30"
 
 
 
36
 
37
- # --- Генерация нового вопроса ---
38
- context = " ".join(session["answers"][-3:])
39
  prompt = (
40
- f"You are an HR interviewer assessing MBTI traits. "
41
- f"Ask one short, natural question to understand the user's personality better. "
42
- f"Base your next question on this context:\n{context}\n"
43
- f"Ask only one question, polite and specific."
44
  )
45
 
46
  try:
47
- raw_q = q_gen(prompt)[0]["generated_text"]
48
- next_q = raw_q.split("?")[0] + "?"
49
- next_q = next_q.strip().capitalize()
50
-
51
  if len(next_q.split()) < 4 or len(next_q) > 140:
52
- next_q = "Can you tell me more about how you make important decisions?"
53
-
54
  except Exception as e:
55
  next_q = f"(⚠️ Ошибка генерации вопроса: {e})"
56
 
57
- return result_text, next_q, progress
 
58
 
59
- # === 5️⃣ Интерфейс Gradio ===
60
  with gr.Blocks(css="""
61
  .gradio-container {max-width: 1600px !important; margin: auto;}
62
- #inp, #out, #question {
63
- width: 90% !important;
64
- margin-left: auto !important;
65
- margin-right: auto !important;
66
- font-size: 16px !important;
67
- }
68
- #progress {
69
- text-align: center;
70
- font-weight: bold;
71
- color: #4CAF50;
72
- font-size: 18px;
73
- }
74
  textarea {height: 100px !important;}
75
  """) as demo:
76
- gr.Markdown("## 🧠 Adaptive MBTI Classifier\n### Model adapts questions dynamically based on your answers.")
 
 
77
 
78
  question = gr.Textbox(
79
  label="Вопрос",
@@ -86,6 +81,8 @@ with gr.Blocks(css="""
86
  progress = gr.Markdown("**0/30**", elem_id="progress")
87
 
88
  btn = gr.Button("Ответить")
89
- btn.click(mbti_interview, inputs=inp, outputs=[out, question, progress])
 
 
90
 
91
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
3
 
4
# === 1️⃣ MBTI classifier ===
MODEL_ID = "f3nsmart/MBTIclassifier"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
# NOTE(review): return_all_scores=True is deprecated in recent transformers
# releases in favor of top_k=None; kept as-is because the downstream code
# indexes the nested list shape this flag produces — confirm before migrating.
analyzer = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    return_all_scores=True,
)

# === 2️⃣ Interviewer model (Phi-3-mini) ===
q_gen = pipeline(
    "text-generation",
    model="microsoft/Phi-3-mini-4k-instruct",
    temperature=0.6,    # mild sampling so questions vary between turns
    top_p=0.9,
    max_new_tokens=60,  # questions are short; cap generation cost
)
18
 
19
# === 3️⃣ One interview step ===
def mbti_interview(user_input, history):
    """Run one turn of the adaptive MBTI interview.

    Args:
        user_input: the user's free-text answer to the last question.
        history: list of (question, answer) pairs accumulated so far,
            or None on the very first call (Gradio passes the State value).

    Returns:
        Tuple of (history, result_text, next_question, progress):
        updated history, top-3 MBTI label scores, the next generated
        question, and a "k/30" progress string.
    """
    if history is None:
        history = []
    if not user_input.strip():
        # Nothing to analyze: leave state untouched and re-prompt.
        return history, "⚠️ Введите ответ.", "Please describe yourself.", f"{len(history)}/30"

    # --- personality analysis: top-3 labels by score ---
    results = analyzer(user_input)[0]
    results = sorted(results, key=lambda x: x["score"], reverse=True)
    result_text = "\n".join(f"{r['label']} → {r['score']:.3f}" for r in results[:3])

    # --- record this (question, answer) pair ---
    last_q = history[-1][0] if history else "Initial question"
    history.append((last_q, user_input))
    progress = f"{len(history)}/30"

    # --- conversation context: last 5 Q/A pairs ---
    convo = "\n".join(f"Q: {q}\nA: {a}" for q, a in history[-5:])

    # --- ask the interviewer model for the next question ---
    prompt = (
        "You are an HR interviewer conducting an MBTI personality assessment. "
        "Based on the following recent dialogue, generate ONE new, natural, short question. "
        "Avoid repeating any previous questions.\n\n"
        f"{convo}\n\nNext question:"
    )

    try:
        raw = q_gen(prompt)[0]["generated_text"]
        # BUGFIX: text-generation pipelines echo the prompt by default, and the
        # prompt embeds the Q/A context — so splitting the raw output on "?"
        # used to match a "?" inside the prompt and return prompt text instead
        # of the model's new question. Strip the prompt prefix first.
        completion = raw[len(prompt):] if raw.startswith(prompt) else raw
        # NOTE(review): .capitalize() lowercases everything after the first
        # character (e.g. acronyms) — kept for parity; confirm if intended.
        next_q = completion.split("?")[0].split("\n")[-1].strip().capitalize() + "?"
        if len(next_q.split()) < 4 or len(next_q) > 140:
            # Degenerate generation (too short or too long): use a safe fallback.
            next_q = "What motivates you the most when you start something new?"
    except Exception as e:
        next_q = f"(⚠️ Ошибка генерации вопроса: {e})"

    return history, result_text, next_q, progress
60
+
61
 
62
+ # === 4️⃣ Интерфейс Gradio ===
63
  with gr.Blocks(css="""
64
  .gradio-container {max-width: 1600px !important; margin: auto;}
65
+ #inp, #out, #question {width: 90% !important; margin: auto; font-size: 16px;}
66
+ #progress {text-align: center; font-weight: bold; color: #4CAF50; font-size: 18px;}
 
 
 
 
 
 
 
 
 
 
67
  textarea {height: 100px !important;}
68
  """) as demo:
69
+ gr.Markdown("## 🧠 Adaptive MBTI Classifier\nDynamic conversation with context memory")
70
+
71
+ state = gr.State([]) # сохраняет [(вопрос, ответ)] между итерациями
72
 
73
  question = gr.Textbox(
74
  label="Вопрос",
 
81
  progress = gr.Markdown("**0/30**", elem_id="progress")
82
 
83
  btn = gr.Button("Ответить")
84
+ btn.click(fn=mbti_interview,
85
+ inputs=[inp, state],
86
+ outputs=[state, out, question, progress])
87
 
88
  demo.launch()