QAway-to committed

Commit 0611243 · Parent: 4df42a0

Back to normal app.py v1.5

Files changed (2):
  1. app.py +22 -9
  2. core/interviewer.py +27 -8
app.py CHANGED

@@ -1,44 +1,57 @@
  # app.py
  import gradio as gr
+ import asyncio
+ from itertools import cycle
  from core.utils import generate_first_question
  from core.mbti_analyzer import analyze_mbti
  from core.interviewer import generate_question, session_state

- def analyze_and_ask(user_text, prev_count):
+ async def async_loader(progress_fn):
+     """Asynchronous loader animation (spinning braille dots)."""
+     frames = cycle(["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"])
+     for _ in range(10):
+         await asyncio.sleep(0.2)
+         progress_fn(next(frames))
+
+ def analyze_and_ask(user_text, prev_count, progress=gr.Progress(track_tqdm=True)):
      if not user_text.strip():
          yield "⚠️ Please enter your answer.", "", prev_count
          return

      user_id = "default_user"
-
      try:
          n = int(prev_count.split("/")[0]) + 1
      except Exception:
          n = 1
      counter = f"{n}/8"

-     # 1️⃣ Show “Thinking...” right away
-     yield "⏳ Analyzing personality...", "💭 Interviewer is thinking...", counter
+     # 1️⃣ First message, shown instantly
+     yield "⏳ Analyzing personality...", "💭 Interviewer is thinking...", counter
+
+     # 2️⃣ Loader animation in the background
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+     loop.create_task(async_loader(lambda f: None))

-     # 2️⃣ MBTI analysis
+     # 3️⃣ MBTI analysis
      mbti_gen = analyze_mbti(user_text)
      mbti_text = ""
      for chunk in mbti_gen:
          mbti_text = chunk
-         yield mbti_text, "💭 Interviewer is thinking...", counter
+         yield mbti_text, "💭 Interviewer is thinking...", counter

-     # 3️⃣ Question generation
+     # 4️⃣ Question generation
      question = generate_question(user_id)

      if question.startswith("✅ All"):
          yield f"{mbti_text}\n\nSession complete.", "🎯 All MBTI axes covered.", "8/8"
          return

-     # 4️⃣ Final output
+     # 5️⃣ Final output
      yield mbti_text, question, counter

  # --------------------------------------------------------------
- # Gradio interface
+ # UI
  # --------------------------------------------------------------
  with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Personality Interviewer") as demo:
      gr.Markdown(
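A note on the loader added in this version: `loop.create_task(...)` only schedules `async_loader` on the new event loop, and since the loop is never started with `run_until_complete()` or `run_forever()`, the coroutine never actually executes; the `lambda f: None` callback would also discard the frames. A minimal sketch of an alternative that does animate, assuming the spinner is ticked directly from the synchronous Gradio generator (`analyze_and_ask_sketch` and `FRAMES` are illustrative names, not part of this repo):

```python
# Sketch only, not the committed code: each yield from a Gradio generator
# updates the outputs immediately, so no event loop is needed.
import time
from itertools import cycle, islice

FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

def analyze_and_ask_sketch(user_text, prev_count):  # illustrative name
    counter = "1/8"
    # A short animated "thinking" phase before the slow MBTI call.
    for frame in islice(cycle(FRAMES), 10):
        yield "⏳ Analyzing personality...", f"{frame} Interviewer is thinking...", counter
        time.sleep(0.2)
    # ...then stream the real analysis exactly as in the commit.
```

The trade-off is that this animation blocks between yields; a spinner that keeps running while `analyze_mbti` streams would need a background thread instead.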
core/interviewer.py CHANGED

@@ -1,5 +1,6 @@
  # core/interviewer.py
  import random
+ import itertools
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

  INTERVIEWER_MODEL = "f3nsmart/TinyLlama-MBTI-Interviewer-LoRA"
@@ -13,7 +14,7 @@ llm_pipe = pipeline(
      "text-generation",
      model=model,
      tokenizer=tokenizer,
-     max_new_tokens=60,
+     max_new_tokens=70,
      temperature=0.6,
      top_p=0.9,
  )
@@ -40,14 +41,34 @@ def select_next_category(user_id: str):
      return next_cat

  def build_prompt(category: str):
+     # ✅ New, more "demonstrative" prompt:
      return (
-         f"Ask one open-ended question about {category}. "
-         f"Do not repeat or rephrase previous questions. "
-         f"Output only the question itself."
+         f"You are a friendly MBTI interviewer.\n"
+         f"Ask one short, open-ended question that explores {category.lower()}.\n"
+         f"Examples: 'What makes you feel most energized in social situations?'\n"
+         f"Output only the question, without quotes, without explanations."
      )

+ def clean_question(text: str) -> str:
+     """Strip leaked instructions and keep only the question."""
+     text = text.strip()
+
+     # drop fragments containing 'ask', 'instruction', etc.
+     bad_phrases = ["ask", "instruction", "output only", "question about", "you are"]
+     for phrase in bad_phrases:
+         if phrase.lower() in text.lower():
+             # take only the part after the last '?'
+             if '?' in text:
+                 text = text.split('?')[-1]
+             else:
+                 text = text.replace(phrase, '')
+     text = text.strip().strip('"').strip("'")
+     if not text.endswith("?"):
+         text += "?"
+     return text
+
  def generate_question(user_id: str) -> str:
-     """Return only the final question."""
+     """Generate a new question for the selected category."""
      if user_id not in session_state:
          init_session(user_id)

@@ -57,7 +78,5 @@ def generate_question(user_id: str) -> str:

      prompt = build_prompt(category)
      raw = llm_pipe(prompt)[0]["generated_text"]
-     question = raw.strip().split("\n")[0]
-     if "?" not in question:
-         question += "?"
+     question = clean_question(raw)
      return f"({category}) {question}"