QAway-to committed on
Commit 3dff80d · 1 Parent(s): a9ecae7

New model TinyLlama/TinyLlama-1.1B-Chat-v1.0 (v1.1)

Files changed (1)
  1. app.py +56 -21
app.py CHANGED
@@ -1,51 +1,82 @@
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSequenceClassification
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForCausalLM,
+     AutoModelForSequenceClassification,
+     pipeline
+ )

  # =========================================================
- # 1️⃣ Model setup
+ # 1️⃣ Model settings
  # =========================================================
-
- # Main MBTI model (your fine-tuned one)
  MBTI_MODEL = "f3nsmart/MBTIclassifier"
-
- # Lightweight LLM for generating questions
  INTERVIEWER_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

- # Load the MBTI classifier
  mbti_tok = AutoTokenizer.from_pretrained(MBTI_MODEL)
  mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL)
  mbti_pipe = pipeline("text-classification", model=mbti_model, tokenizer=mbti_tok, return_all_scores=True)

- # Load the interviewer
  interviewer_tok = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
  interviewer_model = AutoModelForCausalLM.from_pretrained(INTERVIEWER_MODEL)
- interviewer_pipe = pipeline("text-generation", model=interviewer_model, tokenizer=interviewer_tok, max_new_tokens=60)
+ interviewer_pipe = pipeline(
+     "text-generation",
+     model=interviewer_model,
+     tokenizer=interviewer_tok,
+     max_new_tokens=60,
+     temperature=0.8,
+     top_p=0.9
+ )

  # =========================================================
- # 2️⃣ Logic
+ # 2️⃣ Functions
  # =========================================================
-
- def analyze_and_ask(user_text, question_count=1):
+ def generate_first_question():
+     """First question at startup"""
+     prompt = "Generate one short, open-ended question starting with 'how', 'what', 'why', or 'when' to start a personality interview."
+     q = interviewer_pipe(prompt)[0]["generated_text"]
+     q = clean_question(q)
+     return q or "How do you usually spend your free time?"
+
+
+ def clean_question(text):
+     """Strip junk from the model output"""
+     text = text.strip()
+     for bad in ["You are", "instruction", "Generate", "MBTI", "task", "assistant:"]:
+         if bad.lower() in text.lower():
+             text = text.split(bad, 1)[-1]
+     text = text.replace(":", "").replace("'", "").strip()
+     if len(text.split()) < 3:
+         return None
+     return text
+
+
+ def analyze_and_ask(user_text, prev_count):
+     """Main loop: analysis + a new question"""
      if not user_text.strip():
-         return "⚠️ Please enter an answer.", "", "0/30"
+         return "⚠️ Please enter an answer.", "", prev_count
+
+     # Counter
+     try:
+         n = int(prev_count.split("/")[0]) + 1
+     except Exception:
+         n = 1
+     counter = f"{n}/30"

      # MBTI analysis
      res = mbti_pipe(user_text)[0]
      res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
      mbti_text = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in res_sorted[:3]])

-     # Generate a new question
-     prompt = f"You are a professional interviewer for MBTI testing. Generate one short, open-ended, natural question based on this answer: '{user_text}'. Avoid yes/no questions."
+     # Question generation
+     prompt = f"Ask one new open-ended interview question based on this user answer: '{user_text}'. Avoid yes/no questions."
      gen = interviewer_pipe(prompt)[0]["generated_text"]
-     gen = gen.split("question:")[-1].strip() if "question:" in gen.lower() else gen.strip()
+     question = clean_question(gen) or "Can you tell me more about your daily habits?"

-     # Counter
-     counter = f"{question_count}/30"
-     return mbti_text, gen, counter
+     return mbti_text, question, counter


  # =========================================================
- # 3️⃣ Gradio interface
+ # 3️⃣ Gradio interface
  # =========================================================
  with gr.Blocks(theme=gr.themes.Soft(), title="Adaptive MBTI Interviewer") as demo:
      gr.Markdown("## 🧠 Adaptive MBTI Interviewer\nIdentify your personality type and get the next question from the interviewer.")
@@ -60,10 +91,14 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Adaptive MBTI Interviewer") as dem
      question_out = gr.Textbox(label="💬 Next question from the interviewer", lines=3)
      counter = gr.Textbox(label="Progress", value="0/30")

+     # First question at startup
+     demo.load(fn=generate_first_question, inputs=None, outputs=question_out)
+
+     # Analyze-and-generate button
      btn.click(fn=analyze_and_ask, inputs=[inp, counter], outputs=[mbti_out, question_out, counter])

  # =========================================================
- # 4️⃣ Launch
+ # 4️⃣ Launch
  # =========================================================
  if __name__ == "__main__":
      demo.launch()
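
A quick way to sanity-check the new flow is to call the functions directly, without launching the UI. The sketch below is illustrative and not part of the commit: it assumes it runs next to app.py (importing app loads both checkpoints but does not call demo.launch(), which is guarded by __main__), and the sample answer and expected outputs are made up for the example.

import app  # loads both pipelines; no Gradio server is started

# clean_question strips known prompt-echo fragments and rejects outputs shorter than 3 words
print(app.clean_question("assistant: What motivates you at work"))  # -> "What motivates you at work"
print(app.clean_question("MBTI"))  # -> None; callers fall back to a canned question

# analyze_and_ask threads the counter through the "Progress" textbox as a "n/30" string
mbti_text, question, counter = app.analyze_and_ask(
    "I prefer quiet evenings with a book over big parties.",  # made-up sample answer
    "0/30",  # current counter value, exactly as the UI stores it
)
print(counter)    # -> "1/30"
print(mbti_text)  # top-3 labels with scores, one per line
print(question)   # cleaned follow-up question, or the hard-coded fallback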