QAway-to committed on
Commit
a9ecae7
·
1 Parent(s): 5de66ba

New model TinyLlama/TinyLlama-1.1B-Chat-v1.0

Browse files
Files changed (1) hide show
  1. app.py +48 -90
app.py CHANGED
@@ -1,111 +1,69 @@
1
  import gradio as gr
2
- import concurrent.futures
3
- import time
4
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForCausalLM, pipeline
5
 
6
  # =========================================================
7
- # 1. Настройка моделей
8
  # =========================================================
9
- MBTI_MODEL_ID = "f3nsmart/MBTIclassifier"
10
- LLM_MODEL_ID = "google/gemma-1.1-2b-it" # Быстрая, контекстная и лаконичная
11
 
12
- # Классификатор (твой fine-tuned)
13
- mbti_tokenizer = AutoTokenizer.from_pretrained(MBTI_MODEL_ID)
14
- mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL_ID)
15
- analyzer = pipeline("text-classification", model=mbti_model, tokenizer=mbti_tokenizer, return_all_scores=True)
16
 
17
- # Интервьюер (Gemma)
18
- q_tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_ID)
19
- q_model = AutoModelForCausalLM.from_pretrained(LLM_MODEL_ID)
20
- interviewer = pipeline("text-generation", model=q_model, tokenizer=q_tokenizer)
 
 
 
 
 
 
 
 
21
 
22
  # =========================================================
23
- # 2. Основная логика
24
  # =========================================================
25
def classify_and_ask(user_input, question_count):
    """Classify the user's answer (MBTI) and produce the interviewer's next question.

    Runs the classifier and the question generator concurrently in two
    threads and returns a ``(top_scores, question, progress)`` tuple, where
    ``progress`` is a ``"k/30"`` string.
    """
    if not user_input.strip():
        return "⚠️ Введите текст.", "⚠️ Вопрос не сформирован.", question_count

    started = time.perf_counter()

    prompt = f"""You are an MBTI interviewer.
The user answered: "{user_input}".
Ask one short, open-ended question that helps reveal their personality type.
Avoid repeating previous topics.
Start directly with 'How', 'Why', 'What', or 'When'.
Output only the question itself, nothing else."""

    # Kick off analysis and generation in parallel, then join both results.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        analysis_future = pool.submit(analyzer, user_input)
        question_future = pool.submit(
            interviewer,
            prompt,
            max_new_tokens=40,
            temperature=0.8,
            top_p=0.9,
            do_sample=True,
        )
        analysis_result = analysis_future.result()
        question_result = question_future.result()

    print(f"⏱ Время обработки запроса : {time.perf_counter() - started:.2f} сек")

    # Top-3 labels by descending score.
    ranked = sorted(analysis_result[0], key=lambda item: item["score"], reverse=True)
    top = "\n".join(f"{item['label']} → {item['score']:.3f}" for item in ranked[:3])

    raw_q = question_result[0]["generated_text"].strip()
    # Keep only the last line after any "label:" prefix; ensure a trailing "?".
    question = raw_q.split("\n")[-1].split(":")[-1].strip()
    if not question.endswith("?"):
        question += "?"

    question_count += 1
    return top, question, f"{question_count}/30"
70
 
71
 
72
  # =========================================================
73
- # 3. Интерфейс Gradio
74
  # =========================================================
75
# =========================================================
# 3. Gradio interface
# =========================================================
with gr.Blocks(title="MBTI Interactive Interview (Gemma)") as demo:
    gr.Markdown(
        "## 🧠 MBTI Personality Interviewer\n"
        "Определи личностный тип и получи следующий вопрос от интервьюера."
    )

    # Holds the running question number between clicks.
    question_state = gr.State(1)

    with gr.Row():
        # Left column: user input, trigger button and progress marker.
        with gr.Column(scale=1):
            inp = gr.Textbox(
                label="Введите свой ответ",
                placeholder="Например: I enjoy working with people and organizing events.",
                lines=4,
            )
            btn = gr.Button("Анализировать и задать новый вопрос", variant="primary")
            progress = gr.Markdown("**1/30**", elem_id="progress")

        # Right column: analysis output and the interviewer's question.
        with gr.Column(scale=1):
            out_analysis = gr.Textbox(label="📊 Анализ MBTI", lines=6)
            out_question = gr.Textbox(
                label="💬 Вопрос от интервьюера",
                value="How do you usually spend your free time?",
                lines=3,
            )

    btn.click(
        fn=classify_and_ask,
        inputs=[inp, question_state],
        outputs=[out_analysis, out_question, progress],
    )

# =========================================================
# 4. Launch
# =========================================================
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSequenceClassification
 
 
3
 
4
  # =========================================================
5
+ # 1️⃣ Настройка моделей
6
  # =========================================================
 
 
7
 
8
# Main MBTI model (the fine-tuned classifier).
MBTI_MODEL = "f3nsmart/MBTIclassifier"

# Lightweight LLM used to generate the interview questions.
INTERVIEWER_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# Load the MBTI classifier.
# NOTE(review): `return_all_scores=True` is deprecated in recent
# transformers releases in favour of `top_k=None` — confirm before changing.
mbti_tok = AutoTokenizer.from_pretrained(MBTI_MODEL)
mbti_model = AutoModelForSequenceClassification.from_pretrained(MBTI_MODEL)
mbti_pipe = pipeline(
    "text-classification",
    model=mbti_model,
    tokenizer=mbti_tok,
    return_all_scores=True,
)

# Load the interviewer model.
interviewer_tok = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
interviewer_model = AutoModelForCausalLM.from_pretrained(INTERVIEWER_MODEL)
interviewer_pipe = pipeline(
    "text-generation",
    model=interviewer_model,
    tokenizer=interviewer_tok,
    max_new_tokens=60,
)
23
 
24
  # =========================================================
25
+ # 2️⃣ Логика
26
  # =========================================================
27
+
28
def _parse_progress(value):
    """Return the numeric question count from an int or a 'k/30' string."""
    if isinstance(value, int):
        return value
    try:
        return int(str(value).strip().split("/")[0])
    except ValueError:
        return 0


def analyze_and_ask(user_text, question_count=1):
    """Classify the user's answer (MBTI) and generate the next interview question.

    Parameters
    ----------
    user_text : str
        The user's free-text answer.
    question_count : int | str
        Current progress. The UI wires the counter Textbox here, so the
        value arrives as a "k/30" string; plain ints are also accepted.

    Returns
    -------
    tuple[str, str, str]
        (top-3 MBTI scores text, next interviewer question, progress "k/30").
    """
    current = _parse_progress(question_count)

    if not user_text.strip():
        # Keep the progress counter unchanged instead of resetting it to 0/30.
        return "⚠️ Введите ответ.", "", f"{current}/30"

    # MBTI analysis: top-3 labels by descending score.
    res = mbti_pipe(user_text)[0]
    res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
    mbti_text = "\n".join(f"{r['label']} → {r['score']:.3f}" for r in res_sorted[:3])

    # Generate the next question from the user's answer.
    prompt = (
        "You are a professional interviewer for MBTI testing. "
        "Generate one short, open-ended, natural question based on this answer: "
        f"'{user_text}'. Avoid yes/no questions."
    )
    gen = interviewer_pipe(prompt)[0]["generated_text"]
    gen = gen.split("question:")[-1].strip() if "question:" in gen.lower() else gen.strip()

    # BUGFIX: the counter Textbox string was previously interpolated directly,
    # yielding "0/30/30", and the count never advanced. Parse, increment, cap.
    counter = f"{min(current + 1, 30)}/30"
    return mbti_text, gen, counter
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
 
47
  # =========================================================
48
+ # 3️⃣ Интерфейс Gradio
49
  # =========================================================
50
with gr.Blocks(theme=gr.themes.Soft(), title="Adaptive MBTI Interviewer") as demo:
    gr.Markdown(
        "## 🧠 Adaptive MBTI Interviewer\n"
        "Опредеи личностный тип и получи следующий вопрос от интервьюера."
    )

    with gr.Row():
        # Left column: the user's answer and the trigger button.
        with gr.Column(scale=2):
            inp = gr.Textbox(
                label="Ваш ответ",
                placeholder="Например: I enjoy working with people and organizing events.",
                lines=4,
            )
            btn = gr.Button("Анализировать и задать новый вопрос", variant="primary")

        # Right column: analysis, next question and progress counter.
        with gr.Column(scale=2):
            mbti_out = gr.Textbox(label="📊 Анализ MBTI", lines=5)
            question_out = gr.Textbox(label="💬 Следующий вопрос от интервьюера", lines=3)
            counter = gr.Textbox(label="Прогресс", value="0/30")

    # NOTE(review): `counter` is both an input and an output here, so the
    # handler receives the raw "k/30" Textbox string as `question_count`.
    btn.click(
        fn=analyze_and_ask,
        inputs=[inp, counter],
        outputs=[mbti_out, question_out, counter],
    )

# =========================================================
# 4️⃣ Launch
# =========================================================
if __name__ == "__main__":
    demo.launch()