mrnoobpm committed on
Commit
f0b3ced
·
verified ·
1 Parent(s): a51a5e4

Update App.py

Browse files
Files changed (1) hide show
  1. App.py +142 -20
App.py CHANGED
@@ -1,27 +1,149 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
3
 
4
- # Türkçe model
5
- tokenizer = AutoTokenizer.from_pretrained("ytu-ce-cosmos/turkish-gpt2")
6
- model = AutoModelForCausalLM.from_pretrained("ytu-ce-cosmos/turkish-gpt2")
7
 
8
- def dusun(prompt, max_length=150):
9
- inputs = tokenizer(prompt, return_tensors="pt")
10
- outputs = model.generate(**inputs, max_length=max_length, do_sample=True, temperature=0.8)
11
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
12
 
13
- demo = gr.Interface(
14
- fn=dusun,
15
- inputs=[
16
- gr.Textbox(label="Türkçe Prompt", lines=3),
17
- gr.Slider(50, 200, value=100, label="Uzunluk")
18
- ],
19
- outputs=gr.Textbox(label="Üretilen Metin", lines=10),
20
- title="🇹🇷 Türkçe GPT-2",
21
- examples=[
22
- ["Bir varmış bir yokmuş"],
23
- ["Yapay zeka geleceği"],
24
- ]
25
  )
26
 
27
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
 
5
+ print("🔥 Modeller yükleniyor...")
 
 
6
 
7
+ # DİL MODELİ (Türkçe)
8
+ print("📝 Türkçe model yükleniyor...")
9
+ lang_tokenizer = AutoTokenizer.from_pretrained("malhajar/llama-3.2-3B-Turkish")
10
+ lang_model = AutoModelForCausalLM.from_pretrained(
11
+ "malhajar/llama-3.2-3B-Turkish",
12
+ torch_dtype=torch.float16,
13
+ device_map="auto"
14
+ )
15
 
16
+ # MATEMATİK MODELİ
17
+ print("🧮 Matematik model yükleniyor...")
18
+ math_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-math-7b-instruct")
19
+ math_model = AutoModelForCausalLM.from_pretrained(
20
+ "deepseek-ai/deepseek-math-7b-instruct",
21
+ torch_dtype=torch.float16,
22
+ device_map="auto"
 
 
 
 
 
23
  )
24
 
25
+ print("✅ Tüm modeller hazır!")
26
+
27
def detect_math(prompt):
    """Heuristically decide whether *prompt* is a math question.

    Returns True when the lowercased prompt contains a Turkish math
    keyword, or contains a math symbol together with at least one digit.
    """
    # Word-like keywords are distinctive enough to match anywhere.
    word_keywords = [
        "çarp", "böl", "topla", "çıkar", "hesap", "kaç",
        "toplam", "fark", "çarpım", "bölüm", "üzeri",
        "kare", "küp",
    ]
    # Bare symbols like "-" or "/" also appear in ordinary prose
    # (hyphenated words such as "e-posta", dates, URLs).  The original
    # matched them unconditionally and misrouted normal text to the
    # math model; require a digit before counting a symbol as evidence.
    symbol_keywords = ["=", "+", "-", "*", "/", "×", "÷"]

    text = prompt.lower()
    if any(word in text for word in word_keywords):
        return True
    if any(ch.isdigit() for ch in text):
        return any(sym in text for sym in symbol_keywords)
    return False
36
+
37
def generate_text(model, tokenizer, prompt, max_length=200):
    """Generate a sampled continuation of *prompt*.

    Args:
        model: a causal LM exposing ``.device`` and ``.generate(...)``.
        tokenizer: matching tokenizer; used to encode and decode.
        prompt: input text.
        max_length: maximum number of NEW tokens to generate.

    Returns:
        The decoded text (prompt plus continuation), special tokens stripped.
    """
    # Prompt is truncated to 512 tokens; move tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # Bug fix: the original passed max_length=, which counts the
            # prompt tokens too — a prompt at or beyond that length left no
            # room to generate anything.  max_new_tokens counts only the
            # generated continuation.
            max_new_tokens=max_length,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )

    return tokenizer.decode(outputs[0], skip_special_tokens=True)
53
+
54
def hybrid_ai(prompt, max_length, show_both):
    """Route *prompt* to the math model, the language model, or both.

    Returns a pair of markdown strings for the two output boxes; the
    second is empty unless comparison mode (*show_both*) is on.
    Any model error is reported in the first output instead of raised.
    """
    if not prompt.strip():
        return "⚠️ Lütfen bir metin girin!", ""

    try:
        if show_both:
            # Comparison mode: run both models on the same prompt.
            math_text = generate_text(math_model, math_tokenizer, prompt, max_length)
            lang_text = generate_text(lang_model, lang_tokenizer, prompt, max_length)
            return (
                f"### 🧮 Matematik AI (DeepSeek-Math)\n\n{math_text}",
                f"### 💬 Dil AI (Llama-3.2 Turkish)\n\n{lang_text}",
            )

        # Automatic routing based on the keyword heuristic.
        if detect_math(prompt):
            answer = generate_text(math_model, math_tokenizer, prompt, max_length)
            return f"### 🧮 Matematik AI Seçildi (DeepSeek-Math)\n\n{answer}", ""

        answer = generate_text(lang_model, lang_tokenizer, prompt, max_length)
        return f"### 💬 Dil AI Seçildi (Llama-3.2 Turkish)\n\n{answer}", ""
    except Exception as e:
        return f"❌ Hata: {str(e)}", ""
84
+
85
# Gradio UI: declaration order of the components below determines the layout.
with gr.Blocks(title="Hybrid AI: Matematik + Dil", theme=gr.themes.Soft()) as demo:
    # Page header.
    gr.Markdown("""
    # 🔥 Hybrid AI: 2 Model Birleşimi

    **Matematik:** DeepSeek-Math-7B
    **Dil:** Llama-3.2-3B Turkish

    Otomatik algılama veya her ikisini karşılaştır!
    """)

    with gr.Row():
        with gr.Column():
            # Free-text prompt fed to hybrid_ai.
            prompt_input = gr.Textbox(
                label="📝 Prompt",
                placeholder="Matematik soru veya normal metin...",
                lines=5
            )

            # Generation length bound (50–300, default 150, step 10).
            max_length = gr.Slider(
                50, 300, 150, step=10,
                label="Maksimum Uzunluk"
            )

            # When checked, hybrid_ai runs BOTH models and fills both outputs.
            show_both = gr.Checkbox(
                label="🔀 Her İki Modeli de Göster (Karşılaştırma Modu)",
                value=False
            )

            generate_btn = gr.Button("🚀 Üret", variant="primary", size="lg")

    with gr.Row():
        output1 = gr.Textbox(label="Sonuç 1", lines=10)
        # NOTE(review): always visible even outside comparison mode, where
        # hybrid_ai returns "" for it — consider toggling visibility instead.
        output2 = gr.Textbox(label="Sonuç 2 (Karşılaştırma Modunda)", lines=10, visible=True)

    # Clickable example prompts (mix of math and plain-text cases).
    with gr.Accordion("📚 Örnek Promptlar", open=False):
        gr.Examples(
            examples=[
                ["347 çarpı 829 eşittir"],
                ["Bir varmış bir yokmuş, evvel zaman içinde"],
                ["Yapay zeka geleceği nasıl şekillendirir?"],
                ["Bir sepette 15 elma var. 7 tanesini yedim, 12 tane daha aldım. Toplam kaç elma var?"],
                ["İstanbul'un tarihi dokusu hakkında yazı yaz"]
            ],
            inputs=prompt_input
        )

    # Wire the button: hybrid_ai(prompt, max_length, show_both) -> (output1, output2).
    generate_btn.click(
        fn=hybrid_ai,
        inputs=[prompt_input, max_length, show_both],
        outputs=[output1, output2]
    )

    # Footer explaining the routing rules.
    gr.Markdown("""
    ---
    💡 **Nasıl Çalışır:**
    - ✅ Matematik kelimeleri varsa → DeepSeek-Math
    - ✅ Normal metin varsa → Llama-3.2 Turkish
    - 🔀 Karşılaştırma modunda her ikisi de çalışır!
    """)

# Launch the app only when run as a script.
if __name__ == "__main__":
    demo.launch()