import gradio as gr
import os
from huggingface_hub import InferenceClient
from datasets import load_dataset
import random
import re


def load_sample_problems():
    """Load sample problems from math datasets"""
    try:
        gsm8k = load_dataset("openai/gsm8k", "main", streaming=True)
        samples = []
        for i, item in enumerate(gsm8k["train"]):
            samples.append(item["question"])
            if i >= 50:
                break
        return samples
    except Exception:
        return [
            "What is the derivative of f(x) = 3x² + 2x - 1?",
            "A triangle has sides of length 5, 12, and 13. What is its area?",
            "If log₂(x) + log₂(x+6) = 4, find the value of x.",
            "Find the limit: lim(x→0) (sin(x)/x)",
            "Solve the system: x + 2y = 7, 3x - y = 4"
        ]

math_samples = load_sample_problems()

def create_math_system_message():
    """Create specialized system prompt for mathematics"""
    return """You are Mathetics AI, an advanced mathematics tutor and problem solver. 

🧮 **Your Expertise:**
- Step-by-step problem solving with clear explanations
- Multiple solution approaches when applicable
- Proper mathematical notation and terminology
- Verification of answers through different methods

📐 **Problem Domains:**
- Arithmetic, Algebra, and Number Theory
- Geometry, Trigonometry, and Coordinate Geometry
- Calculus (Limits, Derivatives, Integrals)
- Statistics, Probability, and Data Analysis
- Competition Mathematics (AMC, AIME level)

💡 **Teaching Style:**
1. **Understand the Problem** - Identify what's being asked
2. **Plan the Solution** - Choose the appropriate method
3. **Execute Step-by-Step** - Show all work clearly
4. **Verify the Answer** - Check if the result makes sense
5. **Alternative Methods** - Mention other possible approaches

Always be precise, educational, and encourage mathematical thinking."""

def render_latex(text):
    """Format common math expressions for LaTeX rendering"""
    if not text:  # Safety check
        return text
    # Wrap simple powers: x^2 → $x^{2}$ (only when an explicit caret and exponent are present)
    text = re.sub(r'\b([a-zA-Z])\^(\d+)\b', r'$\1^{\2}$', text)
    # Wrap common functions: sin(x) → $\sin(x)$
    text = re.sub(r'\b(sin|cos|tan|log|ln|exp|sqrt)\(([^)]+)\)', r'$\\\1(\2)$', text)
    # Wrap limits: lim(x→0) → $\lim_{x \to 0}$
    text = re.sub(r'lim\(([a-z])→([0-9.]+)\)', r'$\\lim_{\1 \\to \2}$', text)
    return text
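
# Illustrative example (not executed): with the patterns above,
#   render_latex("Find lim(x→0) of sin(x)/x where y = x^2")
# is expected to return roughly:
#   "Find $\lim_{x \to 0}$ of $\sin(x)$/x where y = $x^{2}$"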

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Show initial thinking
    yield "🤔 Thinking for the suitable answer..."
    
    # Use Qwen Math model
    client = InferenceClient(model="Qwen/Qwen2.5-Math-7B-Instruct")
    
    # Build the chat_completion message list (keep only role/content keys from the Gradio history)
    messages = [{"role": "system", "content": system_message}]
    for turn in history:
        messages.append({"role": turn["role"], "content": turn["content"]})
    messages.append({"role": "user", "content": message})
    
    response = ""
    max_tokens = max(max_tokens, 1536)  # Floor at 1536 so long step-by-step solutions aren't truncated (overrides lower slider values)
    
    try:
        for message_chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            choices = message_chunk.choices
            if len(choices) and choices[0].delta.content:
                token = choices[0].delta.content
                response += token
                # Stream raw text early; apply LaTeX formatting once the response is substantial
                if "```" in response or len(response) > 500:
                    formatted = render_latex(response)
                    yield formatted
                else:
                    yield response
                    
        # Final formatted response
        final_formatted = render_latex(response)
        yield final_formatted
            
    except Exception as e:
        error_msg = f"❌ **Error**: {str(e)}\n\n💡 **Troubleshooting**:\n- Make sure you're logged in with Hugging Face\n- Check if the model is accessible\n- Try a simpler problem first"
        yield error_msg
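
# Rough local smoke test for the streaming generator (hypothetical; assumes this
# environment has Hugging Face Inference API access, e.g. an HF token configured):
#   for chunk in respond("Solve 2x + 3 = 7", [], create_math_system_message(),
#                        512, 0.3, 0.9):
#       print(chunk)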

def get_random_sample():
    """Get a random sample problem"""
    if math_samples:
        return random.choice(math_samples)
    return "Solve for x: 2x² + 5x - 3 = 0"

def insert_sample_to_chat(difficulty):
    """Pre-fill the chat input box with a random sample problem (difficulty is currently unused)"""
    return get_random_sample()

def show_help():
    return """**Math Help Tips:**

1. **Be Specific**: "Find the derivative of..." instead of "Help with calculus"
2. **Show Your Work**: "I got x=5, is this correct?"
3. **Ask for Steps**: "Show me step-by-step how to solve..."
4. **Request Verification**: "Check my solution to this problem"
5. **Alternative Methods**: "What's another way to solve this?"
"""

# Create ChatInterface
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    title="🧮 **Mathetics AI** - Advanced Mathematics Solver",
    description="""
    **Powered by Qwen 2.5-Math** | **Specialized for Mathematical Problem Solving**
    
    ✨ **Capabilities**: Algebra • Geometry • Calculus • Statistics • Competition Math
    📚 **Features**: Step-by-step solutions • Multiple approaches • Clear explanations
    """,
    additional_inputs=[
        gr.Textbox(
            value=create_math_system_message(), 
            label="🧠 System Message (Math Tutor Personality)",
            lines=3,
            max_lines=10
        ),
        gr.Slider(minimum=256, maximum=2048, value=768, step=64, label="📝 Max Tokens"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.3, step=0.1, label="🎯 Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.85, step=0.05, label="🔍 Top-p"),
    ],
    examples=[
        ["What is the derivative of f(x) = 3x² + 2x - 1?"],
        ["A triangle has sides of length 5, 12, and 13. What is its area?"],
        ["Find the limit: lim(x→0) (sin(x)/x)"],
        ["Solve the system: x + 2y = 7, 3x - y = 4"],
        ["What is the integral of ∫(2x³ - 5x + 3)dx?"]
    ],
    cache_examples=False,
    concurrency_limit=10,
)
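
# Note: gr.ChatInterface passes the additional_inputs values to respond() positionally,
# after (message, history), i.e. as (system_message, max_tokens, temperature, top_p).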

# Main interface
with gr.Blocks(
    title="🧮 Mathetics AI", 
    theme=gr.themes.Soft(),
    css="""
    .math-highlight { background-color: #f0f8ff; padding: 10px; border-left: 4px solid #4CAF50; margin: 10px 0; border-radius: 5px; }
    .difficulty-selector { background-color: #fff3e0; padding: 15px; border-radius: 10px; margin: 10px 0; }
    .markdown math { font-size: 1.1em; }
    """
) as demo:
    
    gr.Markdown("# 🧮 **Mathematics AI** - Advanced Mathematics Solver\n**Your Personal AI Math Tutor**")
    
    with gr.Row():
        with gr.Column(scale=4):
            chatbot.render()
            
        with gr.Column(scale=1):
            with gr.Accordion("🎲 **Quick Actions**", open=True):
                difficulty_preset = gr.Dropdown(
                    choices=["Elementary", "High School", "College", "Competition"],
                    value="High School",
                    label="🎯 Problem Difficulty",
                    elem_classes=["difficulty-selector"]
                )
                
                sample_btn = gr.Button("🎯 Get Sample Problem", variant="secondary")
                help_btn = gr.Button("❓ Math Help Tips", variant="secondary")
                help_output = gr.Markdown()  # Target component for the help tips
                
                gr.Markdown("### 🔧 **Quick Tools**")
                gr.Markdown("- **Algebra**: Equations, inequalities, factoring\n- **Geometry**: Area, volume, trigonometry\n- **Calculus**: Derivatives, integrals, limits\n- **Statistics**: Probability, distributions\n- **Number Theory**: Prime factorization, GCD/LCM")
    
    gr.Markdown("""
    ---
    **🔧 Technical Details:** Qwen/Qwen2.5-Math-7B-Instruct | Real-time streaming responses
    
    **💡 Usage Tips:** Be specific • Request step-by-step • Ask for verification
    """)
    
    # Event handlers
    sample_btn.click(
        insert_sample_to_chat,
        inputs=[difficulty_preset],
        outputs=[chatbot.textbox],  # Pre-fill the ChatInterface's input box with the sample problem
    )
    
    help_btn.click(
        show_help,
        outputs=help_output,  # Render the tips in the sidebar Markdown target
    )

if __name__ == "__main__":
    demo.launch()
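    # Optional launch variants (sketch): demo.launch(share=True) creates a temporary public
    # link; demo.launch(server_name="0.0.0.0", server_port=7860) is typical inside a container.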