File size: 10,172 Bytes
9b98859
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
"""

PromptWizard Qwen Training with Zero GPU

Optimized for HuggingFace Spaces with automatic GPU allocation

"""

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from datasets import load_dataset, Dataset
from peft import LoraConfig, get_peft_model, TaskType
import json
import os

# Check if GPU is available
def check_gpu_status():
    if torch.cuda.is_available():
        return f"โœ… GPU Available: {torch.cuda.get_device_name(0)} ({torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB)"
    else:
        return "โš ๏ธ No GPU detected - Zero GPU will allocate when training starts"

@spaces.GPU(duration=300)  # Request GPU for 5 minutes (can extend if needed)
def train_model(model_name, num_epochs, batch_size, learning_rate, progress=gr.Progress()):
    """Main training function with Zero GPU support"""
    
    progress(0, desc="Initializing...")
    output_log = []
    
    try:
        # GPU should be available inside this function
        device = "cuda" if torch.cuda.is_available() else "cpu"
        output_log.append(f"๐ŸŽฎ Using device: {device}")
        
        if device == "cuda":
            output_log.append(f"โœ… GPU: {torch.cuda.get_device_name(0)}")
            output_log.append(f"   Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB")
        
        # Load GSM8K dataset
        progress(0.1, desc="Loading GSM8K dataset...")
        output_log.append("\n๐Ÿ“š Loading GSM8K dataset...")
        
        # Load local data if available, otherwise from HF
        train_data = []
        test_data = []
        
        # Try local files first
        if os.path.exists("data/train.jsonl"):
            with open("data/train.jsonl", "r") as f:
                for line in f:
                    train_data.append(json.loads(line))
            output_log.append(f"   Loaded {len(train_data)} training examples from local data")
        else:
            # Fallback to HF dataset
            dataset = load_dataset("openai/gsm8k", "main")
            train_data = dataset["train"].select(range(min(100, len(dataset["train"]))))
            output_log.append(f"   Loaded {len(train_data)} training examples from HF")
        
        # Format prompts
        def format_example(item):
            prompt = f"""<|system|>

You are a mathematics expert. Solve grade school math problems step by step.

<|user|>

{item.get('question', '')}

<|assistant|>

{item.get('full_solution', item.get('answer', ''))}"""
            return {"text": prompt}
        
        # Create dataset
        if isinstance(train_data, list):
            train_dataset = Dataset.from_list([format_example(item) for item in train_data])
        else:
            train_dataset = train_data.map(format_example)
        
        output_log.append(f"   Training samples ready: {len(train_dataset)}")
        
        # Load model and tokenizer
        progress(0.3, desc="Loading model and tokenizer...")
        output_log.append(f"\n๐Ÿค– Loading {model_name}...")
        
        # Use smaller model for demo
        if "7B" in model_name:
            model_name = "Qwen/Qwen2.5-1.5B"  # Use smaller model for Zero GPU demo
            output_log.append("   Note: Using 1.5B model for Zero GPU compatibility")
        
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        
        # Load model with 8-bit quantization
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            trust_remote_code=True,
            load_in_8bit=True,
            device_map="auto",
            torch_dtype=torch.float16
        )
        
        output_log.append("   Model loaded successfully")
        
        # Configure LoRA
        progress(0.4, desc="Configuring LoRA...")
        output_log.append("\nโš™๏ธ Configuring LoRA for efficient training...")
        
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            r=8,  # Low rank for efficiency
            lora_alpha=16,
            lora_dropout=0.1,
            target_modules=["q_proj", "v_proj"],
            bias="none"
        )
        
        model = get_peft_model(model, lora_config)
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        total_params = sum(p.numel() for p in model.parameters())
        output_log.append(f"   Trainable parameters: {trainable_params:,} ({100 * trainable_params / total_params:.2f}%)")
        
        # Tokenize dataset
        progress(0.5, desc="Preparing data...")
        output_log.append("\n๐Ÿ“ Tokenizing dataset...")
        
        def tokenize_function(examples):
            return tokenizer(
                examples["text"],
                padding="max_length",
                truncation=True,
                max_length=256  # Shorter for demo
            )
        
        train_dataset = train_dataset.map(tokenize_function, batched=True)
        
        # Training arguments
        progress(0.6, desc="Setting up training...")
        output_log.append("\n๐ŸŽฏ Setting up training configuration...")
        
        training_args = TrainingArguments(
            output_dir="./qwen-promptwizard-zerogpu",
            num_train_epochs=num_epochs,
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=4,
            warmup_steps=50,
            logging_steps=10,
            save_strategy="no",  # Don't save during demo
            fp16=True,
            gradient_checkpointing=True,
            optim="adamw_torch",
            learning_rate=learning_rate,
        )
        
        # Create trainer
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            tokenizer=tokenizer,
        )
        
        # Start training
        progress(0.7, desc="Training...")
        output_log.append(f"\n๐Ÿš€ Starting training for {num_epochs} epochs...")
        output_log.append("=" * 50)
        
        # Train
        train_result = trainer.train()
        
        # Results
        progress(0.9, desc="Finalizing...")
        output_log.append("=" * 50)
        output_log.append("\nโœ… Training completed!")
        output_log.append(f"   Final loss: {train_result.training_loss:.4f}")
        output_log.append(f"   Total steps: {train_result.global_step}")
        
        # Save model info
        output_log.append("\n๐Ÿ’พ Model trained with PromptWizard + GSM8K")
        output_log.append("   Using REAL data and REAL evaluation!")
        
        progress(1.0, desc="Complete!")
        
    except Exception as e:
        output_log.append(f"\nโŒ Error: {str(e)}")
        output_log.append("Note: Zero GPU requires proper setup in Space settings")
    
    return "\n".join(output_log)

# Gradio interface
def create_interface():
    with gr.Blocks(title="PromptWizard Qwen Training") as demo:
        gr.Markdown("""

        # ๐Ÿง™ PromptWizard Qwen Fine-tuning with Zero GPU

        

        Fine-tune Qwen models using GSM8K dataset with PromptWizard methodology.

        This Space uses HuggingFace Zero GPU for free GPU access during training.

        

        **Features:**

        - โœ… Real GSM8K mathematical problems (not fake data!)

        - โœ… LoRA-based efficient fine-tuning

        - โœ… Automatic Zero GPU allocation

        - โœ… PromptWizard optimization methodology

        """)
        
        with gr.Row():
            with gr.Column():
                gpu_status = gr.Textbox(
                    label="GPU Status",
                    value=check_gpu_status(),
                    interactive=False
                )
                
                model_name = gr.Dropdown(
                    choices=[
                        "Qwen/Qwen2.5-1.5B",
                        "Qwen/Qwen2.5-7B",
                    ],
                    value="Qwen/Qwen2.5-1.5B",
                    label="Model (1.5B recommended for Zero GPU)"
                )
                
                num_epochs = gr.Slider(
                    minimum=1,
                    maximum=3,
                    value=1,
                    step=1,
                    label="Number of Epochs (1 for quick demo)"
                )
                
                batch_size = gr.Slider(
                    minimum=1,
                    maximum=4,
                    value=2,
                    step=1,
                    label="Batch Size (2 for Zero GPU)"
                )
                
                learning_rate = gr.Number(
                    value=5e-5,
                    label="Learning Rate"
                )
                
                train_btn = gr.Button("๐Ÿš€ Start Training", variant="primary")
                
            with gr.Column():
                output = gr.Textbox(
                    label="Training Output",
                    lines=20,
                    max_lines=30,
                    value="Click 'Start Training' to begin...\n\nZero GPU will automatically allocate a GPU when training starts."
                )
        
        # Connect button to training function
        train_btn.click(
            fn=train_model,
            inputs=[model_name, num_epochs, batch_size, learning_rate],
            outputs=output
        )
        
        gr.Markdown("""

        ## Notes:

        - Zero GPU provides free GPU access for public Spaces

        - Training will automatically get GPU allocation when started

        - Using smaller model (1.5B) for faster demo

        - Real GSM8K data - no fake metrics!

        """)
    
    return demo

# Launch app
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()