"""
PromptWizard Qwen Training โ Configurable Dataset & Repo
Fine-tunes Qwen using a user-selected dataset and uploads the trained model
to a user-specified Hugging Face Hub repo asynchronously with detailed logs.
"""
import asyncio
import threading
from datetime import datetime

import gradio as gr
import spaces
import torch
from datasets import load_dataset
from huggingface_hub import HfFolder, upload_folder
from peft import LoraConfig, TaskType, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# ==== Async upload wrapper ====
def start_async_upload(local_dir, hf_repo, output_log):
    """Starts the async model upload in a background daemon thread."""
    def runner():
        output_log.append(f"[INFO] 🚀 Async upload thread started for repo: {hf_repo}")
        asyncio.run(async_upload_model(local_dir, hf_repo, output_log))
        output_log.append(f"[INFO] 🏁 Async upload thread finished for repo: {hf_repo}")
    threading.Thread(target=runner, daemon=True).start()
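
# Note: the daemon thread lets train_model() return while the upload keeps running,
# and asyncio.run() gives the coroutine below its own event loop inside that thread.
# Because the thread is a daemon, a very long upload could be cut off if the Space
# process exits or restarts before it finishes.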
async def async_upload_model(local_dir, hf_repo, output_log, max_retries=3):
    """
    Uploads a local model directory to the HF Hub via the HTTP API,
    retrying up to max_retries times with a short backoff.
    """
    try:
        token = HfFolder.get_token()
        output_log.append(f"[INFO] ☁️ Preparing to upload to repo: {hf_repo}")
        attempt = 0
        while attempt < max_retries:
            try:
                output_log.append(f"[INFO] 🔄 Attempt {attempt + 1} to upload folder via HTTP API...")
                upload_folder(
                    folder_path=local_dir,
                    repo_id=hf_repo,
                    repo_type="model",
                    token=token,
                    ignore_patterns=["*.lock", "*.tmp"],  # ignore temp files
                    create_pr=False,
                )
                output_log.append("[SUCCESS] ✅ Model successfully uploaded to HF Hub!")
                break
            except Exception as e:
                attempt += 1
                output_log.append(f"[ERROR] Upload attempt {attempt} failed: {e}")
                if attempt >= max_retries:
                    output_log.append("[ERROR] ❌ Max retries reached. Upload failed.")
                else:
                    output_log.append("[INFO] Retrying upload in 5 seconds...")
                    await asyncio.sleep(5)
    except Exception as e:
        output_log.append(f"[ERROR] ❌ Unexpected error during upload: {e}")
# ==== GPU check ====
def check_gpu_status():
    return "🚀 Zero GPU Ready - GPU will be allocated when training starts"

# ==== Logging helper ====
def log_message(output_log, msg):
    line = f"[{datetime.now().strftime('%H:%M:%S')}] {msg}"
    print(line)
    output_log.append(line)
# ==== Main Training ====
@spaces.GPU(duration=300)
def train_model(base_model, dataset_name, num_epochs, batch_size, learning_rate, hf_repo):
    output_log = []
    test_split = 0.2
    try:
        log_message(output_log, "🚀 Initializing training sequence...")

        # ===== Device =====
        device = "cuda" if torch.cuda.is_available() else "cpu"
        log_message(output_log, f"🎮 Using device: {device}")
        if device == "cuda":
            log_message(output_log, f"✅ GPU: {torch.cuda.get_device_name(0)}")

        # ===== Load dataset =====
        log_message(output_log, f"\n📚 Loading dataset: {dataset_name} ...")
        dataset = load_dataset(dataset_name)
        dataset = dataset["train"].train_test_split(test_size=test_split)
        train_dataset = dataset["train"]
        test_dataset = dataset["test"]
        log_message(output_log, f"   Training samples: {len(train_dataset)}")
        log_message(output_log, f"   Test samples: {len(test_dataset)}")
        log_message(output_log, f"   Columns: {train_dataset.column_names}")
        # ===== Format examples =====
        def format_example(item):
            # Fall back to joining all fields if no "text"/"content" column exists
            text = item.get("text") or item.get("content") or " ".join(str(v) for v in item.values())
            prompt = f"""<|system|>
You are a wise teacher interpreting Bhagavad Gita with deep insights.
<|user|>
{text}
<|assistant|>
"""
            return {"text": prompt}

        train_dataset = train_dataset.map(format_example)
        test_dataset = test_dataset.map(format_example)
        log_message(output_log, f"✅ Formatted {len(train_dataset)} train + {len(test_dataset)} test examples")
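
        # Note: this is a hand-rolled chat-style prompt. The literal
        # <|system|>/<|user|>/<|assistant|> tags are not Qwen's native special
        # tokens (Qwen chat models use <|im_start|>/<|im_end|>); an alternative
        # is to build the prompt with tokenizer.apply_chat_template(...).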
        # ===== Load model & tokenizer =====
        log_message(output_log, f"\n🤖 Loading model: {base_model}")
        tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
        if tokenizer.pad_token is None:
            # Standard fallback for models (like Qwen) that ship without a pad token
            tokenizer.pad_token = tokenizer.eos_token
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            trust_remote_code=True,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            low_cpu_mem_usage=True,
        )
        if device == "cuda":
            model = model.to(device)
        log_message(output_log, "✅ Model and tokenizer loaded successfully")
        log_message(output_log, f"Tokenizer vocab size: {tokenizer.vocab_size}")
        # ===== LoRA configuration =====
        log_message(output_log, "\n⚙️ Configuring LoRA for efficient fine-tuning...")
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            r=8,
            lora_alpha=16,
            lora_dropout=0.1,
            target_modules=["q_proj", "v_proj"],
            bias="none",
        )
        model = get_peft_model(model, lora_config)
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        log_message(output_log, f"Trainable params after LoRA: {trainable_params:,}")
        # ===== Tokenization + labels =====
        def tokenize_fn(examples):
            tokenized = tokenizer(
                examples["text"],
                padding="max_length",
                truncation=True,
                max_length=256,
            )
            # Causal LM: labels are the input ids; the model shifts them internally
            tokenized["labels"] = tokenized["input_ids"].copy()
            return tokenized

        train_dataset = train_dataset.map(tokenize_fn, batched=True)
        test_dataset = test_dataset.map(tokenize_fn, batched=True)
        log_message(output_log, "✅ Tokenization + labels done")
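
        # Note: with padding="max_length", pad positions keep their token ids in
        # `labels`, so they contribute to the loss. A common refinement (not in
        # the original code) masks them out, e.g.:
        #
        #     pad_id = tokenizer.pad_token_id
        #     tokenized["labels"] = [
        #         [tok if tok != pad_id else -100 for tok in seq]
        #         for seq in tokenized["input_ids"]
        #     ]
        #
        # Caveat: since pad_token was set to eos_token above, this also masks
        # genuine EOS tokens.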
        # ===== Training arguments =====
        output_dir = "./qwen-gita-lora"
        training_args = TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=num_epochs,
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=2,
            warmup_steps=10,
            logging_steps=5,
            save_strategy="epoch",
            fp16=device == "cuda",
            optim="adamw_torch",
            learning_rate=learning_rate,
            max_steps=100,  # hard cap so the demo fits the Zero GPU time budget
        )
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=test_dataset,
            tokenizer=tokenizer,
        )
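
        # Note: when max_steps is positive it overrides num_train_epochs, so the
        # "Epochs" slider is effectively capped by the 100-step demo limit.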
        # ===== Train =====
        log_message(output_log, "\n🚀 Starting training...")
        trainer.train()

        log_message(output_log, "\n💾 Saving trained model locally...")
        trainer.save_model(output_dir)
        tokenizer.save_pretrained(output_dir)

        # ===== Async upload =====
        log_message(output_log, f"\n☁️ Initiating async upload to {hf_repo}")
        start_async_upload(output_dir, hf_repo, output_log)
        log_message(output_log, "✅ Training complete & async upload started!")
    except Exception as e:
        log_message(output_log, f"\n❌ Error during training: {e}")
    return "\n".join(output_log)
# ==== Gradio Interface ====
def create_interface():
    with gr.Blocks(title="PromptWizard - Qwen Trainer") as demo:
        gr.Markdown("""
# 🧠 PromptWizard Qwen Fine-tuning
Fine-tune Qwen on any dataset and upload to any Hugging Face repo.
""")
        with gr.Row():
            with gr.Column():
                gr.Textbox(label="GPU Status", value=check_gpu_status(), interactive=False)
                base_model = gr.Textbox(label="Base Model", value="Qwen/Qwen2.5-0.5B")
                dataset_name = gr.Textbox(label="Dataset Name", value="rahul7star/Gita")
                hf_repo = gr.Textbox(label="HF Repo for Upload", value="rahul7star/Qwen0.5-3B-Gita")
                num_epochs = gr.Slider(1, 3, value=1, step=1, label="Epochs")
                batch_size = gr.Slider(1, 4, value=2, step=1, label="Batch Size")
                learning_rate = gr.Number(value=5e-5, label="Learning Rate")
                train_btn = gr.Button("🚀 Start Fine-tuning", variant="primary")
            with gr.Column():
                output = gr.Textbox(
                    label="Training Log",
                    lines=25,
                    max_lines=40,
                    value="Click 'Start Fine-tuning' to train and upload your model.",
                )
        train_btn.click(
            fn=train_model,
            inputs=[base_model, dataset_name, num_epochs, batch_size, learning_rate, hf_repo],
            outputs=output,
        )
    return demo
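
# Note: the click handler updates the textbox once, after training ends. For live
# progress, train_model could instead be written as a generator that yields the
# joined log periodically; Gradio streams yielded values to the output component.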
if __name__ == "__main__":
demo = create_interface()
demo.launch(server_name="0.0.0.0", server_port=7860)