# /// script
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.36.0",
#     "datasets>=2.14.0",
# ]
# ///
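# The block above is PEP 723 inline script metadata: a runner such as
# `uv run demo_train.py` can resolve and install these dependencies before
# executing the script.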
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig

print("πŸš€ Starting quick demo training...")
# Load a tiny subset of data
dataset = load_dataset("trl-lib/Capybara", split="train[:50]")
print(f"βœ… Dataset loaded: {len(dataset)} examples")
# Training configuration
config = SFTConfig(
    # CRITICAL: Hub settings
    output_dir="demo-qwen-sft",
    push_to_hub=True,
    hub_model_id="evalstate/demo-qwen-sft",
    # Quick demo settings
    max_steps=10,  # Just 10 steps for quick demo
    per_device_train_batch_size=2,
    learning_rate=2e-5,
    # Logging
    logging_steps=2,
    save_strategy="no",  # Don't save checkpoints for quick demo
    # Optimization
    warmup_steps=2,
    lr_scheduler_type="constant",
)
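# Note: push_to_hub=True requires Hub credentials with write access to
# hub_model_id (e.g. via `huggingface-cli login` or an HF_TOKEN environment
# variable).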
# LoRA configuration (reduces memory)
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
)
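# target_modules above adapts only the attention query/value projections;
# adding k_proj/o_proj (or the MLP projections) would train more parameters
# at the cost of extra memory.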
# Initialize and train
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=dataset,
    args=config,
    peft_config=peft_config,
)
print("πŸƒ Training for 10 steps...")
trainer.train()
print("πŸ’Ύ Pushing to Hub...")
trainer.push_to_hub()
print("βœ… Complete! Model at: https://huggingface.co/evalstate/demo-qwen-sft")