gguf-improve
#5
by
evalstate
HF Staff
- opened
- README.md +0 -6
- dataset_inspector.py +0 -416
- trl/SKILL.md +48 -165
- trl/references/trackio_guide.md +19 -8
- trl/references/training_methods.md +34 -4
- trl/references/training_patterns.md +1 -1
- trl/references/troubleshooting.md +3 -10
- trl/references/uv_scripts_guide.md +414 -0
- trl/scripts/train_dpo_example.py +2 -2
- trl/scripts/train_grpo_example.py +2 -2
- trl/scripts/train_sft_example.py +2 -2
- trl/scripts/validate_dataset.py +175 -0
README.md
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
viewer: false
|
| 3 |
-
---
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
working here `https://github.com/evalstate/skills-dev`
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
dataset_inspector.py
DELETED
|
@@ -1,416 +0,0 @@
|
|
| 1 |
-
#!/usr/bin/env python3
|
| 2 |
-
# /// script
|
| 3 |
-
# dependencies = []
|
| 4 |
-
# ///
|
| 5 |
-
"""
|
| 6 |
-
Dataset Format Inspector for TRL Training (LLM-Optimized Output)
|
| 7 |
-
|
| 8 |
-
Inspects Hugging Face datasets to determine TRL training compatibility.
|
| 9 |
-
Uses Datasets Server API for instant results - no dataset download needed!
|
| 10 |
-
|
| 11 |
-
ULTRA-EFFICIENT: Uses HF Datasets Server API - completes in <2 seconds.
|
| 12 |
-
|
| 13 |
-
Usage with HF Jobs:
|
| 14 |
-
hf_jobs("uv", {
|
| 15 |
-
"script": "https://huggingface.co/datasets/evalstate/trl-helpers/raw/main/dataset_inspector.py",
|
| 16 |
-
"script_args": ["--dataset", "your/dataset", "--split", "train"]
|
| 17 |
-
})
|
| 18 |
-
"""
|
| 19 |
-
|
| 20 |
-
import argparse
import json
import sys
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, List
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
def parse_args():
    """Parse the inspector's command-line options.

    Returns:
        argparse.Namespace with ``dataset``, ``split``, ``config``,
        ``preview``, ``samples`` and ``json_output`` attributes.
    """
    p = argparse.ArgumentParser(description="Inspect dataset format for TRL training")
    p.add_argument("--dataset", type=str, required=True, help="Dataset name")
    p.add_argument("--split", type=str, default="train", help="Dataset split (default: train)")
    p.add_argument("--config", type=str, default="default", help="Dataset config name (default: default)")
    p.add_argument("--preview", type=int, default=150, help="Max chars per field preview")
    p.add_argument("--samples", type=int, default=5, help="Number of samples to fetch (default: 5)")
    p.add_argument("--json-output", action="store_true", help="Output as JSON")
    return p.parse_args()
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
def api_request(url: str) -> Dict:
    """GET *url* and decode the JSON body returned by the Datasets Server.

    Args:
        url: Fully-formed Datasets Server endpoint URL.

    Returns:
        Parsed JSON response as a dict, or ``None`` on HTTP 404 (dataset,
        config or split not found).

    Raises:
        Exception: For any other HTTP error, network failure, timeout,
            or malformed JSON body.
    """
    # Explicit import: `urllib.error` is otherwise only reachable because
    # importing urllib.request happens to pull it in (an implementation detail).
    import urllib.error

    try:
        # 10s timeout keeps the inspector fast; the API normally answers in <2s.
        with urllib.request.urlopen(url, timeout=10) as response:
            return json.loads(response.read().decode())
    except urllib.error.HTTPError as e:
        if e.code == 404:
            return None
        # Chain the original error so the HTTP status context is preserved.
        raise Exception(f"API request failed: {e.code} {e.reason}") from e
    except Exception as e:
        raise Exception(f"API request failed: {str(e)}") from e
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
def get_splits(dataset: str) -> Dict:
    """Ask the Datasets Server which configs/splits *dataset* exposes."""
    base = "https://datasets-server.huggingface.co/splits"
    return api_request(f"{base}?dataset={urllib.parse.quote(dataset)}")
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
def get_rows(dataset: str, config: str, split: str, offset: int = 0, length: int = 5) -> Dict:
    """Fetch up to *length* rows of *dataset* starting at *offset*."""
    query = (
        f"dataset={urllib.parse.quote(dataset)}"
        f"&config={config}&split={split}&offset={offset}&length={length}"
    )
    return api_request(f"https://datasets-server.huggingface.co/rows?{query}")
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
def find_columns(columns: List[str], patterns: List[str]) -> List[str]:
    """Return the columns whose lower-cased name contains any pattern substring."""
    matches = []
    for column in columns:
        lowered = column.lower()
        if any(pattern in lowered for pattern in patterns):
            matches.append(column)
    return matches
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
def check_sft_compatibility(columns: List[str]) -> Dict[str, Any]:
    """Report whether the columns fit SFT directly, or which could be mapped.

    SFT accepts a ``messages`` list, a raw ``text`` field, or a
    ``prompt`` + ``completion`` pair out of the box; otherwise the best
    candidate prompt/response columns are suggested for mapping.
    """
    if "messages" in columns:
        reason = "messages"
    elif "text" in columns:
        reason = "text"
    elif "prompt" in columns and "completion" in columns:
        reason = "prompt+completion"
    else:
        reason = None

    prompt_candidates = find_columns(columns, ["prompt", "instruction", "question", "input"])
    response_candidates = find_columns(columns, ["response", "completion", "output", "answer"])

    return {
        "ready": reason is not None,
        "reason": reason,
        "possible_prompt": prompt_candidates[0] if prompt_candidates else None,
        "possible_response": response_candidates[0] if response_candidates else None,
        "has_context": "context" in columns,
    }
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
def check_dpo_compatibility(columns: List[str]) -> Dict[str, Any]:
    """Report DPO readiness; DPO requires exact prompt/chosen/rejected columns."""
    prompt_candidates = find_columns(columns, ["prompt", "instruction", "question", "input"])
    chosen_candidates = find_columns(columns, ["chosen", "preferred", "winner"])
    rejected_candidates = find_columns(columns, ["rejected", "dispreferred", "loser"])

    def first(candidates):
        # Best-guess column is simply the first match in column order.
        return candidates[0] if candidates else None

    return {
        "ready": {"prompt", "chosen", "rejected"}.issubset(columns),
        "can_map": bool(prompt_candidates and chosen_candidates and rejected_candidates),
        "prompt_col": first(prompt_candidates),
        "chosen_col": first(chosen_candidates),
        "rejected_col": first(rejected_candidates),
    }
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
def check_grpo_compatibility(columns: List[str]) -> Dict[str, Any]:
    """Report GRPO readiness: a prompt column and no preference pair present."""
    no_preference_pair = "chosen" not in columns and "rejected" not in columns
    prompt_candidates = find_columns(columns, ["prompt", "instruction", "question", "input"])
    return {
        "ready": "prompt" in columns and no_preference_pair,
        "can_map": bool(prompt_candidates) and no_preference_pair,
        "prompt_col": prompt_candidates[0] if prompt_candidates else None,
    }
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
def check_kto_compatibility(columns: List[str]) -> Dict[str, Any]:
    """Report KTO readiness: needs prompt, completion and a binary label column."""
    required = ("prompt", "completion", "label")
    return {"ready": all(field in columns for field in required)}
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
def generate_mapping_code(method: str, info: Dict[str, Any]) -> str:
    """Generate copy-paste Python that maps a dataset into *method*'s format.

    Args:
        method: One of "SFT", "DPO", "GRPO" (KTO has no auto-mapping).
        info: Compatibility dict produced by the matching
            ``check_*_compatibility()`` function.

    Returns:
        A ready-to-run mapping snippet, or ``None`` when no mapping is
        needed (dataset already ready) or none can be inferred.
    """
    if method == "SFT":
        if info["ready"]:
            return None

        prompt_col = info.get("possible_prompt")
        response_col = info.get("possible_response")
        has_context = info.get("has_context", False)

        if not prompt_col:
            return None

        if has_context and response_col:
            return f"""def format_for_sft(example):
    text = f"Instruction: {{example['{prompt_col}']}}\\n\\n"
    if example.get('context'):
        text += f"Context: {{example['context']}}\\n\\n"
    text += f"Response: {{example['{response_col}']}}"
    return {{'text': text}}

dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
        elif response_col:
            # BUGFIX: the closing quote after the second placeholder is
            # required; without it the emitted snippet was a SyntaxError.
            return f"""def format_for_sft(example):
    return {{'text': f"{{example['{prompt_col}']}}\\n\\n{{example['{response_col}']}}"}}

dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
        else:
            return f"""def format_for_sft(example):
    return {{'text': example['{prompt_col}']}}

dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""

    elif method == "DPO":
        if info["ready"] or not info["can_map"]:
            return None

        return f"""def format_for_dpo(example):
    return {{
        'prompt': example['{info['prompt_col']}'],
        'chosen': example['{info['chosen_col']}'],
        'rejected': example['{info['rejected_col']}'],
    }}

dataset = dataset.map(format_for_dpo, remove_columns=dataset.column_names)"""

    elif method == "GRPO":
        if info["ready"] or not info["can_map"]:
            return None

        return f"""def format_for_grpo(example):
    return {{'prompt': example['{info['prompt_col']}']}}

dataset = dataset.map(format_for_grpo, remove_columns=dataset.column_names)"""

    # Unknown method, or one (like KTO) with no auto-mapping support.
    return None
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
def format_value_preview(value: Any, max_chars: int) -> str:
    """Render *value* as a short one-line preview truncated to *max_chars*."""

    def truncate(text: str) -> str:
        # Append an ellipsis only when something was actually cut off.
        return text[:max_chars] + ("..." if len(text) > max_chars else "")

    if value is None:
        return "None"
    if isinstance(value, str):
        return truncate(value)
    if isinstance(value, list) and value and isinstance(value[0], dict):
        # Chat-style column (list of message dicts): summarise rather than dump.
        return f"[{len(value)} items] Keys: {list(value[0].keys())}"
    return truncate(str(value))
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
def main():
    """Inspect one dataset split via the Datasets Server API and report
    TRL training-method compatibility (SFT/DPO/GRPO/KTO).

    Exits 0 on success (JSON or human-readable report printed), 1 on any
    API/lookup failure.
    """
    args = parse_args()

    print(f"Fetching dataset info via Datasets Server API...")

    try:
        # Get splits info
        splits_data = get_splits(args.dataset)
        if not splits_data or "splits" not in splits_data:
            print(f"ERROR: Could not fetch splits for dataset '{args.dataset}'")
            print(f"  Dataset may not exist or is not accessible via Datasets Server API")
            sys.exit(1)

        # Find the right config
        available_configs = set()
        split_found = False
        config_to_use = args.config

        for split_info in splits_data["splits"]:
            available_configs.add(split_info["config"])
            if split_info["config"] == args.config and split_info["split"] == args.split:
                split_found = True

        # If default config not found, try first available
        # NOTE(review): sets are unordered, so "first available" is an
        # arbitrary config, not necessarily the dataset's first-listed one.
        if not split_found and available_configs:
            config_to_use = list(available_configs)[0]
            print(f"Config '{args.config}' not found, trying '{config_to_use}'...")

        # Get rows
        rows_data = get_rows(args.dataset, config_to_use, args.split, offset=0, length=args.samples)

        if not rows_data or "rows" not in rows_data:
            print(f"ERROR: Could not fetch rows for dataset '{args.dataset}'")
            print(f"  Split '{args.split}' may not exist")
            print(f"  Available configs: {', '.join(sorted(available_configs))}")
            sys.exit(1)

        rows = rows_data["rows"]
        if not rows:
            print(f"ERROR: No rows found in split '{args.split}'")
            sys.exit(1)

        # Extract column info from first row
        first_row = rows[0]["row"]
        columns = list(first_row.keys())
        features = rows_data.get("features", [])

        # Get total count if available (formatted with thousands separators)
        total_examples = "Unknown"
        for split_info in splits_data["splits"]:
            if split_info["config"] == config_to_use and split_info["split"] == args.split:
                total_examples = f"{split_info.get('num_examples', 'Unknown'):,}" if isinstance(split_info.get('num_examples'), int) else "Unknown"
                break

    except Exception as e:
        print(f"ERROR: {str(e)}")
        sys.exit(1)

    # Run compatibility checks (all operate on column names only)
    sft_info = check_sft_compatibility(columns)
    dpo_info = check_dpo_compatibility(columns)
    grpo_info = check_grpo_compatibility(columns)
    kto_info = check_kto_compatibility(columns)

    # Determine recommended methods
    recommended = []
    if sft_info["ready"]:
        recommended.append("SFT")
    elif sft_info["possible_prompt"]:
        recommended.append("SFT (needs mapping)")

    if dpo_info["ready"]:
        recommended.append("DPO")
    elif dpo_info["can_map"]:
        recommended.append("DPO (needs mapping)")

    if grpo_info["ready"]:
        recommended.append("GRPO")
    elif grpo_info["can_map"]:
        recommended.append("GRPO (needs mapping)")

    if kto_info["ready"]:
        recommended.append("KTO")

    # JSON output mode: emit a machine-readable report and stop
    if args.json_output:
        result = {
            "dataset": args.dataset,
            "config": config_to_use,
            "split": args.split,
            "total_examples": total_examples,
            "columns": columns,
            "features": [{"name": f["name"], "type": f["type"]} for f in features] if features else [],
            "compatibility": {
                "SFT": sft_info,
                "DPO": dpo_info,
                "GRPO": grpo_info,
                "KTO": kto_info,
            },
            "recommended_methods": recommended,
        }
        print(json.dumps(result, indent=2))
        sys.exit(0)

    # Human-readable output optimized for LLM parsing
    # NOTE(review): the "β" status glyphs below look mojibake-garbled
    # (likely originally check/warning/cross marks) — confirm intended symbols.
    print("=" * 80)
    print(f"DATASET INSPECTION RESULTS")
    print("=" * 80)

    print(f"\nDataset: {args.dataset}")
    print(f"Config: {config_to_use}")
    print(f"Split: {args.split}")
    print(f"Total examples: {total_examples}")
    print(f"Samples fetched: {len(rows)}")

    print(f"\n{'COLUMNS':-<80}")
    if features:
        for feature in features:
            print(f"  {feature['name']}: {feature['type']}")
    else:
        for col in columns:
            print(f"  {col}: (type info not available)")

    print(f"\n{'EXAMPLE DATA':-<80}")
    example = first_row
    for col in columns:
        value = example.get(col)
        display = format_value_preview(value, args.preview)
        print(f"\n{col}:")
        print(f"  {display}")

    print(f"\n{'TRAINING METHOD COMPATIBILITY':-<80}")

    # SFT
    print(f"\n[SFT] {'β READY' if sft_info['ready'] else 'β NEEDS MAPPING'}")
    if sft_info["ready"]:
        print(f"  Reason: Dataset has '{sft_info['reason']}' field")
        print(f"  Action: Use directly with SFTTrainer")
    elif sft_info["possible_prompt"]:
        print(f"  Detected: prompt='{sft_info['possible_prompt']}' response='{sft_info['possible_response']}'")
        print(f"  Action: Apply mapping code (see below)")
    else:
        print(f"  Status: Cannot determine mapping - manual inspection needed")

    # DPO
    print(f"\n[DPO] {'β READY' if dpo_info['ready'] else 'β NEEDS MAPPING' if dpo_info['can_map'] else 'β INCOMPATIBLE'}")
    if dpo_info["ready"]:
        print(f"  Reason: Dataset has 'prompt', 'chosen', 'rejected' fields")
        print(f"  Action: Use directly with DPOTrainer")
    elif dpo_info["can_map"]:
        print(f"  Detected: prompt='{dpo_info['prompt_col']}' chosen='{dpo_info['chosen_col']}' rejected='{dpo_info['rejected_col']}'")
        print(f"  Action: Apply mapping code (see below)")
    else:
        print(f"  Status: Missing required fields (prompt + chosen + rejected)")

    # GRPO
    print(f"\n[GRPO] {'β READY' if grpo_info['ready'] else 'β NEEDS MAPPING' if grpo_info['can_map'] else 'β INCOMPATIBLE'}")
    if grpo_info["ready"]:
        print(f"  Reason: Dataset has 'prompt' field")
        print(f"  Action: Use directly with GRPOTrainer")
    elif grpo_info["can_map"]:
        print(f"  Detected: prompt='{grpo_info['prompt_col']}'")
        print(f"  Action: Apply mapping code (see below)")
    else:
        print(f"  Status: Missing prompt field")

    # KTO
    print(f"\n[KTO] {'β READY' if kto_info['ready'] else 'β INCOMPATIBLE'}")
    if kto_info["ready"]:
        print(f"  Reason: Dataset has 'prompt', 'completion', 'label' fields")
        print(f"  Action: Use directly with KTOTrainer")
    else:
        print(f"  Status: Missing required fields (prompt + completion + label)")

    # Mapping code: print ready-to-paste snippets for any method that needs one
    print(f"\n{'MAPPING CODE (if needed)':-<80}")

    mapping_needed = False

    sft_mapping = generate_mapping_code("SFT", sft_info)
    if sft_mapping:
        print(f"\n# For SFT Training:")
        print(sft_mapping)
        mapping_needed = True

    dpo_mapping = generate_mapping_code("DPO", dpo_info)
    if dpo_mapping:
        print(f"\n# For DPO Training:")
        print(dpo_mapping)
        mapping_needed = True

    grpo_mapping = generate_mapping_code("GRPO", grpo_info)
    if grpo_mapping:
        print(f"\n# For GRPO Training:")
        print(grpo_mapping)
        mapping_needed = True

    if not mapping_needed:
        print("\nNo mapping needed - dataset is ready for training!")

    print(f"\n{'SUMMARY':-<80}")
    print(f"Recommended training methods: {', '.join(recommended) if recommended else 'None (dataset needs formatting)'}")
    print(f"\nNote: Used Datasets Server API (instant, no download required)")

    print("\n" + "=" * 80)
    sys.exit(0)
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is a deliberate, clean exit — not an error.
        sys.exit(0)
    except Exception as e:
        # Last-resort handler so job logs always end with a readable error line.
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
trl/SKILL.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
---
|
| 2 |
name: trl
|
| 3 |
-
description: This skill should be used when users want to train or fine-tune language models using TRL (Transformer Reinforcement Learning) on Hugging Face Jobs infrastructure. Covers SFT, DPO, GRPO
|
| 4 |
license: Complete terms in LICENSE.txt
|
| 5 |
---
|
| 6 |
|
|
@@ -14,7 +14,9 @@ Train language models using TRL (Transformer Reinforcement Learning) on fully ma
|
|
| 14 |
- **SFT** (Supervised Fine-Tuning) - Standard instruction tuning
|
| 15 |
- **DPO** (Direct Preference Optimization) - Alignment from preference data
|
| 16 |
- **GRPO** (Group Relative Policy Optimization) - Online RL training
|
|
|
|
| 17 |
- **Reward Modeling** - Train reward models for RLHF
|
|
|
|
| 18 |
|
| 19 |
**For detailed TRL method documentation:**
|
| 20 |
```python
|
|
@@ -30,7 +32,7 @@ hf_doc_fetch("https://huggingface.co/docs/trl/dpo_trainer") # DPO
|
|
| 30 |
|
| 31 |
Use this skill when users want to:
|
| 32 |
- Fine-tune language models on cloud GPUs without local infrastructure
|
| 33 |
-
- Train with TRL methods (SFT, DPO, GRPO, etc.)
|
| 34 |
- Run training jobs on Hugging Face Jobs infrastructure
|
| 35 |
- Convert trained models to GGUF for local deployment (Ollama, LM Studio, llama.cpp)
|
| 36 |
- Ensure trained models are permanently saved to the Hub
|
|
@@ -40,7 +42,7 @@ Use this skill when users want to:
|
|
| 40 |
|
| 41 |
When assisting with training jobs:
|
| 42 |
|
| 43 |
-
1. **
|
| 44 |
|
| 45 |
2. **Always include Trackio** - Every training script should include Trackio for real-time monitoring. Use example scripts in `scripts/` as templates.
|
| 46 |
|
|
@@ -50,7 +52,7 @@ When assisting with training jobs:
|
|
| 50 |
|
| 51 |
## Local Script Dependencies
|
| 52 |
|
| 53 |
-
To run scripts locally (like `estimate_cost.py`), install dependencies:
|
| 54 |
```bash
|
| 55 |
pip install -r requirements.txt
|
| 56 |
```
|
|
@@ -61,14 +63,14 @@ Before starting any training job, verify:
|
|
| 61 |
|
| 62 |
### β
**Account & Authentication**
|
| 63 |
- Hugging Face Account with [Pro](https://hf.co/pro), [Team](https://hf.co/enterprise), or [Enterprise](https://hf.co/enterprise) plan (Jobs require paid plan)
|
| 64 |
-
- Authenticated login: Check with `
|
| 65 |
- **HF_TOKEN for Hub Push** β οΈ CRITICAL - Training environment is ephemeral, must push to Hub or ALL training results are lost
|
| 66 |
- Token must have write permissions and is automatically available as `$HF_TOKEN` in job secrets
|
| 67 |
|
| 68 |
### β
**Dataset Requirements**
|
| 69 |
- Dataset must exist on Hub or be loadable via `datasets.load_dataset()`
|
| 70 |
- Format must match training method (SFT: "messages"/text/prompt-completion; DPO: chosen/rejected; GRPO: prompt-only)
|
| 71 |
-
-
|
| 72 |
- Size appropriate for hardware (Demo: 50-100 examples on t4-small; Production: 1K-10K+ on a10g-large/a100-large)
|
| 73 |
|
| 74 |
### β οΈ **Critical Settings**
|
|
@@ -116,9 +118,27 @@ The job is running in the background. Ask me to check status/logs when ready!
|
|
| 116 |
|
| 117 |
## Quick Start: Three Approaches
|
| 118 |
|
| 119 |
-
### Approach 1:
|
| 120 |
|
| 121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
|
| 123 |
```python
|
| 124 |
hf_jobs("uv", {
|
|
@@ -165,56 +185,17 @@ trackio.finish()
|
|
| 165 |
})
|
| 166 |
```
|
| 167 |
|
| 168 |
-
**Benefits:**
|
| 169 |
-
**When to use:**
|
| 170 |
-
|
| 171 |
-
#### Working with Scripts
|
| 172 |
-
|
| 173 |
-
β οΈ **Important:** The `script` parameter accepts either inline code (as shown above) OR a URL. **Local file paths do NOT work.**
|
| 174 |
-
|
| 175 |
-
**Why local paths don't work:**
|
| 176 |
-
Jobs run in isolated Docker containers without access to your local filesystem. Scripts must be:
|
| 177 |
-
- Inline code (recommended for custom training)
|
| 178 |
-
- Publicly accessible URLs
|
| 179 |
-
- Private repo URLs (with HF_TOKEN)
|
| 180 |
-
|
| 181 |
-
**Common mistakes:**
|
| 182 |
-
```python
|
| 183 |
-
# β These will all fail
|
| 184 |
-
hf_jobs("uv", {"script": "train.py"})
|
| 185 |
-
hf_jobs("uv", {"script": "./scripts/train.py"})
|
| 186 |
-
hf_jobs("uv", {"script": "/path/to/train.py"})
|
| 187 |
-
```
|
| 188 |
-
|
| 189 |
-
**Correct approaches:**
|
| 190 |
-
```python
|
| 191 |
-
# β
Inline code (recommended)
|
| 192 |
-
hf_jobs("uv", {"script": "# /// script\n# dependencies = [...]\n# ///\n\n<your code>"})
|
| 193 |
-
|
| 194 |
-
# β
From Hugging Face Hub
|
| 195 |
-
hf_jobs("uv", {"script": "https://huggingface.co/user/repo/resolve/main/train.py"})
|
| 196 |
-
|
| 197 |
-
# β
From GitHub
|
| 198 |
-
hf_jobs("uv", {"script": "https://raw.githubusercontent.com/user/repo/main/train.py"})
|
| 199 |
-
|
| 200 |
-
# β
From Gist
|
| 201 |
-
hf_jobs("uv", {"script": "https://gist.githubusercontent.com/user/id/raw/train.py"})
|
| 202 |
-
```
|
| 203 |
-
|
| 204 |
-
**To use local scripts:** Upload to HF Hub first:
|
| 205 |
-
```bash
|
| 206 |
-
huggingface-cli repo create my-training-scripts --type model
|
| 207 |
-
huggingface-cli upload my-training-scripts ./train.py train.py
|
| 208 |
-
# Use: https://huggingface.co/USERNAME/my-training-scripts/resolve/main/train.py
|
| 209 |
-
```
|
| 210 |
|
| 211 |
-
### Approach
|
| 212 |
|
| 213 |
TRL provides battle-tested scripts for all methods. Can be run from URLs:
|
| 214 |
|
| 215 |
```python
|
| 216 |
hf_jobs("uv", {
|
| 217 |
-
"script": "https://
|
| 218 |
"script_args": [
|
| 219 |
"--model_name_or_path", "Qwen/Qwen2.5-0.5B",
|
| 220 |
"--dataset_name", "trl-lib/Capybara",
|
|
@@ -230,7 +211,7 @@ hf_jobs("uv", {
|
|
| 230 |
|
| 231 |
**Benefits:** No code to write, maintained by TRL team, production-tested
|
| 232 |
**When to use:** Standard TRL training, quick experiments, don't need custom code
|
| 233 |
-
**Available:**
|
| 234 |
|
| 235 |
### Finding More UV Scripts on Hub
|
| 236 |
|
|
@@ -246,26 +227,6 @@ hub_repo_details(["uv-scripts/classification"], repo_type="dataset", include_rea
|
|
| 246 |
|
| 247 |
**Popular collections:** ocr, classification, synthetic-data, vllm, dataset-creation
|
| 248 |
|
| 249 |
-
### Approach 3: TRL Jobs Package (For Terminal Use)
|
| 250 |
-
|
| 251 |
-
The `trl-jobs` package provides optimized defaults and one-liner training. **Note: This approach uses bash commands, not `hf_jobs()` MCP tool.**
|
| 252 |
-
|
| 253 |
-
```bash
|
| 254 |
-
# Install (users only, not needed for this environment)
|
| 255 |
-
pip install trl-jobs
|
| 256 |
-
|
| 257 |
-
# Train with SFT (simplest possible)
|
| 258 |
-
trl-jobs sft \
|
| 259 |
-
--model_name Qwen/Qwen2.5-0.5B \
|
| 260 |
-
--dataset_name trl-lib/Capybara
|
| 261 |
-
```
|
| 262 |
-
|
| 263 |
-
**Benefits:** Pre-configured settings, automatic Trackio integration, automatic Hub push, one-line commands
|
| 264 |
-
**When to use:** User working in terminal directly (not Claude Code context), quick local experimentation
|
| 265 |
-
**Repository:** https://github.com/huggingface/trl-jobs
|
| 266 |
-
|
| 267 |
-
β οΈ **In Claude Code context, use Approach 1 (UV Scripts) with `hf_jobs()` instead.**
|
| 268 |
-
|
| 269 |
## Hardware Selection
|
| 270 |
|
| 271 |
| Model Size | Recommended Hardware | Cost (approx/hr) | Use Case |
|
|
@@ -398,85 +359,6 @@ hf_jobs("logs", {"job_id": "your-job-id"})
|
|
| 398 |
|
| 399 |
**Remember:** Wait for user to request status checks. Avoid polling repeatedly.
|
| 400 |
|
| 401 |
-
## Dataset Validation
|
| 402 |
-
|
| 403 |
-
**Validate dataset format BEFORE launching GPU training to prevent the #1 cause of training failures: format mismatches.**
|
| 404 |
-
|
| 405 |
-
### Why Validate
|
| 406 |
-
|
| 407 |
-
- 50%+ of training failures are due to dataset format issues
|
| 408 |
-
- DPO especially strict: requires exact column names (`prompt`, `chosen`, `rejected`)
|
| 409 |
-
- Failed GPU jobs waste $1-10 and 30-60 minutes
|
| 410 |
-
- Validation on CPU costs ~$0.01 and takes <1 minute
|
| 411 |
-
|
| 412 |
-
### When to Validate
|
| 413 |
-
|
| 414 |
-
**ALWAYS validate for:**
|
| 415 |
-
- Unknown or custom datasets
|
| 416 |
-
- DPO training (CRITICAL - 90% of datasets need mapping)
|
| 417 |
-
- Any dataset not explicitly TRL-compatible
|
| 418 |
-
|
| 419 |
-
**Skip validation for known TRL datasets:**
|
| 420 |
-
- `trl-lib/ultrachat_200k`, `trl-lib/Capybara`, `HuggingFaceH4/ultrachat_200k`, etc.
|
| 421 |
-
|
| 422 |
-
### Usage
|
| 423 |
-
|
| 424 |
-
```python
|
| 425 |
-
hf_jobs("uv", {
|
| 426 |
-
"script": "https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py",
|
| 427 |
-
"script_args": ["--dataset", "username/dataset-name", "--split", "train"]
|
| 428 |
-
})
|
| 429 |
-
```
|
| 430 |
-
|
| 431 |
-
The script is fast, and will usually complete synchronously.
|
| 432 |
-
|
| 433 |
-
### Reading Results
|
| 434 |
-
|
| 435 |
-
The output shows compatibility for each training method:
|
| 436 |
-
|
| 437 |
-
- **`β READY`** - Dataset is compatible, use directly
|
| 438 |
-
- **`β NEEDS MAPPING`** - Compatible but needs preprocessing (mapping code provided)
|
| 439 |
-
- **`β INCOMPATIBLE`** - Cannot be used for this method
|
| 440 |
-
|
| 441 |
-
When mapping is needed, the output includes a **"MAPPING CODE"** section with copy-paste ready Python code.
|
| 442 |
-
|
| 443 |
-
### Example Workflow
|
| 444 |
-
|
| 445 |
-
```python
|
| 446 |
-
# 1. Inspect dataset (costs ~$0.01, <1 min on CPU)
|
| 447 |
-
hf_jobs("uv", {
|
| 448 |
-
"script": "https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py",
|
| 449 |
-
"script_args": ["--dataset", "argilla/distilabel-math-preference-dpo", "--split", "train"]
|
| 450 |
-
})
|
| 451 |
-
|
| 452 |
-
# 2. Check output markers:
|
| 453 |
-
# β READY β proceed with training
|
| 454 |
-
# β NEEDS MAPPING β apply mapping code below
|
| 455 |
-
# β INCOMPATIBLE β choose different method/dataset
|
| 456 |
-
|
| 457 |
-
# 3. If mapping needed, apply before training:
|
| 458 |
-
def format_for_dpo(example):
|
| 459 |
-
return {
|
| 460 |
-
'prompt': example['instruction'],
|
| 461 |
-
'chosen': example['chosen_response'],
|
| 462 |
-
'rejected': example['rejected_response'],
|
| 463 |
-
}
|
| 464 |
-
dataset = dataset.map(format_for_dpo, remove_columns=dataset.column_names)
|
| 465 |
-
|
| 466 |
-
# 4. Launch training job with confidence
|
| 467 |
-
```
|
| 468 |
-
|
| 469 |
-
### Common Scenario: DPO Format Mismatch
|
| 470 |
-
|
| 471 |
-
Most DPO datasets use non-standard column names. Example:
|
| 472 |
-
|
| 473 |
-
```
|
| 474 |
-
Dataset has: instruction, chosen_response, rejected_response
|
| 475 |
-
DPO expects: prompt, chosen, rejected
|
| 476 |
-
```
|
| 477 |
-
|
| 478 |
-
The validator detects this and provides exact mapping code to fix it.
|
| 479 |
-
|
| 480 |
## Converting Models to GGUF
|
| 481 |
|
| 482 |
After training, convert models to **GGUF format** for use with llama.cpp, Ollama, LM Studio, and other local inference tools.
|
|
@@ -531,13 +413,15 @@ See `references/training_patterns.md` for detailed examples including:
|
|
| 531 |
### Dataset Misformatted
|
| 532 |
|
| 533 |
**Fix:**
|
| 534 |
-
1. Validate first
|
| 535 |
-
|
| 536 |
-
|
| 537 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 538 |
```
|
| 539 |
-
2. Check output for compatibility markers (✅ READY, ⚠️ NEEDS MAPPING, ❌ INCOMPATIBLE)
|
| 540 |
-
3. Apply mapping code from inspector output if needed
|
| 541 |
|
| 542 |
### Job Timeout
|
| 543 |
|
|
@@ -573,7 +457,7 @@ Add to PEP 723 header:
|
|
| 573 |
- Job times out β Increase timeout, reduce epochs/dataset, use smaller model/LoRA
|
| 574 |
- Model not saved to Hub β Check push_to_hub=True, hub_model_id, secrets=HF_TOKEN
|
| 575 |
- Out of Memory (OOM) β Reduce batch size, increase gradient accumulation, enable LoRA, use larger GPU
|
| 576 |
-
- Dataset format error β
|
| 577 |
- Import/module errors β Add PEP 723 header with dependencies, verify format
|
| 578 |
- Authentication errors β Check `mcp__huggingface__hf_whoami()`, token permissions, secrets parameter
|
| 579 |
|
|
@@ -586,6 +470,7 @@ Add to PEP 723 header:
|
|
| 586 |
- `references/training_patterns.md` - Common training patterns and examples
|
| 587 |
- `references/gguf_conversion.md` - Complete GGUF conversion guide
|
| 588 |
- `references/trackio_guide.md` - Trackio monitoring setup
|
|
|
|
| 589 |
- `references/hardware_guide.md` - Hardware specs and selection
|
| 590 |
- `references/hub_saving.md` - Hub authentication troubleshooting
|
| 591 |
- `references/troubleshooting.md` - Common issues and solutions
|
|
@@ -594,12 +479,10 @@ Add to PEP 723 header:
|
|
| 594 |
- `scripts/train_sft_example.py` - Production SFT template
|
| 595 |
- `scripts/train_dpo_example.py` - Production DPO template
|
| 596 |
- `scripts/train_grpo_example.py` - Production GRPO template
|
|
|
|
| 597 |
- `scripts/estimate_cost.py` - Estimate time and cost (offer when appropriate)
|
| 598 |
- `scripts/convert_to_gguf.py` - Complete GGUF conversion script
|
| 599 |
|
| 600 |
-
### External Scripts
|
| 601 |
-
- [Dataset Inspector](https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py) - Validate dataset format before training (use via `uv run` or `hf_jobs`)
|
| 602 |
-
|
| 603 |
### External Links
|
| 604 |
- [TRL Documentation](https://huggingface.co/docs/trl)
|
| 605 |
- [TRL Jobs Training Guide](https://huggingface.co/docs/trl/en/jobs_training)
|
|
@@ -617,7 +500,7 @@ Add to PEP 723 header:
|
|
| 617 |
4. **Always enable Hub push** - Environment is ephemeral; without push, all results lost
|
| 618 |
5. **Include Trackio** - Use example scripts as templates for real-time monitoring
|
| 619 |
6. **Offer cost estimation** - When parameters are known, use `scripts/estimate_cost.py`
|
| 620 |
-
7. **
|
| 621 |
-
8. **Use
|
| 622 |
-
9. **Validate dataset format** before training with
|
| 623 |
10. **Choose appropriate hardware** for model size; use LoRA for models >7B
|
|
|
|
| 1 |
---
|
| 2 |
name: trl
|
| 3 |
+
description: This skill should be used when users want to train or fine-tune language models using TRL (Transformer Reinforcement Learning) on Hugging Face Jobs infrastructure. Covers SFT, DPO, GRPO, KTO, reward modeling, and PPO training methods, plus GGUF conversion for local deployment. Includes guidance on the TRL Jobs package, UV scripts with PEP 723 format, dataset preparation and validation, hardware selection, cost estimation, Trackio monitoring, Hub authentication, and model persistence. Should be invoked for tasks involving cloud GPU training, GGUF conversion, or when users mention training on Hugging Face Jobs without local GPU setup.
|
| 4 |
license: Complete terms in LICENSE.txt
|
| 5 |
---
|
| 6 |
|
|
|
|
| 14 |
- **SFT** (Supervised Fine-Tuning) - Standard instruction tuning
|
| 15 |
- **DPO** (Direct Preference Optimization) - Alignment from preference data
|
| 16 |
- **GRPO** (Group Relative Policy Optimization) - Online RL training
|
| 17 |
+
- **KTO** (Kahneman-Tversky Optimization) - Preference tuning without paired data
|
| 18 |
- **Reward Modeling** - Train reward models for RLHF
|
| 19 |
+
- **PPO** (Proximal Policy Optimization) - Classic RLHF method
|
| 20 |
|
| 21 |
**For detailed TRL method documentation:**
|
| 22 |
```python
|
|
|
|
| 32 |
|
| 33 |
Use this skill when users want to:
|
| 34 |
- Fine-tune language models on cloud GPUs without local infrastructure
|
| 35 |
+
- Train with TRL methods (SFT, DPO, GRPO, KTO, etc.)
|
| 36 |
- Run training jobs on Hugging Face Jobs infrastructure
|
| 37 |
- Convert trained models to GGUF for local deployment (Ollama, LM Studio, llama.cpp)
|
| 38 |
- Ensure trained models are permanently saved to the Hub
|
|
|
|
| 42 |
|
| 43 |
When assisting with training jobs:
|
| 44 |
|
| 45 |
+
1. **Submit jobs directly with inline scripts** - The `script` parameter accepts Python code directly. Do NOT save to local files unless the user explicitly requests it. Pass the script content as a string to `hf_jobs()`. If user asks to "train a model", "fine-tune", or similar requests, you MUST create the training script AND submit the job immediately.
|
| 46 |
|
| 47 |
2. **Always include Trackio** - Every training script should include Trackio for real-time monitoring. Use example scripts in `scripts/` as templates.
|
| 48 |
|
|
|
|
| 52 |
|
| 53 |
## Local Script Dependencies
|
| 54 |
|
| 55 |
+
To run scripts locally (like `validate_dataset.py`, `estimate_cost.py`), install dependencies:
|
| 56 |
```bash
|
| 57 |
pip install -r requirements.txt
|
| 58 |
```
|
|
|
|
| 63 |
|
| 64 |
### ✅ **Account & Authentication**
**Account & Authentication**
|
| 65 |
- Hugging Face Account with [Pro](https://hf.co/pro), [Team](https://hf.co/enterprise), or [Enterprise](https://hf.co/enterprise) plan (Jobs require paid plan)
|
| 66 |
+
- Authenticated login: Check with `mcp__huggingface__hf_whoami()`
|
| 67 |
- **HF_TOKEN for Hub Push** ⚠️ CRITICAL - Training environment is ephemeral, must push to Hub or ALL training results are lost
|
| 68 |
- Token must have write permissions and is automatically available as `$HF_TOKEN` in job secrets
|
| 69 |
|
| 70 |
### ✅ **Dataset Requirements**
**Dataset Requirements**
|
| 71 |
- Dataset must exist on Hub or be loadable via `datasets.load_dataset()`
|
| 72 |
- Format must match training method (SFT: "messages"/text/prompt-completion; DPO: chosen/rejected; GRPO: prompt-only)
|
| 73 |
+
- Use `scripts/validate_dataset.py` to verify format or `hf_doc_fetch("https://huggingface.co/docs/trl/dataset_formats")` for complete reference
|
| 74 |
- Size appropriate for hardware (Demo: 50-100 examples on t4-small; Production: 1K-10K+ on a10g-large/a100-large)
|
| 75 |
|
| 76 |
### ⚠️ **Critical Settings**
|
|
|
|
| 118 |
|
| 119 |
## Quick Start: Three Approaches
|
| 120 |
|
| 121 |
+
### Approach 1: TRL Jobs Package (Easiest—Recommended for Beginners)
|
| 122 |
|
| 123 |
+
The `trl-jobs` package provides optimized defaults and one-liner training:
|
| 124 |
+
|
| 125 |
+
```bash
|
| 126 |
+
# Install (users only, not needed for this environment)
|
| 127 |
+
pip install trl-jobs
|
| 128 |
+
|
| 129 |
+
# Train with SFT (simplest possible)
|
| 130 |
+
trl-jobs sft \
|
| 131 |
+
--model_name Qwen/Qwen2.5-0.5B \
|
| 132 |
+
--dataset_name trl-lib/Capybara
|
| 133 |
+
```
|
| 134 |
+
|
| 135 |
+
**Benefits:** Pre-configured settings, automatic Trackio integration, automatic Hub push, one-line commands
|
| 136 |
+
**When to use:** User is new to training, standard scenarios, quick experimentation
|
| 137 |
+
**Repository:** https://github.com/huggingface/trl-jobs
|
| 138 |
+
|
| 139 |
+
### Approach 2: UV Scripts (Recommended for Custom Training)
|
| 140 |
+
|
| 141 |
+
UV scripts use PEP 723 inline dependencies for clean, self-contained training. **Submit script content directly inline:**
|
| 142 |
|
| 143 |
```python
|
| 144 |
hf_jobs("uv", {
|
|
|
|
| 185 |
})
|
| 186 |
```
|
| 187 |
|
| 188 |
+
**Benefits:** Clean code, dependencies declared inline (PEP 723), no file saving required
|
| 189 |
+
**When to use:** Custom training logic, full control over training
|
| 190 |
+
**See:** `references/uv_scripts_guide.md` for complete UV scripts guide
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
|
| 192 |
+
### Approach 3: TRL Maintained Scripts (Run Official Examples)
|
| 193 |
|
| 194 |
TRL provides battle-tested scripts for all methods. Can be run from URLs:
|
| 195 |
|
| 196 |
```python
|
| 197 |
hf_jobs("uv", {
|
| 198 |
+
"script": "https://raw.githubusercontent.com/huggingface/trl/main/examples/scripts/sft.py",
|
| 199 |
"script_args": [
|
| 200 |
"--model_name_or_path", "Qwen/Qwen2.5-0.5B",
|
| 201 |
"--dataset_name", "trl-lib/Capybara",
|
|
|
|
| 211 |
|
| 212 |
**Benefits:** No code to write, maintained by TRL team, production-tested
|
| 213 |
**When to use:** Standard TRL training, quick experiments, don't need custom code
|
| 214 |
+
**Available:** sft.py, dpo.py, grpo.py, kto.py, reward.py, ppo.py - https://github.com/huggingface/trl/tree/main/examples/scripts
|
| 215 |
|
| 216 |
### Finding More UV Scripts on Hub
|
| 217 |
|
|
|
|
| 227 |
|
| 228 |
**Popular collections:** ocr, classification, synthetic-data, vllm, dataset-creation
|
| 229 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 230 |
## Hardware Selection
|
| 231 |
|
| 232 |
| Model Size | Recommended Hardware | Cost (approx/hr) | Use Case |
|
|
|
|
| 359 |
|
| 360 |
**Remember:** Wait for user to request status checks. Avoid polling repeatedly.
|
| 361 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 362 |
## Converting Models to GGUF
|
| 363 |
|
| 364 |
After training, convert models to **GGUF format** for use with llama.cpp, Ollama, LM Studio, and other local inference tools.
|
|
|
|
| 413 |
### Dataset Misformatted
|
| 414 |
|
| 415 |
**Fix:**
|
| 416 |
+
1. Validate first: `python scripts/validate_dataset.py --dataset name --method sft`
|
| 417 |
+
2. Check required columns:
|
| 418 |
+
- SFT: `messages` OR `text` OR `prompt`+`completion`
|
| 419 |
+
- DPO: `prompt`, `chosen`, `rejected`
|
| 420 |
+
- GRPO: `prompt` only
|
| 421 |
+
3. Apply formatting if needed:
|
| 422 |
+
```python
|
| 423 |
+
dataset = dataset.map(lambda x: {"text": f"User: {x['input']}\nBot: {x['output']}"})
|
| 424 |
```
|
|
|
|
|
|
|
| 425 |
|
| 426 |
### Job Timeout
|
| 427 |
|
|
|
|
| 457 |
- Job times out → Increase timeout, reduce epochs/dataset, use smaller model/LoRA
|
| 458 |
- Model not saved to Hub → Check push_to_hub=True, hub_model_id, secrets=HF_TOKEN
|
| 459 |
- Out of Memory (OOM) → Reduce batch size, increase gradient accumulation, enable LoRA, use larger GPU
|
| 460 |
+
- Dataset format error → Check format docs, validate dataset with `scripts/validate_dataset.py`
|
| 461 |
- Import/module errors β Add PEP 723 header with dependencies, verify format
|
| 462 |
- Authentication errors β Check `mcp__huggingface__hf_whoami()`, token permissions, secrets parameter
|
| 463 |
|
|
|
|
| 470 |
- `references/training_patterns.md` - Common training patterns and examples
|
| 471 |
- `references/gguf_conversion.md` - Complete GGUF conversion guide
|
| 472 |
- `references/trackio_guide.md` - Trackio monitoring setup
|
| 473 |
+
- `references/uv_scripts_guide.md` - Complete UV scripts guide
|
| 474 |
- `references/hardware_guide.md` - Hardware specs and selection
|
| 475 |
- `references/hub_saving.md` - Hub authentication troubleshooting
|
| 476 |
- `references/troubleshooting.md` - Common issues and solutions
|
|
|
|
| 479 |
- `scripts/train_sft_example.py` - Production SFT template
|
| 480 |
- `scripts/train_dpo_example.py` - Production DPO template
|
| 481 |
- `scripts/train_grpo_example.py` - Production GRPO template
|
| 482 |
+
- `scripts/validate_dataset.py` - Validate dataset format before training
|
| 483 |
- `scripts/estimate_cost.py` - Estimate time and cost (offer when appropriate)
|
| 484 |
- `scripts/convert_to_gguf.py` - Complete GGUF conversion script
|
| 485 |
|
|
|
|
|
|
|
|
|
|
| 486 |
### External Links
|
| 487 |
- [TRL Documentation](https://huggingface.co/docs/trl)
|
| 488 |
- [TRL Jobs Training Guide](https://huggingface.co/docs/trl/en/jobs_training)
|
|
|
|
| 500 |
4. **Always enable Hub push** - Environment is ephemeral; without push, all results lost
|
| 501 |
5. **Include Trackio** - Use example scripts as templates for real-time monitoring
|
| 502 |
6. **Offer cost estimation** - When parameters are known, use `scripts/estimate_cost.py`
|
| 503 |
+
7. **Three approaches available:** TRL Jobs package (easiest), UV scripts (custom, modern), TRL maintained scripts (official examples)
|
| 504 |
+
8. **Use doc-fetch/doc-search** for latest TRL documentation
|
| 505 |
+
9. **Validate dataset format** before training with `scripts/validate_dataset.py`
|
| 506 |
10. **Choose appropriate hardware** for model size; use LoRA for models >7B
|
trl/references/trackio_guide.md
CHANGED
|
@@ -1,12 +1,11 @@
|
|
| 1 |
# Trackio Integration for TRL Training
|
| 2 |
|
| 3 |
-
**Trackio** is
|
| 4 |
|
| 5 |
-
β οΈ **IMPORTANT**:
|
| 6 |
-
-
|
| 7 |
-
-
|
| 8 |
-
- Without a Space, metrics are
|
| 9 |
-
- The Space dashboard persists your training metrics permanently
|
| 10 |
|
| 11 |
## Setting Up Trackio for Jobs
|
| 12 |
|
|
@@ -36,7 +35,7 @@ import trackio
|
|
| 36 |
|
| 37 |
trackio.init(
|
| 38 |
project="my-training",
|
| 39 |
-
space_id="username/trackio", # CRITICAL for Jobs!
|
| 40 |
config={
|
| 41 |
"model": "Qwen/Qwen2.5-0.5B",
|
| 42 |
"dataset": "trl-lib/Capybara",
|
|
@@ -79,11 +78,23 @@ Trackio automatically logs:
|
|
| 79 |
## Viewing the Dashboard
|
| 80 |
|
| 81 |
After starting training:
|
| 82 |
-
1. Navigate to the Space: `https://huggingface.co/spaces/username/trackio`
|
| 83 |
2. The Gradio dashboard shows all tracked experiments
|
| 84 |
3. Filter by project, compare runs, view charts with smoothing
|
| 85 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
## Recommendation
|
| 87 |
|
| 88 |
- **Trackio**: Best for real-time monitoring during long training runs
|
|
|
|
| 89 |
- **Weights & Biases**: Best for team collaboration, requires account
|
|
|
|
| 1 |
# Trackio Integration for TRL Training
|
| 2 |
|
| 3 |
+
**Trackio** is a local-first experiment tracking library that provides real-time metrics visualization via a Gradio dashboard.
|
| 4 |
|
| 5 |
+
⚠️ **IMPORTANT**: Trackio is local-first, which means:
|
| 6 |
+
- It runs a dashboard on the machine where training happens
|
| 7 |
+
- For Jobs training, sync to a Hugging Face Space to view metrics
|
| 8 |
+
- Without a Space, metrics are only accessible during the job (then lost)
|
|
|
|
| 9 |
|
| 10 |
## Setting Up Trackio for Jobs
|
| 11 |
|
|
|
|
| 35 |
|
| 36 |
trackio.init(
|
| 37 |
project="my-training",
|
| 38 |
+
space_id="username/my-trackio-dashboard", # CRITICAL for Jobs!
|
| 39 |
config={
|
| 40 |
"model": "Qwen/Qwen2.5-0.5B",
|
| 41 |
"dataset": "trl-lib/Capybara",
|
|
|
|
| 78 |
## Viewing the Dashboard
|
| 79 |
|
| 80 |
After starting training:
|
| 81 |
+
1. Navigate to the Space: `https://huggingface.co/spaces/username/my-trackio-dashboard`
|
| 82 |
2. The Gradio dashboard shows all tracked experiments
|
| 83 |
3. Filter by project, compare runs, view charts with smoothing
|
| 84 |
|
| 85 |
+
## Alternative: TensorBoard (Simpler for Jobs)
|
| 86 |
+
|
| 87 |
+
For simpler setup without needing a Space:
|
| 88 |
+
```python
|
| 89 |
+
SFTConfig(
|
| 90 |
+
report_to="tensorboard", # Logs saved with model to Hub
|
| 91 |
+
)
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
TensorBoard logs are automatically saved with the model and viewable via TensorBoard locally after downloading.
|
| 95 |
+
|
| 96 |
## Recommendation
|
| 97 |
|
| 98 |
- **Trackio**: Best for real-time monitoring during long training runs
|
| 99 |
+
- **TensorBoard**: Best for post-training analysis, simpler setup
|
| 100 |
- **Weights & Biases**: Best for team collaboration, requires account
|
trl/references/training_methods.md
CHANGED
|
@@ -94,6 +94,19 @@ hf_jobs("uv", {
|
|
| 94 |
|
| 95 |
**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/grpo_trainer")`
|
| 96 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
## Reward Modeling
|
| 98 |
|
| 99 |
**What it is:** Train a reward model to score responses, used as a component in RLHF pipelines.
|
|
@@ -107,6 +120,21 @@ hf_jobs("uv", {
|
|
| 107 |
|
| 108 |
**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/reward_trainer")`
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
## Method Selection Guide
|
| 111 |
|
| 112 |
| Method | Complexity | Data Required | Use Case |
|
|
@@ -114,7 +142,9 @@ hf_jobs("uv", {
|
|
| 114 |
| **SFT** | Low | Demonstrations | Initial fine-tuning |
|
| 115 |
| **DPO** | Medium | Paired preferences | Post-SFT alignment |
|
| 116 |
| **GRPO** | Medium | Prompts + reward fn | Online RL with automatic rewards |
|
|
|
|
| 117 |
| **Reward** | Medium | Paired preferences | Building RLHF pipeline |
|
|
|
|
| 118 |
|
| 119 |
## Recommended Pipeline
|
| 120 |
|
|
@@ -126,6 +156,7 @@ hf_jobs("uv", {
|
|
| 126 |
**For advanced RL scenarios:**
|
| 127 |
1. **Start with SFT** - Fine-tune base model
|
| 128 |
2. **Train reward model** - On preference data
|
|
|
|
| 129 |
|
| 130 |
## Dataset Format Reference
|
| 131 |
|
|
@@ -135,9 +166,8 @@ hf_doc_fetch("https://huggingface.co/docs/trl/dataset_formats")
|
|
| 135 |
```
|
| 136 |
|
| 137 |
Or validate your dataset:
|
| 138 |
-
```
|
| 139 |
-
|
| 140 |
-
--dataset your/dataset --split train
|
| 141 |
```
|
| 142 |
|
| 143 |
## See Also
|
|
@@ -145,4 +175,4 @@ uv run https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspect
|
|
| 145 |
- `references/training_patterns.md` - Common training patterns and examples
|
| 146 |
- `scripts/train_sft_example.py` - Complete SFT template
|
| 147 |
- `scripts/train_dpo_example.py` - Complete DPO template
|
| 148 |
-
-
|
|
|
|
| 94 |
|
| 95 |
**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/grpo_trainer")`
|
| 96 |
|
| 97 |
+
## Kahneman-Tversky Optimization (KTO)
|
| 98 |
+
|
| 99 |
+
**What it is:** Preference tuning without paired data - uses independent positive/negative examples.
|
| 100 |
+
|
| 101 |
+
**When to use:**
|
| 102 |
+
- Have preference data but not paired comparisons
|
| 103 |
+
- Simpler data collection than DPO
|
| 104 |
+
- Want to incorporate human feedback without explicit pairs
|
| 105 |
+
|
| 106 |
+
**Dataset format:** Examples with binary labels (desirable/undesirable) but not paired
|
| 107 |
+
|
| 108 |
+
**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/kto_trainer")`
|
| 109 |
+
|
| 110 |
## Reward Modeling
|
| 111 |
|
| 112 |
**What it is:** Train a reward model to score responses, used as a component in RLHF pipelines.
|
|
|
|
| 120 |
|
| 121 |
**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/reward_trainer")`
|
| 122 |
|
| 123 |
+
## Proximal Policy Optimization (PPO)
|
| 124 |
+
|
| 125 |
+
**What it is:** Classic RLHF method using a reward model to guide policy optimization.
|
| 126 |
+
|
| 127 |
+
**When to use:**
|
| 128 |
+
- Full RLHF pipeline
|
| 129 |
+
- Have trained reward model
|
| 130 |
+
- Need fine-grained control over optimization
|
| 131 |
+
|
| 132 |
+
**Requirements:** Pre-trained reward model
|
| 133 |
+
|
| 134 |
+
**Note:** PPO is more complex than DPO. For most use cases, start with DPO.
|
| 135 |
+
|
| 136 |
+
**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/ppo_trainer")`
|
| 137 |
+
|
| 138 |
## Method Selection Guide
|
| 139 |
|
| 140 |
| Method | Complexity | Data Required | Use Case |
|
|
|
|
| 142 |
| **SFT** | Low | Demonstrations | Initial fine-tuning |
|
| 143 |
| **DPO** | Medium | Paired preferences | Post-SFT alignment |
|
| 144 |
| **GRPO** | Medium | Prompts + reward fn | Online RL with automatic rewards |
|
| 145 |
+
| **KTO** | Medium | Unpaired preferences | Alignment with simpler data |
|
| 146 |
| **Reward** | Medium | Paired preferences | Building RLHF pipeline |
|
| 147 |
+
| **PPO** | High | Demonstrations + reward model | Full RLHF |
|
| 148 |
|
| 149 |
## Recommended Pipeline
|
| 150 |
|
|
|
|
| 156 |
**For advanced RL scenarios:**
|
| 157 |
1. **Start with SFT** - Fine-tune base model
|
| 158 |
2. **Train reward model** - On preference data
|
| 159 |
+
3. **Apply GRPO or PPO** - Online RL with reward model
|
| 160 |
|
| 161 |
## Dataset Format Reference
|
| 162 |
|
|
|
|
| 166 |
```
|
| 167 |
|
| 168 |
Or validate your dataset:
|
| 169 |
+
```python
|
| 170 |
+
# See scripts/validate_dataset.py
|
|
|
|
| 171 |
```
|
| 172 |
|
| 173 |
## See Also
|
|
|
|
| 175 |
- `references/training_patterns.md` - Common training patterns and examples
|
| 176 |
- `scripts/train_sft_example.py` - Complete SFT template
|
| 177 |
- `scripts/train_dpo_example.py` - Complete DPO template
|
| 178 |
+
- `scripts/validate_dataset.py` - Dataset format validation tool
|
trl/references/training_patterns.md
CHANGED
|
@@ -39,7 +39,7 @@ from datasets import load_dataset
|
|
| 39 |
from trl import DPOTrainer, DPOConfig
|
| 40 |
import trackio
|
| 41 |
|
| 42 |
-
trackio.init(project="dpo-training", space_id="username/
|
| 43 |
|
| 44 |
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
|
| 45 |
|
|
|
|
| 39 |
from trl import DPOTrainer, DPOConfig
|
| 40 |
import trackio
|
| 41 |
|
| 42 |
+
trackio.init(project="dpo-training", space_id="username/my-dashboard")
|
| 43 |
|
| 44 |
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
|
| 45 |
|
trl/references/troubleshooting.md
CHANGED
|
@@ -103,15 +103,8 @@ trainer = SFTTrainer(
|
|
| 103 |
|
| 104 |
2. **Validate dataset before training:**
|
| 105 |
```bash
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
```
|
| 109 |
-
Or via hf_jobs:
|
| 110 |
-
```python
|
| 111 |
-
hf_jobs("uv", {
|
| 112 |
-
"script": "https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py",
|
| 113 |
-
"script_args": ["--dataset", "dataset-name", "--split", "train"]
|
| 114 |
-
})
|
| 115 |
```
|
| 116 |
|
| 117 |
3. **Verify field names:**
|
|
@@ -257,7 +250,7 @@ If issues persist:
|
|
| 257 |
3. **Review related guides:**
|
| 258 |
- `references/hub_saving.md` - Hub authentication issues
|
| 259 |
- `references/hardware_guide.md` - Hardware selection and specs
|
|
|
|
| 260 |
- `references/training_patterns.md` - Eval dataset requirements
|
| 261 |
-
- SKILL.md "Working with Scripts" section - Script format and URL issues
|
| 262 |
|
| 263 |
4. **Ask in HF forums:** https://discuss.huggingface.co/
|
|
|
|
| 103 |
|
| 104 |
2. **Validate dataset before training:**
|
| 105 |
```bash
|
| 106 |
+
python scripts/validate_dataset.py <dataset-name> <method>
|
| 107 |
+
# e.g., python scripts/validate_dataset.py trl-lib/Capybara sft
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
```
|
| 109 |
|
| 110 |
3. **Verify field names:**
|
|
|
|
| 250 |
3. **Review related guides:**
|
| 251 |
- `references/hub_saving.md` - Hub authentication issues
|
| 252 |
- `references/hardware_guide.md` - Hardware selection and specs
|
| 253 |
+
- `references/uv_scripts_guide.md` - UV script format issues
|
| 254 |
- `references/training_patterns.md` - Eval dataset requirements
|
|
|
|
| 255 |
|
| 256 |
4. **Ask in HF forums:** https://discuss.huggingface.co/
|
trl/references/uv_scripts_guide.md
ADDED
|
@@ -0,0 +1,414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UV Scripts Guide for TRL Training
|
| 2 |
+
|
| 3 |
+
UV scripts are self-contained Python scripts with inline dependency declarations (PEP 723). They're the modern, recommended approach for custom TRL training.
|
| 4 |
+
|
| 5 |
+
## What are UV Scripts?
|
| 6 |
+
|
| 7 |
+
UV scripts declare dependencies at the top of the file using special comment syntax:
|
| 8 |
+
|
| 9 |
+
```python
|
| 10 |
+
# /// script
|
| 11 |
+
# dependencies = [
|
| 12 |
+
# "trl>=0.12.0",
|
| 13 |
+
# "transformers>=4.36.0",
|
| 14 |
+
# ]
|
| 15 |
+
# ///
|
| 16 |
+
|
| 17 |
+
# Your training code here
|
| 18 |
+
from trl import SFTTrainer
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
## Benefits
|
| 22 |
+
|
| 23 |
+
1. **Self-contained**: Dependencies are part of the script
|
| 24 |
+
2. **Version control**: Pin exact versions for reproducibility
|
| 25 |
+
3. **No setup files**: No requirements.txt or setup.py needed
|
| 26 |
+
4. **Portable**: Run anywhere UV is installed
|
| 27 |
+
5. **Clean**: Much cleaner than bash + pip + python strings
|
| 28 |
+
|
| 29 |
+
## Creating a UV Script
|
| 30 |
+
|
| 31 |
+
### Step 1: Define Dependencies
|
| 32 |
+
|
| 33 |
+
Start with dependency declaration:
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
# /// script
|
| 37 |
+
# dependencies = [
|
| 38 |
+
# "trl>=0.12.0", # TRL for training
|
| 39 |
+
# "transformers>=4.36.0", # Transformers library
|
| 40 |
+
# "datasets>=2.14.0", # Dataset loading
|
| 41 |
+
# "accelerate>=0.24.0", # Distributed training
|
| 42 |
+
# "peft>=0.7.0", # LoRA/PEFT (optional)
|
| 43 |
+
# ]
|
| 44 |
+
# ///
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
### Step 2: Add Training Code
|
| 48 |
+
|
| 49 |
+
```python
|
| 50 |
+
# /// script
|
| 51 |
+
# dependencies = ["trl", "peft"]
|
| 52 |
+
# ///
|
| 53 |
+
|
| 54 |
+
from datasets import load_dataset
|
| 55 |
+
from peft import LoraConfig
|
| 56 |
+
from trl import SFTTrainer, SFTConfig
|
| 57 |
+
|
| 58 |
+
# Load dataset
|
| 59 |
+
dataset = load_dataset("trl-lib/Capybara", split="train")
|
| 60 |
+
|
| 61 |
+
# Configure training
|
| 62 |
+
config = SFTConfig(
|
| 63 |
+
output_dir="my-model",
|
| 64 |
+
num_train_epochs=3,
|
| 65 |
+
push_to_hub=True,
|
| 66 |
+
hub_model_id="username/my-model",
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
# Train
|
| 70 |
+
trainer = SFTTrainer(
|
| 71 |
+
model="Qwen/Qwen2.5-0.5B",
|
| 72 |
+
train_dataset=dataset,
|
| 73 |
+
args=config,
|
| 74 |
+
peft_config=LoraConfig(r=16, lora_alpha=32),
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
trainer.train()
|
| 78 |
+
trainer.push_to_hub()
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
### Step 3: Run on Jobs
|
| 82 |
+
|
| 83 |
+
```python
|
| 84 |
+
hf_jobs("uv", {
|
| 85 |
+
"script": "train.py", # or URL
|
| 86 |
+
"flavor": "a10g-large",
|
| 87 |
+
"timeout": "2h",
|
| 88 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 89 |
+
})
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
## Running Scripts from URLs
|
| 93 |
+
|
| 94 |
+
UV scripts can be run directly from URLs:
|
| 95 |
+
|
| 96 |
+
```python
|
| 97 |
+
hf_jobs("uv", {
|
| 98 |
+
"script": "https://gist.github.com/username/abc123/raw/train.py",
|
| 99 |
+
"flavor": "a10g-large",
|
| 100 |
+
"timeout": "2h",
|
| 101 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 102 |
+
})
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
**Benefits:**
|
| 106 |
+
- Share scripts via GitHub Gists
|
| 107 |
+
- Version control in Git repos
|
| 108 |
+
- Scripts accessible from anywhere
|
| 109 |
+
|
| 110 |
+
## Working with Local Scripts
|
| 111 |
+
|
| 112 |
+
⚠️ **Important:** The `hf_jobs("uv", ...)` command does NOT support local file paths directly. You must make scripts accessible via URL.
|
| 113 |
+
|
| 114 |
+
### Why Local Paths Don't Work
|
| 115 |
+
|
| 116 |
+
The Jobs API runs in isolated Docker containers without access to your local filesystem. Scripts must be:
|
| 117 |
+
- Publicly accessible URLs, OR
|
| 118 |
+
- Accessible via authentication (HF_TOKEN for private repos)
|
| 119 |
+
|
| 120 |
+
**Don't:**
|
| 121 |
+
```python
|
| 122 |
+
# β These will all fail
|
| 123 |
+
hf_jobs("uv", {"script": "train.py"})
|
| 124 |
+
hf_jobs("uv", {"script": "./scripts/train.py"})
|
| 125 |
+
hf_jobs("uv", {"script": "/path/to/train.py"})
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
**Do:**
|
| 129 |
+
```python
|
| 130 |
+
# ✅ These work
|
| 131 |
+
hf_jobs("uv", {"script": "https://huggingface.co/user/repo/resolve/main/train.py"})
|
| 132 |
+
hf_jobs("uv", {"script": "https://raw.githubusercontent.com/user/repo/main/train.py"})
|
| 133 |
+
hf_jobs("uv", {"script": "https://gist.githubusercontent.com/user/id/raw/train.py"})
|
| 134 |
+
```
|
| 135 |
+
|
| 136 |
+
### Recommended: Upload to Hugging Face Hub
|
| 137 |
+
|
| 138 |
+
The easiest way to use local scripts is to upload them to a Hugging Face repository:
|
| 139 |
+
|
| 140 |
+
```bash
|
| 141 |
+
# Create a dedicated scripts repo
|
| 142 |
+
huggingface-cli repo create my-training-scripts --type model
|
| 143 |
+
|
| 144 |
+
# Upload your script
|
| 145 |
+
huggingface-cli upload my-training-scripts ./train.py train.py
|
| 146 |
+
|
| 147 |
+
# If you update the script later
|
| 148 |
+
huggingface-cli upload my-training-scripts ./train.py train.py --commit-message "Updated training params"
|
| 149 |
+
|
| 150 |
+
# Use in jobs
|
| 151 |
+
script_url = "https://huggingface.co/USERNAME/my-training-scripts/resolve/main/train.py"
|
| 152 |
+
|
| 153 |
+
hf_jobs("uv", {
|
| 154 |
+
"script": script_url,
|
| 155 |
+
"flavor": "a10g-large",
|
| 156 |
+
"timeout": "2h",
|
| 157 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 158 |
+
})
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
**Benefits:**
|
| 162 |
+
- β
Version control via Git
|
| 163 |
+
- β
Private repos supported (with HF_TOKEN)
|
| 164 |
+
- β
Easy to share and update
|
| 165 |
+
- β
No external dependencies
|
| 166 |
+
- β
Integrates with HF ecosystem
|
| 167 |
+
|
| 168 |
+
**For Private Scripts:**
|
| 169 |
+
```python
|
| 170 |
+
# Your script is in a private repo
|
| 171 |
+
hf_jobs("uv", {
|
| 172 |
+
"script": "https://huggingface.co/USERNAME/private-scripts/resolve/main/train.py",
|
| 173 |
+
"flavor": "a10g-large",
|
| 174 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"} # Allows access to private repo
|
| 175 |
+
})
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
### Alternative: GitHub Gist
|
| 179 |
+
|
| 180 |
+
For quick scripts or one-off experiments:
|
| 181 |
+
|
| 182 |
+
```bash
|
| 183 |
+
# 1. Create a gist at https://gist.github.com
|
| 184 |
+
# 2. Paste your script
|
| 185 |
+
# 3. Click "Create public gist" (or secret gist)
|
| 186 |
+
# 4. Click the "Raw" button to get the raw URL
|
| 187 |
+
|
| 188 |
+
# Use in jobs
|
| 189 |
+
hf_jobs("uv", {
|
| 190 |
+
"script": "https://gist.githubusercontent.com/username/gist-id/raw/train.py",
|
| 191 |
+
"flavor": "a10g-large"
|
| 192 |
+
})
|
| 193 |
+
```
|
| 194 |
+
|
| 195 |
+
**Benefits:**
|
| 196 |
+
- β
Quick and easy
|
| 197 |
+
- β
No HF CLI setup needed
|
| 198 |
+
- β
Good for sharing examples
|
| 199 |
+
|
| 200 |
+
**Limitations:**
|
| 201 |
+
- β Less version control than Git repos
|
| 202 |
+
- β Secret gists are still publicly accessible via URL
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
## Using TRL Example Scripts
|
| 206 |
+
|
| 207 |
+
TRL provides maintained scripts that are UV-compatible:
|
| 208 |
+
|
| 209 |
+
```python
|
| 210 |
+
hf_jobs("uv", {
|
| 211 |
+
"script": "https://raw.githubusercontent.com/huggingface/trl/main/examples/scripts/sft.py",
|
| 212 |
+
"script_args": [
|
| 213 |
+
"--model_name_or_path", "Qwen/Qwen2.5-0.5B",
|
| 214 |
+
"--dataset_name", "trl-lib/Capybara",
|
| 215 |
+
"--output_dir", "my-model",
|
| 216 |
+
"--push_to_hub",
|
| 217 |
+
"--hub_model_id", "username/my-model"
|
| 218 |
+
],
|
| 219 |
+
"flavor": "a10g-large",
|
| 220 |
+
"timeout": "2h",
|
| 221 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 222 |
+
})
|
| 223 |
+
```
|
| 224 |
+
|
| 225 |
+
**Available TRL scripts:**
|
| 226 |
+
- `sft.py` - Supervised fine-tuning
|
| 227 |
+
- `dpo.py` - Direct Preference Optimization
|
| 228 |
+
- `kto.py` - KTO training
|
| 229 |
+
- `grpo.py` - GRPO training
|
| 230 |
+
- `reward.py` - Reward model training
|
| 231 |
+
- `prm.py` - Process reward model
|
| 232 |
+
|
| 233 |
+
All at: https://github.com/huggingface/trl/tree/main/examples/scripts
|
| 234 |
+
|
| 235 |
+
## Best Practices
|
| 236 |
+
|
| 237 |
+
### 1. Pin Versions
|
| 238 |
+
|
| 239 |
+
Always pin dependency versions for reproducibility:
|
| 240 |
+
|
| 241 |
+
```python
|
| 242 |
+
# /// script
|
| 243 |
+
# dependencies = [
|
| 244 |
+
# "trl==0.12.0", # Exact version
|
| 245 |
+
# "transformers>=4.36.0", # Minimum version
|
| 246 |
+
# ]
|
| 247 |
+
# ///
|
| 248 |
+
```
|
| 249 |
+
|
| 250 |
+
### 2. Add Logging
|
| 251 |
+
|
| 252 |
+
Include progress logging for monitoring:
|
| 253 |
+
|
| 254 |
+
```python
|
| 255 |
+
print("✅ Dataset loaded")
|
| 256 |
+
print("π Starting training...")
|
| 257 |
+
print(f"π Training on {len(dataset)} examples")
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
### 3. Validate Inputs
|
| 261 |
+
|
| 262 |
+
Check dataset and configuration before training:
|
| 263 |
+
|
| 264 |
+
```python
|
| 265 |
+
dataset = load_dataset("trl-lib/Capybara", split="train")
|
| 266 |
+
assert len(dataset) > 0, "Dataset is empty!"
|
| 267 |
+
print(f"β
Dataset loaded: {len(dataset)} examples")
|
| 268 |
+
```
|
| 269 |
+
|
| 270 |
+
### 4. Add Comments
|
| 271 |
+
|
| 272 |
+
Document the script for future reference:
|
| 273 |
+
|
| 274 |
+
```python
|
| 275 |
+
# Train Qwen-0.5B on Capybara dataset using LoRA
|
| 276 |
+
# Expected runtime: ~2 hours on a10g-large
|
| 277 |
+
# Cost estimate: ~$6-8
|
| 278 |
+
```
|
| 279 |
+
|
| 280 |
+
### 5. Test Locally First
|
| 281 |
+
|
| 282 |
+
Test scripts locally before running on Jobs:
|
| 283 |
+
|
| 284 |
+
```bash
|
| 285 |
+
uv run train.py # Runs locally with uv
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
## Docker Images
|
| 289 |
+
|
| 290 |
+
### Default Image
|
| 291 |
+
|
| 292 |
+
UV scripts run on default Python image with UV installed.
|
| 293 |
+
|
| 294 |
+
### TRL Image
|
| 295 |
+
|
| 296 |
+
Use official TRL image for faster startup:
|
| 297 |
+
|
| 298 |
+
```python
|
| 299 |
+
hf_jobs("uv", {
|
| 300 |
+
"script": "train.py",
|
| 301 |
+
"image": "huggingface/trl", # Pre-installed TRL dependencies
|
| 302 |
+
"flavor": "a10g-large",
|
| 303 |
+
"timeout": "2h",
|
| 304 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 305 |
+
})
|
| 306 |
+
```
|
| 307 |
+
|
| 308 |
+
**Benefits:**
|
| 309 |
+
- Faster job startup (no pip install)
|
| 310 |
+
- All TRL dependencies pre-installed
|
| 311 |
+
- Tested and maintained by HF
|
| 312 |
+
|
| 313 |
+
## Template Scripts
|
| 314 |
+
|
| 315 |
+
### Basic SFT Template
|
| 316 |
+
|
| 317 |
+
```python
|
| 318 |
+
# /// script
|
| 319 |
+
# dependencies = ["trl>=0.12.0"]
|
| 320 |
+
# ///
|
| 321 |
+
|
| 322 |
+
from datasets import load_dataset
|
| 323 |
+
from trl import SFTTrainer, SFTConfig
|
| 324 |
+
|
| 325 |
+
dataset = load_dataset("DATASET_NAME", split="train")
|
| 326 |
+
|
| 327 |
+
trainer = SFTTrainer(
|
| 328 |
+
model="MODEL_NAME",
|
| 329 |
+
train_dataset=dataset,
|
| 330 |
+
args=SFTConfig(
|
| 331 |
+
output_dir="OUTPUT_DIR",
|
| 332 |
+
num_train_epochs=3,
|
| 333 |
+
push_to_hub=True,
|
| 334 |
+
hub_model_id="USERNAME/MODEL_NAME",
|
| 335 |
+
)
|
| 336 |
+
)
|
| 337 |
+
|
| 338 |
+
trainer.train()
|
| 339 |
+
trainer.push_to_hub()
|
| 340 |
+
```
|
| 341 |
+
|
| 342 |
+
### SFT with LoRA Template
|
| 343 |
+
|
| 344 |
+
```python
|
| 345 |
+
# /// script
|
| 346 |
+
# dependencies = ["trl>=0.12.0", "peft>=0.7.0"]
|
| 347 |
+
# ///
|
| 348 |
+
|
| 349 |
+
from datasets import load_dataset
|
| 350 |
+
from peft import LoraConfig
|
| 351 |
+
from trl import SFTTrainer, SFTConfig
|
| 352 |
+
|
| 353 |
+
dataset = load_dataset("DATASET_NAME", split="train")
|
| 354 |
+
|
| 355 |
+
trainer = SFTTrainer(
|
| 356 |
+
model="MODEL_NAME",
|
| 357 |
+
train_dataset=dataset,
|
| 358 |
+
peft_config=LoraConfig(r=16, lora_alpha=32),
|
| 359 |
+
args=SFTConfig(
|
| 360 |
+
output_dir="OUTPUT_DIR",
|
| 361 |
+
num_train_epochs=3,
|
| 362 |
+
push_to_hub=True,
|
| 363 |
+
hub_model_id="USERNAME/MODEL_NAME",
|
| 364 |
+
)
|
| 365 |
+
)
|
| 366 |
+
|
| 367 |
+
trainer.train()
|
| 368 |
+
trainer.push_to_hub()
|
| 369 |
+
```
|
| 370 |
+
|
| 371 |
+
### DPO Template
|
| 372 |
+
|
| 373 |
+
```python
|
| 374 |
+
# /// script
|
| 375 |
+
# dependencies = ["trl>=0.12.0"]
|
| 376 |
+
# ///
|
| 377 |
+
|
| 378 |
+
from datasets import load_dataset
|
| 379 |
+
from transformers import AutoTokenizer
|
| 380 |
+
from trl import DPOTrainer, DPOConfig
|
| 381 |
+
|
| 382 |
+
model_name = "MODEL_NAME"
|
| 383 |
+
dataset = load_dataset("DATASET_NAME", split="train")
|
| 384 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 385 |
+
|
| 386 |
+
trainer = DPOTrainer(
|
| 387 |
+
model=model_name,
|
| 388 |
+
train_dataset=dataset,
|
| 389 |
+
processing_class=tokenizer,  # TRL >=0.12 renamed 'tokenizer' to 'processing_class'
|
| 390 |
+
args=DPOConfig(
|
| 391 |
+
output_dir="OUTPUT_DIR",
|
| 392 |
+
num_train_epochs=3,
|
| 393 |
+
push_to_hub=True,
|
| 394 |
+
hub_model_id="USERNAME/MODEL_NAME",
|
| 395 |
+
)
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
trainer.train()
|
| 399 |
+
trainer.push_to_hub()
|
| 400 |
+
```
|
| 401 |
+
|
| 402 |
+
## Troubleshooting
|
| 403 |
+
|
| 404 |
+
### Issue: Dependencies not installing
|
| 405 |
+
**Check:** Verify dependency names and versions are correct
|
| 406 |
+
|
| 407 |
+
### Issue: Script not found
|
| 408 |
+
**Check:** Verify URL is accessible and points to raw file
|
| 409 |
+
|
| 410 |
+
### Issue: Import errors
|
| 411 |
+
**Solution:** Add missing dependencies to `dependencies` list
|
| 412 |
+
|
| 413 |
+
### Issue: Slow startup
|
| 414 |
+
**Solution:** Use `image="huggingface/trl"` for pre-installed dependencies
|
trl/scripts/train_dpo_example.py
CHANGED
|
@@ -32,7 +32,7 @@ from trl import DPOTrainer, DPOConfig
|
|
| 32 |
# Initialize Trackio for real-time monitoring
|
| 33 |
trackio.init(
|
| 34 |
project="qwen-dpo-alignment",
|
| 35 |
-
space_id="username/trackio",
|
| 36 |
config={
|
| 37 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 38 |
"dataset": "trl-lib/ultrafeedback_binarized",
|
|
@@ -110,4 +110,4 @@ trainer.push_to_hub()
|
|
| 110 |
trackio.finish()
|
| 111 |
|
| 112 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-dpo-aligned")
|
| 113 |
-
print("π View metrics at: https://huggingface.co/spaces/username/trackio")
|
|
|
|
| 32 |
# Initialize Trackio for real-time monitoring
|
| 33 |
trackio.init(
|
| 34 |
project="qwen-dpo-alignment",
|
| 35 |
+
space_id="username/my-trackio-dashboard",
|
| 36 |
config={
|
| 37 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 38 |
"dataset": "trl-lib/ultrafeedback_binarized",
|
|
|
|
| 110 |
trackio.finish()
|
| 111 |
|
| 112 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-dpo-aligned")
|
| 113 |
+
print("π View metrics at: https://huggingface.co/spaces/username/my-trackio-dashboard")
|
trl/scripts/train_grpo_example.py
CHANGED
|
@@ -36,7 +36,7 @@ from trl import GRPOTrainer, GRPOConfig
|
|
| 36 |
# Initialize Trackio for real-time monitoring
|
| 37 |
trackio.init(
|
| 38 |
project="qwen-grpo-math",
|
| 39 |
-
space_id="username/trackio",
|
| 40 |
config={
|
| 41 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 42 |
"dataset": "trl-lib/math_shepherd",
|
|
@@ -94,4 +94,4 @@ trainer.push_to_hub()
|
|
| 94 |
trackio.finish()
|
| 95 |
|
| 96 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-grpo-math")
|
| 97 |
-
print("π View metrics at: https://huggingface.co/spaces/username/trackio")
|
|
|
|
| 36 |
# Initialize Trackio for real-time monitoring
|
| 37 |
trackio.init(
|
| 38 |
project="qwen-grpo-math",
|
| 39 |
+
space_id="username/my-trackio-dashboard",
|
| 40 |
config={
|
| 41 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 42 |
"dataset": "trl-lib/math_shepherd",
|
|
|
|
| 94 |
trackio.finish()
|
| 95 |
|
| 96 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-grpo-math")
|
| 97 |
+
print("π View metrics at: https://huggingface.co/spaces/username/my-trackio-dashboard")
|
trl/scripts/train_sft_example.py
CHANGED
|
@@ -39,7 +39,7 @@ from trl import SFTTrainer, SFTConfig
|
|
| 39 |
# Initialize Trackio for real-time monitoring
|
| 40 |
trackio.init(
|
| 41 |
project="qwen-capybara-sft",
|
| 42 |
-
space_id="username/trackio", # Creates Space if it doesn't exist
|
| 43 |
config={
|
| 44 |
"model": "Qwen/Qwen2.5-0.5B",
|
| 45 |
"dataset": "trl-lib/Capybara",
|
|
@@ -124,4 +124,4 @@ trainer.push_to_hub()
|
|
| 124 |
trackio.finish()
|
| 125 |
|
| 126 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-capybara-sft")
|
| 127 |
-
print("π View metrics at: https://huggingface.co/spaces/username/trackio")
|
|
|
|
| 39 |
# Initialize Trackio for real-time monitoring
|
| 40 |
trackio.init(
|
| 41 |
project="qwen-capybara-sft",
|
| 42 |
+
space_id="username/my-trackio-dashboard", # Creates Space if it doesn't exist
|
| 43 |
config={
|
| 44 |
"model": "Qwen/Qwen2.5-0.5B",
|
| 45 |
"dataset": "trl-lib/Capybara",
|
|
|
|
| 124 |
trackio.finish()
|
| 125 |
|
| 126 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-capybara-sft")
|
| 127 |
+
print("π View metrics at: https://huggingface.co/spaces/username/my-trackio-dashboard")
|
trl/scripts/validate_dataset.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# /// script
|
| 3 |
+
# dependencies = [
|
| 4 |
+
# "datasets>=2.14.0",
|
| 5 |
+
# ]
|
| 6 |
+
# ///
|
| 7 |
+
"""
|
| 8 |
+
Validate dataset format for TRL training.
|
| 9 |
+
|
| 10 |
+
Usage:
|
| 11 |
+
python validate_dataset.py <dataset_name> <method>
|
| 12 |
+
|
| 13 |
+
Examples:
|
| 14 |
+
python validate_dataset.py trl-lib/Capybara sft
|
| 15 |
+
python validate_dataset.py Anthropic/hh-rlhf dpo
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import sys
|
| 19 |
+
from datasets import load_dataset
|
| 20 |
+
|
| 21 |
+
def validate_sft_dataset(dataset):
    """Validate that a dataset is formatted for SFT training.

    Accepts either conversational data (a ``messages`` column holding a
    list of ``{"role", "content"}`` dicts) or plain-text data (a ``text``
    column holding non-empty strings). Only the first example is inspected.

    Args:
        dataset: A loaded dataset split exposing ``column_names``,
            ``len()`` and integer indexing.

    Returns:
        bool: True if the dataset looks usable for SFT, False otherwise.
    """
    print("🔍 Validating SFT dataset...")

    columns = dataset.column_names
    print(f"📋 Columns: {columns}")

    has_messages = "messages" in columns
    has_text = "text" in columns

    if not (has_messages or has_text):
        print("❌ Dataset must have 'messages' or 'text' field")
        return False

    # Guard before indexing: an empty split would raise IndexError below.
    if len(dataset) == 0:
        print("❌ Dataset is empty")
        return False

    example = dataset[0]

    if has_messages:
        messages = example["messages"]
        if not isinstance(messages, list):
            print("❌ 'messages' field must be a list")
            return False

        if len(messages) == 0:
            print("❌ 'messages' field is empty")
            return False

        # Spot-check the first message for chat format.
        msg = messages[0]
        if not isinstance(msg, dict):
            print("❌ Messages must be dictionaries")
            return False

        if "role" not in msg or "content" not in msg:
            print("❌ Messages must have 'role' and 'content' keys")
            return False

        print("✅ Messages format valid")
        # str() keeps the preview safe if content is not a plain string
        # (e.g. multimodal content stored as a list of parts).
        print(f"   First message: {msg['role']}: {str(msg['content'])[:50]}...")

    if has_text:
        text = example["text"]
        if not isinstance(text, str):
            print("❌ 'text' field must be a string")
            return False

        if len(text) == 0:
            print("❌ 'text' field is empty")
            return False

        print("✅ Text format valid")
        print(f"   First text: {text[:100]}...")

    return True
|
| 76 |
+
|
| 77 |
+
def validate_dpo_dataset(dataset):
    """Validate that a dataset is formatted for DPO training.

    DPO requires three columns -- ``prompt``, ``chosen`` and ``rejected`` --
    each either a plain string or a list of chat messages. Only the first
    example is inspected.

    Args:
        dataset: A loaded dataset split exposing ``column_names``,
            ``len()`` and integer indexing.

    Returns:
        bool: True if the dataset looks usable for DPO, False otherwise.
    """
    print("🔍 Validating DPO dataset...")

    columns = dataset.column_names
    print(f"📋 Columns: {columns}")

    required = ["prompt", "chosen", "rejected"]
    missing = [col for col in required if col not in columns]

    if missing:
        print(f"❌ Missing required fields: {missing}")
        return False

    # Guard before indexing: an empty split would raise IndexError below.
    if len(dataset) == 0:
        print("❌ Dataset is empty")
        return False

    example = dataset[0]

    for field in required:
        value = example[field]
        if isinstance(value, str):
            if not value:
                print(f"❌ '{field}' field is empty")
                return False
            print(f"✅ '{field}' format valid (string)")
        elif isinstance(value, list):
            if not value:
                print(f"❌ '{field}' field is empty")
                return False
            print(f"✅ '{field}' format valid (list of messages)")
        else:
            print(f"❌ '{field}' must be string or list")
            return False

    return True
|
| 111 |
+
|
| 112 |
+
def validate_kto_dataset(dataset):
    """Validate that a dataset is formatted for KTO training.

    KTO requires ``prompt``, ``completion`` and a boolean ``label`` column
    (True = desirable completion, False = undesirable). Only the first
    example is inspected.

    Args:
        dataset: A loaded dataset split exposing ``column_names``,
            ``len()`` and integer indexing.

    Returns:
        bool: True if the dataset looks usable for KTO, False otherwise.
    """
    print("🔍 Validating KTO dataset...")

    columns = dataset.column_names
    print(f"📋 Columns: {columns}")

    required = ["prompt", "completion", "label"]
    missing = [col for col in required if col not in columns]

    if missing:
        print(f"❌ Missing required fields: {missing}")
        return False

    # Guard before indexing: an empty split would raise IndexError below.
    if len(dataset) == 0:
        print("❌ Dataset is empty")
        return False

    example = dataset[0]

    if not isinstance(example["label"], bool):
        print("❌ 'label' field must be boolean")
        return False

    # Mirror the DPO validator: reject empty prompt/completion values.
    for field in ("prompt", "completion"):
        value = example[field]
        if isinstance(value, (str, list)) and not value:
            print(f"❌ '{field}' field is empty")
            return False

    print("✅ KTO format valid")
    return True
|
| 135 |
+
|
| 136 |
+
def main():
    """CLI entry point: load a dataset split and validate it for a TRL method.

    Usage: ``python validate_dataset.py <dataset_name> <method>`` where
    method is one of ``sft``, ``dpo`` or ``kto``. Exits 0 when the dataset
    is valid, 1 otherwise.
    """
    if len(sys.argv) != 3:
        print("Usage: python validate_dataset.py <dataset_name> <method>")
        print("Methods: sft, dpo, kto")
        sys.exit(1)

    dataset_name = sys.argv[1]
    method = sys.argv[2].lower()

    validators = {
        "sft": validate_sft_dataset,
        "dpo": validate_dpo_dataset,
        "kto": validate_kto_dataset,
    }

    # Validate the method argument *before* downloading anything: a typo
    # should not cost a (potentially large) dataset download.
    if method not in validators:
        print(f"❌ Unknown method: {method}")
        print(f"Supported methods: {list(validators.keys())}")
        sys.exit(1)

    print(f"📦 Loading dataset: {dataset_name}")
    try:
        dataset = load_dataset(dataset_name, split="train")
        print(f"✅ Dataset loaded: {len(dataset)} examples")
    except Exception as e:
        print(f"❌ Failed to load dataset: {e}")
        sys.exit(1)

    if validators[method](dataset):
        print(f"\n✅ Dataset is valid for {method.upper()} training")
        sys.exit(0)

    print(f"\n❌ Dataset is NOT valid for {method.upper()} training")
    sys.exit(1)
|
| 173 |
+
|
| 174 |
+
# Script entry point; exit status is set by main() via sys.exit.
if __name__ == "__main__":
    main()
|