bird-of-paradise committed
Commit a90af63 · verified · 1 parent: a3ae39b

Adding a naive `dist.all_gather` version for distributed (FSDP) training

Files changed (1)
  1. MuonForOLMo.ipynb +70 -27
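
Below is a minimal, self-contained sketch of the round-trip this commit adds, written outside the notebook for reference: gather the dim-0 shards of a Muon update, orthogonalize the full matrix on every rank, then slice this rank's shard back out. The helper name `gather_orthogonalize_shard` and the equal-shard assumption are illustrative, not part of the commit; the Newton-Schulz coefficients are the standard Muon ones.

```python
import torch
import torch.distributed as dist

def zeropower_via_newtonschulz5(G, steps=5):
    # Quintic Newton-Schulz iteration that approximately orthogonalizes G.
    a, b, c = 3.4445, -4.7750, 2.0315
    X = G.bfloat16()
    if G.size(-2) > G.size(-1):
        X = X.mT  # iterate on the wide orientation
    X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)
    for _ in range(steps):
        A = X @ X.mT
        B = b * A + c * A @ A
        X = a * X + B @ X
    if G.size(-2) > G.size(-1):
        X = X.mT
    return X.to(G.dtype)

def gather_orthogonalize_shard(update, ns_steps=5):
    # Assumes an initialized process group and equal-sized shards on dim 0,
    # mirroring the "Assuming sharding on dim 0" note in the commit itself.
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    shards = [torch.empty_like(update) for _ in range(world_size)]
    dist.all_gather(shards, update)   # every rank receives every shard
    full = torch.cat(shards, dim=0)   # reassemble the full matrix
    full = zeropower_via_newtonschulz5(full, steps=ns_steps)
    full *= max(1, full.size(-2) / full.size(-1)) ** 0.5  # shape-aware scale
    shard_size = full.shape[0] // world_size
    return full[rank * shard_size:(rank + 1) * shard_size]
```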
MuonForOLMo.ipynb CHANGED
@@ -16,7 +16,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 1,
    "metadata": {
     "id": "cCXb6F65XhI_"
    },
@@ -421,7 +421,7 @@
    "metadata": {
     "id": "o9dFXoh2YSVn"
    },
-   "execution_count": 7,
+   "execution_count": 2,
    "outputs": []
   },
   {
@@ -542,6 +542,8 @@
    "\n",
    "        for name, p in zip(group[\"param_names\"], group[\"params\"]):\n",
    "            name = self._clean_param_name(name)\n",
+   "            # Check if we're in FSDP mode\n",
+   "            is_fsdp = hasattr(p, '_is_sharded') and p._is_sharded\n",
    "\n",
    "            if p.grad is None:\n",
    "                if collecting_metrics:\n",
@@ -582,20 +584,55 @@
    "                if isinstance(mask, torch.Tensor):\n",
    "                    update.mul_(mask)\n",
    "\n",
-   "                # Handle conv filters\n",
-   "                orig_shape = update.shape\n",
-   "                if update.ndim == 4:\n",
-   "                    update = update.view(update.shape[0], -1)\n",
    "\n",
-   "                # Apply Newton-Schulz\n",
-   "                update = self.zeropower_via_newtonschulz5(update, steps=ns_steps)\n",
+   "                if is_fsdp:\n",
+   "                    # For FSDP, we need to gather the full gradient/update across ranks\n",
+   "                    import torch.distributed as dist\n",
    "\n",
-   "                # Scale update\n",
-   "                update *= max(1, grad.size(-2) / grad.size(-1)) ** 0.5\n",
+   "                    # Get world size and rank\n",
+   "                    world_size = dist.get_world_size()\n",
+   "                    rank = dist.get_rank()\n",
    "\n",
-   "                # Reshape if needed\n",
-   "                if len(orig_shape) == 4:\n",
-   "                    update = update.view(orig_shape)\n",
+   "                    # Gather update tensor from all ranks\n",
+   "                    update_list = [torch.empty_like(update) for _ in range(world_size)]\n",
+   "                    dist.all_gather(update_list, update)\n",
+   "\n",
+   "                    # Concatenate to get full update\n",
+   "                    full_update = torch.cat(update_list, dim=0)  # Assuming sharding on dim 0\n",
+   "\n",
+   "                    # Perform Newton-Schulz on full matrix\n",
+   "                    orig_shape = full_update.shape\n",
+   "                    if full_update.ndim == 4:\n",
+   "                        full_update = full_update.view(full_update.shape[0], -1)\n",
+   "\n",
+   "                    full_update = self.zeropower_via_newtonschulz5(full_update, steps=ns_steps)\n",
+   "                    full_update *= max(1, grad.size(-2) / grad.size(-1)) ** 0.5\n",
+   "\n",
+   "                    if len(orig_shape) == 4:\n",
+   "                        full_update = full_update.view(orig_shape)\n",
+   "\n",
+   "                    # Extract this rank's shard from the orthogonalized update\n",
+   "                    shard_size = full_update.shape[0] // world_size\n",
+   "                    start_idx = rank * shard_size\n",
+   "                    end_idx = start_idx + shard_size\n",
+   "                    update = full_update[start_idx:end_idx]\n",
+   "\n",
+   "                else:\n",
+   "                    # Non-FSDP path (single GPU)\n",
+   "                    # Handle conv filters\n",
+   "                    orig_shape = update.shape\n",
+   "                    if update.ndim == 4:\n",
+   "                        update = update.view(update.shape[0], -1)\n",
+   "\n",
+   "                    # Apply Newton-Schulz\n",
+   "                    update = self.zeropower_via_newtonschulz5(update, steps=ns_steps)\n",
+   "\n",
+   "                    # Scale update\n",
+   "                    update *= max(1, grad.size(-2) / grad.size(-1)) ** 0.5\n",
+   "\n",
+   "                    # Reshape if needed\n",
+   "                    if len(orig_shape) == 4:\n",
+   "                        update = update.view(orig_shape)\n",
    "\n",
    "            else:\n",
    "                # --- AdamW Update Logic ---\n",
@@ -689,7 +726,7 @@
    "metadata": {
     "id": "UgBBhlu8YSOD"
    },
-   "execution_count": 9,
+   "execution_count": 3,
    "outputs": []
   },
   {
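
An observation about the committed code in the `@@ -582,20 +584,55 @@` hunk above (not a change to it): at the point where the scale factor is computed, `grad` is still the local FSDP shard, so `max(1, grad.size(-2) / grad.size(-1))` is derived from shard dimensions rather than the gathered matrix's. A quick check of when the two disagree, assuming the commit's own dim-0 sharding:

```python
# Hypothetical shapes: a 4096x1024 weight sharded across 4 ranks on dim 0.
local_rows, cols, world_size = 1024, 1024, 4
full_rows = local_rows * world_size
print(max(1, local_rows / cols) ** 0.5)  # 1.0 -- scale seen by each rank
print(max(1, full_rows / cols) ** 0.5)   # 2.0 -- scale for the full matrix
```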
@@ -746,16 +783,16 @@
      "base_uri": "https://localhost:8080/"
     },
     "id": "JsLd9EUbYfMw",
-    "outputId": "447510b5-446c-48da-b10f-5ee35d1e137e"
+    "outputId": "cc23c0fd-a6fb-4a38-bd59-96b198bdb3f9"
    },
-   "execution_count": 12,
+   "execution_count": 4,
    "outputs": [
     {
      "output_type": "stream",
      "name": "stdout",
      "text": [
-      "Gradient norm: 40.4564\n",
-      "Weight change: 0.0680\n"
+      "Gradient norm: 37.9263\n",
+      "Weight change: 0.0685\n"
      ]
     }
    ]
@@ -1128,16 +1165,28 @@
      "base_uri": "https://localhost:8080/"
     },
     "id": "CrWv9OuRYfHl",
-    "outputId": "4a2ce32e-d9b8-43f3-ec0d-9c4f10a770ec"
+    "outputId": "c73f5e9f-f4ca-41fa-f8c1-db840f341800"
    },
-   "execution_count": 13,
+   "execution_count": 5,
    "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "Running MuonW Optimizer Tests\n",
+      "==================================================\n",
+      "✓ Conv filters handled correctly\n",
+      "✓ Embedding parameters use AdamW update\n",
+      "✓ Matrix parameters use Muon update\n",
+      "✓ Multiple parameter groups work correctly\n"
+     ]
+    },
     {
      "output_type": "stream",
      "name": "stderr",
      "text": [
       "----------------------------------------------------------------------\n",
-      "Ran 8 tests in 0.021s\n",
+      "Ran 8 tests in 0.223s\n",
       "\n",
       "OK\n"
      ]
@@ -1146,12 +1195,6 @@
      "output_type": "stream",
      "name": "stdout",
      "text": [
-      "Running MuonW Optimizer Tests\n",
-      "==================================================\n",
-      "✓ Conv filters handled correctly\n",
-      "✓ Embedding parameters use AdamW update\n",
-      "✓ Matrix parameters use Muon update\n",
-      "✓ Multiple parameter groups work correctly\n",
       "✓ Nesterov momentum works differently from standard momentum\n",
       "✓ Scalar parameters use AdamW update\n",
       "✓ Weight decay applied correctly\n",
 