Commit: cuda/pytorch matrix builds

Changed file: .github/workflows/main.yml (+22 −4)
Hunk @@ -10,6 +10,15 @@ jobs: — before:

 10 |   build-axolotl:
 11 |     if: github.repository_owner == 'OpenAccess-AI-Collective'
 12 |     # this job needs to be run on self-hosted GPU runners...
 13 |     runs-on: self-hosted
 14 |     steps:
 15 |     - name: Checkout
Hunk @@ -31,10 +40,10 @@ jobs: — before:

 31 |       with:
 32 |         context: .
 33 |         build-args: |
 34 | -         BASE_TAG=${{ github.ref_name }}-base
 35 |         file: ./docker/Dockerfile
 36 |         push: ${{ github.event_name != 'pull_request' }}
 37 | -       tags: ${{ steps.metadata.outputs.tags }}
 38 |         labels: ${{ steps.metadata.outputs.labels }}
 39 |         cache-from: type=gha
 40 |         cache-to: type=gha,mode=max
Hunk @@ -42,6 +51,15 @@ jobs: — before:

 42 |     needs: build-axolotl
 43 |     if: github.repository_owner == 'OpenAccess-AI-Collective'
 44 |     # this job needs to be run on self-hosted GPU runners...
 45 |     runs-on: self-hosted
 46 |     steps:
 47 |     - name: Checkout
Hunk @@ -63,10 +81,10 @@ jobs: — before:

 63 |       with:
 64 |         context: .
 65 |         build-args: |
 66 | -         BASE_TAG=${{ github.ref_name }}
 67 |         file: ./docker/Dockerfile-runpod
 68 |         push: ${{ github.event_name != 'pull_request' }}
 69 | -       tags: ${{ steps.metadata.outputs.tags }}
 70 |         labels: ${{ steps.metadata.outputs.labels }}
 71 |         cache-from: type=gha
 72 |         cache-to: type=gha,mode=max
Hunk @@ -10,6 +10,15 @@ jobs: — after:

 10 |   build-axolotl:
 11 |     if: github.repository_owner == 'OpenAccess-AI-Collective'
 12 |     # this job needs to be run on self-hosted GPU runners...
 13 | +   strategy:
 14 | +     matrix:
 15 | +       include:
 16 | +         - cuda: cu118
 17 | +           cuda_version: 11.8.0
 18 | +           pytorch: 2.0.0
 19 | +         - cuda: cu117
 20 | +           cuda_version: 11.7.0
 21 | +           pytorch: 1.13.1
 22 |     runs-on: self-hosted
 23 |     steps:
 24 |     - name: Checkout
Hunk @@ -31,10 +40,10 @@ jobs: — after:

 40 |       with:
 41 |         context: .
 42 |         build-args: |
 43 | +         BASE_TAG=${{ github.ref_name }}-base-${{ matrix.cuda }}-${{ matrix.pytorch }}
 44 |         file: ./docker/Dockerfile
 45 |         push: ${{ github.event_name != 'pull_request' }}
 46 | +       tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
 47 |         labels: ${{ steps.metadata.outputs.labels }}
 48 |         cache-from: type=gha
 49 |         cache-to: type=gha,mode=max
Hunk @@ -42,6 +51,15 @@ jobs: — after:

 51 |     needs: build-axolotl
 52 |     if: github.repository_owner == 'OpenAccess-AI-Collective'
 53 |     # this job needs to be run on self-hosted GPU runners...
 54 | +   strategy:
 55 | +     matrix:
 56 | +       include:
 57 | +         - cuda: cu118
 58 | +           cuda_version: 11.8.0
 59 | +           pytorch: 2.0.0
 60 | +         - cuda: cu117
 61 | +           cuda_version: 11.7.0
 62 | +           pytorch: 1.13.1
 63 |     runs-on: self-hosted
 64 |     steps:
 65 |     - name: Checkout
Hunk @@ -63,10 +81,10 @@ jobs: — after:

 81 |       with:
 82 |         context: .
 83 |         build-args: |
 84 | +         BASE_TAG=${{ github.ref_name }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
 85 |         file: ./docker/Dockerfile-runpod
 86 |         push: ${{ github.event_name != 'pull_request' }}
 87 | +       tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
 88 |         labels: ${{ steps.metadata.outputs.labels }}
 89 |         cache-from: type=gha
 90 |         cache-to: type=gha,mode=max