Update bf16 options
Browse files
README.md
CHANGED
|
@@ -172,7 +172,7 @@ gptq_model_v1: false # v1 or v2
 172  load_in_8bit: true
 173
 174  # Use CUDA bf16
-175  bf16: true
+175  bf16: true # bool or 'full' for `bf16_full_eval`
 176  # Use CUDA fp16
 177  fp16: true
 178  # Use CUDA tf32