Nanobit committed on
Commit
5b712af
·
1 Parent(s): 9083910

Update bf16 options

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -172,7 +172,7 @@ gptq_model_v1: false # v1 or v2
172
  load_in_8bit: true
173
 
174
  # Use CUDA bf16
175
- bf16: true
176
  # Use CUDA fp16
177
  fp16: true
178
  # Use CUDA tf32
 
172
  load_in_8bit: true
173
 
174
  # Use CUDA bf16
175
+ bf16: true # bool or 'full' for `bf16_full_eval`
176
  # Use CUDA fp16
177
  fp16: true
178
  # Use CUDA tf32