
# Train

## Environment

|
```bash
cd scripts
python -m venv venv
source venv/bin/activate
pip install -U -r requirements.in
```
|
|
|
## Tokenizer

```bash
python -B train_tokenizer.py
```
|
|
|
## Dataset

```bash
python -B prepare_pretrain_dataset.py
```
|
|
|
```python
from litdata import StreamingDataset, StreamingDataLoader, TokensLoader

dataset = StreamingDataset(
    input_dir='../pretrain-data/',
    item_loader=TokensLoader(block_size=2048 + 1),
)

print(len(dataset))
```
|
|
|
## Model

### Pretrain

```bash
litgpt pretrain --config ./pretrain-model.yaml
```

```bash
litgpt convert_from_litgpt out/pretrain/final/ out/converted_model
cp config.json out/pretrain/final/
cp config.json out/converted_model/
```
|
|
|
```python
import torch
from safetensors.torch import save_file

state_dict = torch.load('out/converted_model/model.pth', map_location='cpu')
save_file(state_dict, 'out/converted_model/model.safetensors')
```
|
|
|
## Evaluate

```bash
litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --out_dir 'evaluate-quick/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-leaderboard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'bbh_zeroshot,bbh_fewshot,bbh_cot_fewshot,bbh_cot_zeroshot' --out_dir 'evaluate-bigbenchhard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'mmlu,mmlu_pro' --out_dir 'evaluate-mmlu/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'arc_challenge,boolq,gpqa,hellaswag,openbookqa,piqa,truthfulqa_mc2,winogrande' --out_dir 'evaluate-reasoning/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'mmlu_multilingual,mgsm' --out_dir 'evaluate-multilinguals/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'gsm8k,mathqa' --out_dir 'evaluate-math/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'qasper' --out_dir 'evaluate-long/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```