keeeeenw committed on
Commit f17e9c7 · verified
1 Parent(s): 0b88607

Delete eval_llama3.sh

Files changed (1)
  1. eval_llama3.sh +0 -17
eval_llama3.sh DELETED
@@ -1,17 +0,0 @@
- # MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
- # ValueError: User-specified max_model_len (32768) is greater than the derived max_model_len (max_position_embeddings=2048 or model_max_length=None in model's config.json). This may lead
- # to incorrect model outputs or CUDA errors. To allow overriding this maximum, set the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1
- # Only needed for MicroLlama V1
- # export VLLM_ALLOW_LONG_MAX_MODEL_LEN=1
-
- NUM_GPUS=4
- MODEL="/root/open-r1/data/meta-llama/Llama-3.2-1B-Instruct/checkpoint-900"
- MODEL_ARGS="pretrained=$MODEL,dtype=float16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
- TASK=aime24
- OUTPUT_DIR=data/evals/$MODEL
-
- lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
-     --custom-tasks src/open_r1/evaluate.py \
-     --use-chat-template \
-     --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
-     --output-dir $OUTPUT_DIR
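
For reference, a minimal sketch of how the removed script would have been invoked, based only on the deleted file's own comments: the VLLM_ALLOW_LONG_MAX_MODEL_LEN override is only needed when the max_model_len ValueError appears (per the comment, MicroLlama V1), not for this Llama-3.2 checkpoint. The invocation below is hypothetical, since the script no longer exists in the repo.

    # Only if vLLM raises the max_model_len ValueError (e.g. MicroLlama V1)
    export VLLM_ALLOW_LONG_MAX_MODEL_LEN=1
    # Run the (now deleted) evaluation script
    bash eval_llama3.sh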