diff --git a/finetune/finetune_lora.sh b/finetune/finetune_lora.sh
index df3140a4..c57409fa 100644
--- a/finetune/finetune_lora.sh
+++ b/finetune/finetune_lora.sh
@@ -53,7 +53,7 @@ torchrun $DISTRIBUTED_ARGS finetune.py \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 1 \
-    --evaluation_strategy "steps" \
+    --eval_strategy "steps" \
     --save_strategy "steps" \
     --save_steps 1000 \
     --save_total_limit 10 \