qwen_full_sft.sh (forked from hiyouga/LLaMA-Factory)
#!/bin/bash
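# Full-parameter SFT of Qwen1.5-4B on the universal_ner_all dataset via
# LLaMA-Factory, on a single GPU. Effective batch size is
# per_device_train_batch_size (1) * gradient_accumulation_steps (32) = 32.
# 10% of the data is split off for evaluation (--val_size 0.1), and the
# best checkpoint by eval loss is reloaded at the end of training.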
CUDA_VISIBLE_DEVICES=0 python ./src/train_bash.py \
--stage sft \
--do_train \
--model_name_or_path ../model/Qwen1.5-4B \
--dataset universal_ner_all \
--dataset_dir ./data \
--template qwen \
--finetuning_type full \
--output_dir ../saves/qwen_sft/4b_full \
--overwrite_cache \
--overwrite_output_dir \
--cutoff_len 3072 \
--preprocessing_num_workers 32 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 32 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--warmup_steps 200 \
--save_steps 1000 \
--eval_steps 1000 \
--save_strategy steps \
--evaluation_strategy steps \
--load_best_model_at_end \
--learning_rate 2e-5 \
--num_train_epochs 1.0 \
--val_size 0.1 \
--plot_loss \
--report_to wandb
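
# A possible multi-GPU variant (a sketch, not part of the original run):
# full fine-tuning a 4B model is memory-heavy for a single GPU, and the
# same entry point can be launched with torchrun, keeping the effective
# batch size at 32 by dividing gradient accumulation across devices, e.g.:
#
#   CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node 4 ./src/train_bash.py \
#       ... (same arguments as above, with --gradient_accumulation_steps 8)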