# llama-wp.yml — Axolotl fine-tuning configuration (63 lines, 1.29 KB)
---
# Axolotl full fine-tune config: meta-llama/Llama-3.1-8B on the
# intergroup-nfl "wp" (written-prediction) alpaca-format dataset.
base_model: meta-llama/Llama-3.1-8B
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
seed: 1
load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: /home/venkat/Desktop/intergroup-nfl/data/train_wp.json
    ds_type: json
    type: alpaca
    split: train
test_datasets:
  - path: /home/venkat/Desktop/intergroup-nfl/data/eval_wp.json
    ds_type: json
    type: alpaca
    # NOTE(review): the eval file is loaded under split "train" — presumably the
    # JSON exposes a single default split; confirm this is intentional.
    split: train
# Canonical lowercase boolean (was `True`; `true`/`false` only — yamllint truthy).
load_best_model_at_end: true
dataset_prepared_path: /home/venkat/Desktop/intergroup-nfl/data/
output_dir: /home/venkat/Desktop/intergroup-nfl/models/llama31-8b-wp-1/

sequence_len: 2560
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

gradient_accumulation_steps: 1
micro_batch_size: 2
eval_batch_size: 2
num_epochs: 2
optimizer: paged_adamw_8bit
lr_scheduler: cosine
# `1.0e-5` parses as a float on every YAML loader; bare `1e-5` is read as a
# *string* by PyYAML's YAML-1.1 resolver (float tag requires a dot in the mantissa).
learning_rate: 1.0e-5
cosine_min_lr_ratio: 0.1
weight_decay: 0.1
warmup_steps: 10
train_on_inputs: false
group_by_length: false

bf16: auto
tf32: false
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
flash_attention: true

logging_steps: 10
eval_steps: 0.1
save_steps: 0.1
save_total_limit: 3
early_stopping_patience: 3

special_tokens:
  # Quoted: the value contains `|`, which is a YAML block-scalar indicator.
  pad_token: "<|end_of_text|>"

wandb_project: intergroup-bias
# NOTE(review): run name says "-wp-2" but output_dir and seed say "-wp-1" / 1 —
# confirm which run index this config actually belongs to.
wandb_name: llama31-8b-fft-wp-2
# deepspeed: deepspeed_configs/zero3_bf16.json