smpo_config.py
from dataclasses import dataclass
from typing import Dict, Literal, Optional

from transformers import TrainingArguments


@dataclass
class SimpleMarginPOConfig(TrainingArguments):
r"""
SimpleMarginPOConfig collects all training arguments related to the [`MarginPOTrainer`] class.
Using [`HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
max_length (`int`, defaults to `None`):
The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.
max_prompt_length (`int`, defaults to `None`):
The maximum length of the prompt. This argument is required if you want to use the default data collator.
max_target_length (`int`, defaults to `None`):
The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.
beta (`float`, defaults to 1.2):
The beta factor in SimpleMarginPO loss.
margin_min (`float`, defaults to 0.35):
The minimal target reward margin in SimpleMarginPO loss. Can be zero for sigmoid and hinge losses.
margin_delta (`float`, defaults to 0.2):
The delta of minmal and maximal target reward margin in SimpleMarginPO loss. Only applicable to `smooth_double_bound` loss.
chosen_sft_ratio (`float`, defaults to 0.8):
SFT loss balance weight between chosen and rejected, used in the SimpleMarginPO loss (1.0 will use maximum of chosen loss and zero of rejected loss).
loss_type (`str`, defaults to `smooth_lower_bound`):
The type of loss to use. This argument is required if you want to use the default data collator.
lower_clip_percentile (`Optional[float]`, defaults to 0.02):
Lower percentile of token log probs value allowed for PO loss calculation for rejected completions. Works like winsorizing. Recommended range [0.01, 0.05]
min_log_prob (`Optional[float]`, defaults to -2.3):
Lowest possible token log prob value allowed in rejected completions. Will clip all log probs, works after percentile winsorizing.
upper_clip_percentile (`Optional[float]`, defaults to `None`):
Upper percentile of token log probs value allowed for PO loss calculation for chosen completions. Works like winsorizing. Recommended range [0.95, 0.99]
label_pad_token_id (`int`, defaults to `-100`):
The label pad token id. This argument is required if you want to use the default data collator.
padding_value (`int`, defaults to `None`):
The padding value if it is different to the tokenizer's pad_token_id.
truncation_mode (`str`, defaults to `keep_end`):
The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.
generate_during_eval (`bool`, defaults to `False`):
Whether to sample and log generations during evaluation step.
is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`):
If no model is provided, we need to know if the model_init returns an encoder-decoder.
disable_dropout (`bool`, defaults to `True`):
Whether or not to disable dropouts in `model`.
model_init_kwargs (`Optional[Dict]`, *optional*):
Dict of Optional kwargs to pass when instantiating the model from a string
dataset_num_proc (`Optional[int]`, *optional*):
The number of workers to use to tokenize the data. Defaults to None.
"""
    max_length: Optional[int] = None
    max_prompt_length: Optional[int] = None
    max_completion_length: Optional[int] = None
    max_target_length: Optional[int] = None
    beta: float = 1.2
    margin_min: float = 0.35
    margin_delta: float = 0.2
    chosen_sft_ratio: float = 0.8
    lower_clip_percentile: Optional[float] = 0.02
    upper_clip_percentile: Optional[float] = None
    min_log_prob: Optional[float] = -2.3
    loss_type: Literal['sigmoid', 'hinge', 'ipo', 'smooth_lower_bound', 'smooth_double_bound'] = "smooth_lower_bound"
    disable_dropout: bool = True
    label_pad_token_id: int = -100
    padding_value: Optional[int] = None
    truncation_mode: str = "keep_end"
    generate_during_eval: bool = False
    is_encoder_decoder: Optional[bool] = None
    model_init_kwargs: Optional[Dict] = None
    dataset_num_proc: Optional[int] = None
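
# ---------------------------------------------------------------------------
# Usage sketch: a minimal, illustrative example of how SimpleMarginPOConfig
# might be constructed, either directly or from command-line-style arguments
# via HfArgumentParser (as the class docstring suggests). The output_dir path
# and hyperparameter values below are placeholders, not recommendations.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    # Direct instantiation, overriding a few SMPO-specific knobs; all other
    # TrainingArguments fields keep their defaults.
    config = SimpleMarginPOConfig(
        output_dir="./smpo-output",  # placeholder path
        max_length=1024,
        max_prompt_length=512,
        beta=1.2,
        margin_min=0.35,
        margin_delta=0.2,
        loss_type="smooth_double_bound",  # margin_delta only applies to this loss
        lower_clip_percentile=0.02,
    )
    print(config.loss_type, config.beta)

    # The same config parsed CLI-style; `args` is passed explicitly so the
    # example is self-contained (normally these flags come from sys.argv).
    parser = HfArgumentParser(SimpleMarginPOConfig)
    (cli_config,) = parser.parse_args_into_dataclasses(
        args=["--output_dir", "./smpo-output", "--beta", "1.2", "--loss_type", "hinge"]
    )
    print(cli_config.beta, cli_config.loss_type)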