@@ -51,6 +51,14 @@ class ModelArguments:
         default=None,
         metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
     )
+    resume_lora_training: Optional[bool] = field(
+        default=True,
+        metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
+    )
+    plot_loss: Optional[bool] = field(
+        default=False,
+        metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
+    )

     def __post_init__(self):
         if self.checkpoint_dir is not None: # support merging lora weights
@@ -173,14 +181,6 @@ class FinetuningArguments:
         default="q_proj,v_proj",
         metadata={"help": "Name(s) of target modules to apply LoRA. Use comma to separate multiple modules."}
     )
-    resume_lora_training: Optional[bool] = field(
-        default=True,
-        metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
-    )
-    plot_loss: Optional[bool] = field(
-        default=False,
-        metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
-    )

     def __post_init__(self):
         if isinstance(self.lora_target, str):
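For context, a minimal sketch of how the relocated fields would sit inside ModelArguments and how such dataclass arguments are typically parsed. The field name reward_model, the standalone entry point, and the HfArgumentParser usage are assumptions for illustration; only the defaults and help strings come from the diff above.

# Sketch under assumptions; not the full upstream file.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class ModelArguments:
    reward_model: Optional[str] = field(  # field name assumed; the diff only shows its default and help text
        default=None,
        metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
    )
    resume_lora_training: Optional[bool] = field(
        default=True,
        metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
    )
    plot_loss: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
    )


if __name__ == "__main__":
    # e.g. python demo_args.py --resume_lora_training false --plot_loss true
    model_args, = HfArgumentParser(ModelArguments).parse_args_into_dataclasses()
    print(model_args)

With the fields on ModelArguments rather than FinetuningArguments, the metadata "help" strings still surface as --resume_lora_training / --plot_loss command-line options when the dataclass is fed to the argument parser.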