From edafb977330767b82b6c9591d9ec180046155632 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Wed, 7 Jun 2023 12:58:14 +0800
Subject: [PATCH] tiny fix

---
 src/utils/common.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/utils/common.py b/src/utils/common.py
index eaf1d00..a676b66 100644
--- a/src/utils/common.py
+++ b/src/utils/common.py
@@ -279,6 +279,9 @@ def prepare_args(
     if training_args.do_train and (not training_args.fp16):
         logger.warning("We recommend enable fp16 mixed precision training.")
 
+    if data_args.prompt_template == "alpaca":
+        logger.warning("Please specify `prompt_template` if you are using other pre-trained models.")
+
     if training_args.local_rank != -1 and training_args.ddp_find_unused_parameters is None:
         logger.warning("`ddp_find_unused_parameters` needs to be set as False in DDP training.")
         training_args.ddp_find_unused_parameters = False
@@ -318,6 +321,9 @@ def prepare_infer_args() -> Tuple[ModelArguments, DataTrainingArguments, Finetun
     if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
         raise ValueError("Quantization is only compatible with the LoRA method.")
 
+    if data_args.prompt_template == "alpaca":
+        logger.warning("Please specify `prompt_template` if you are using other pre-trained models.")
+
     return model_args, data_args, finetuning_args
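
The two added guards only warn when `prompt_template` is left at "alpaca", which this check implies is the default. Below is a minimal, self-contained sketch of the same guard in isolation; `DataArgs` is a hypothetical stand-in for the repo's `DataTrainingArguments`, not the real class.

    # Minimal sketch (assumed names): the guard added by this patch, in isolation.
    # `prompt_template` is assumed to default to "alpaca", so the warning fires
    # unless a user of a different pre-trained model overrides it explicitly.
    import logging
    from dataclasses import dataclass

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    @dataclass
    class DataArgs:                      # stand-in for DataTrainingArguments
        prompt_template: str = "alpaca"  # assumed default

    data_args = DataArgs()               # the real code parses this from CLI arguments

    if data_args.prompt_template == "alpaca":
        logger.warning("Please specify `prompt_template` if you are using other pre-trained models.")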