diff --git a/llm/run_finetune.py b/llm/run_finetune.py
index 6711a328854d..06a85e095bab 100644
--- a/llm/run_finetune.py
+++ b/llm/run_finetune.py
@@ -41,7 +41,7 @@
     VeRAConfig,
     VeRAModel,
 )
-from paddlenlp.trainer import PdArgumentParser, get_last_checkpoint
+from paddlenlp.trainer import PdArgumentParser, get_last_checkpoint, set_seed
 from paddlenlp.trainer.trainer_callback import TrainerState
 from paddlenlp.transformers import (
     AutoConfig,
@@ -93,6 +93,7 @@ def main():
 
     # Setup GPU & distributed training
     paddle.set_device(training_args.device)
+    set_seed(seed=training_args.seed)
     logger.warning(
         f"Process rank: {training_args.local_rank}, device: {training_args.device}, world_size: {training_args.world_size}, "
         + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16 or training_args.bf16}"
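
For context on what this one-line change buys: a seed helper like `paddlenlp.trainer.set_seed` is typically a thin wrapper that seeds every RNG the training process touches. A minimal sketch of such a helper is below; this is an illustration only, not the actual PaddleNLP implementation, which may additionally derive per-rank seeds for distributed runs.

```python
import random

import numpy as np
import paddle


def set_seed(seed: int) -> None:
    """Sketch of a seed helper: make data shuffling, dropout, and
    weight initialization reproducible across fine-tuning runs."""
    random.seed(seed)      # Python's built-in RNG (e.g. list shuffling)
    np.random.seed(seed)   # NumPy RNG used in data pipelines
    paddle.seed(seed)      # Paddle's global RNG (weight init, dropout)
```

Placing the call immediately after `paddle.set_device(training_args.device)`, before any model or dataloader is constructed, means every later consumer of randomness starts from the same seeded state.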