# NOTE(review): this fragment arrived whitespace-mangled with duplicated lines;
# reconstructed below. Fixes applied:
#   * the warning string contained '{model_args...}' placeholders but lacked the
#     f-prefix, so the braces were logged literally instead of interpolated;
#   * 'iftraining_args.eval_stepsisNone:' had all spaces stripped (SyntaxError);
#   * a stray duplicate 'else:' and a trailing duplicate warning were removed.
# NOTE(review): this block likely sits inside an `if training_args.max_steps > 0:`
# branch not visible in this chunk — confirm against the full file.
logger.warning(
    f"Prompt tokenizer ('{model_args.prompt_tokenizer_name}') and description "
    f"tokenizer ('{model_args.description_tokenizer_name}') are not the same. "
    "Saving only the prompt tokenizer."
)
logger.info("max_steps is given, it will override any value given in num_train_epochs")
total_train_steps = int(training_args.max_steps)
# Setting a very large number of epochs so we go as many times as necessary
# over the (infinite/streaming) iterator; max_steps is the real stop condition.
num_epochs = sys.maxsize
steps_per_epoch = total_train_steps
if training_args.eval_steps is None:
    logger.info("eval_steps is not set, evaluating at the end of each epoch")
    eval_steps = steps_per_epoch
else:
    # NOTE(review): the else body was missing in the mangled source; restoring
    # the conventional behavior — honor the explicitly configured eval_steps.
    eval_steps = training_args.eval_steps