"docs/vscode:/vscode.git/clone" did not exist on "b500df11559265857d6b51685affdc13822f625f"
Unverified Commit 4fbd310f authored by Sayak Paul, committed by GitHub

[Chore] switch to `logger.warning` (#7289)

switch to logger.warning
parent 2ea28d69
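For context: in the standard-library `logging` module, `Logger.warn` is a deprecated alias for `Logger.warning`, and calling it emits a `DeprecationWarning`. The loggers in these scripts ultimately delegate to the standard library, so the rename below avoids that warning with no change in logging behavior. A minimal, self-contained sketch of the difference (standard library only, nothing diffusers-specific):

```python
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("deprecated alias")    # logs, but triggers a DeprecationWarning
    logger.warning("preferred call")   # the supported spelling

# The alias surfaces: "The 'warn' method is deprecated, use 'warning' instead"
print([str(w.message) for w in caught])
```

Both calls write the message identically; only the alias raises the deprecation warning, which is why this change is a pure chore.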
@@ -1215,7 +1215,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
             "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
@@ -1366,14 +1366,14 @@ def main(args):
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1407,11 +1407,11 @@ def main(args):
     optimizer_class = prodigyopt.Prodigy
     if args.learning_rate <= 0.1:
-        logger.warn(
+        logger.warning(
             "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
         )
     if args.train_text_encoder and args.text_encoder_lr:
-        logger.warn(
+        logger.warning(
             f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
             f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
             f"When using prodigy only learning_rate is used as the initial learning rate."
...
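The Prodigy warnings in the hunks above encode the optimizer's intended usage. A minimal sketch of that setup, assuming the `prodigyopt` package (the `Linear` module is a placeholder for the actual UNet/LoRA parameters):

```python
import torch
import prodigyopt

model = torch.nn.Linear(4, 4)  # stand-in for the trained module

# Prodigy estimates the step size itself, so `lr` acts as a multiplier on
# that estimate; values near 1.0 are the recommended default, which is why
# the script warns when args.learning_rate <= 0.1.
optimizer = prodigyopt.Prodigy(model.parameters(), lr=1.0)
```

This also explains the second warning: with Prodigy, a separately supplied text-encoder learning rate is ignored and only `learning_rate` seeds the estimate.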
@@ -1317,7 +1317,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
             "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
@@ -1522,14 +1522,14 @@ def main(args):
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1563,11 +1563,11 @@ def main(args):
     optimizer_class = prodigyopt.Prodigy
     if args.learning_rate <= 0.1:
-        logger.warn(
+        logger.warning(
             "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
         )
     if args.train_text_encoder and args.text_encoder_lr:
-        logger.warn(
+        logger.warning(
             f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
             f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
             f"When using prodigy only learning_rate is used as the initial learning rate."
...
@@ -452,7 +452,7 @@ class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
         xformers_version = version.parse(xformers.__version__)
         if xformers_version == version.parse("0.0.16"):
-            logger.warn(
+            logger.warning(
                 "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
             )
             self.enable_xformers_memory_efficient_attention()
...
@@ -308,7 +308,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step):
             tracker.log({"validation": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -1068,7 +1068,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -180,7 +180,7 @@ def log_validation(vae, args, accelerator, weight_dtype, step, unet=None, is_fin
             logger_name = "test" if is_final_validation else "validation"
             tracker.log({logger_name: formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -928,7 +928,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -325,7 +325,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step):
             tracker.log({"validation": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -1083,7 +1083,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -285,7 +285,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step, name="targe
             tracker.log({f"validation/{name}": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -1023,7 +1023,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -303,7 +303,7 @@ def log_validation(vae, unet, args, accelerator, weight_dtype, step, name="targe
             tracker.log({f"validation/{name}": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -1083,7 +1083,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -178,7 +178,7 @@ def log_validation(
             tracker.log({tracker_key: formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -861,7 +861,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -128,7 +128,7 @@ def log_validation(pipeline, pipeline_params, controlnet_params, tokenizer, args
             wandb.log({"validation": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {args.report_to}")
+            logger.warning(f"image logging not implemented for {args.report_to}")
     return image_logs
...
@@ -178,7 +178,7 @@ def log_validation(vae, unet, controlnet, args, accelerator, weight_dtype, step,
             tracker.log({tracker_key: formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -929,7 +929,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -904,7 +904,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         attention_class = CustomDiffusionXFormersAttnProcessor
...
@@ -987,7 +987,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -895,7 +895,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -1141,7 +1141,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
             "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
@@ -1317,14 +1317,14 @@ def main(args):
     # Optimizer creation
     if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
-        logger.warn(
+        logger.warning(
             f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
             "Defaulting to adamW"
         )
         args.optimizer = "adamw"
     if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
-        logger.warn(
+        logger.warning(
             f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
             f"set to {args.optimizer.lower()}"
         )
@@ -1358,11 +1358,11 @@ def main(args):
     optimizer_class = prodigyopt.Prodigy
     if args.learning_rate <= 0.1:
-        logger.warn(
+        logger.warning(
             "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
         )
     if args.train_text_encoder and args.text_encoder_lr:
-        logger.warn(
+        logger.warning(
             f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
             f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
             f"When using prodigy only learning_rate is used as the initial learning rate."
...
@@ -488,7 +488,7 @@ def main():
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -580,7 +580,7 @@ def main():
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -177,7 +177,7 @@ def log_validation(vae, image_encoder, image_processor, unet, args, accelerator,
                 }
             )
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     torch.cuda.empty_cache()
@@ -534,7 +534,7 @@ def main():
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
@@ -180,7 +180,7 @@ def log_validation(
                 }
             )
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     torch.cuda.empty_cache()
...
@@ -219,7 +219,7 @@ def log_validation(unet, scheduler, args, accelerator, weight_dtype, step, name=
     if args.num_classes is not None:
         class_labels = list(range(args.num_classes))
     else:
-        logger.warn(
+        logger.warning(
             "The model is class-conditional but the number of classes is not set. The generated images will be"
             " unconditional rather than class-conditional."
         )
@@ -266,7 +266,7 @@ def log_validation(unet, scheduler, args, accelerator, weight_dtype, step, name=
             tracker.log({f"validation/{name}": formatted_images})
         else:
-            logger.warn(f"image logging not implemented for {tracker.name}")
+            logger.warning(f"image logging not implemented for {tracker.name}")
     del pipeline
     gc.collect()
@@ -863,14 +863,14 @@ def main(args):
     elif args.model_config_name_or_path is None:
         # TODO: use default architectures from iCT paper
         if not args.class_conditional and (args.num_classes is not None or args.class_embed_type is not None):
-            logger.warn(
+            logger.warning(
                 f"`--class_conditional` is set to `False` but `--num_classes` is set to {args.num_classes} and"
                 f" `--class_embed_type` is set to {args.class_embed_type}. These values will be overridden to `None`."
             )
             args.num_classes = None
             args.class_embed_type = None
         elif args.class_conditional and args.num_classes is None and args.class_embed_type is None:
-            logger.warn(
+            logger.warning(
                 "`--class_conditional` is set to `True` but neither `--num_classes` nor `--class_embed_type` is set."
                 "`class_conditional` will be overridden to `False`."
             )
@@ -996,7 +996,7 @@ def main(args):
     xformers_version = version.parse(xformers.__version__)
     if xformers_version == version.parse("0.0.16"):
-        logger.warn(
+        logger.warning(
             "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
         )
         unet.enable_xformers_memory_efficient_attention()
...
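To keep further `logger.warn` calls from creeping back in, a quick scan like the following can be run from a checkout (a sketch; the glob over `.py` files is an assumption about the layout, and the substring check deliberately does not match `logger.warning(`):

```python
from pathlib import Path

# List any remaining deprecated `logger.warn(` call sites.
for path in Path(".").rglob("*.py"):
    text = path.read_text(encoding="utf-8", errors="ignore")
    for lineno, line in enumerate(text.splitlines(), start=1):
        if "logger.warn(" in line:  # "logger.warning(" does not contain this
            print(f"{path}:{lineno}: {line.strip()}")
```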