Unverified Commit 79fa94ea authored by Zachary Mueller, committed by GitHub

Apply deprecations from Accelerate (#3714)

Apply deprecations
parent a06317ab
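
Every hunk below applies the same migration: Accelerate deprecated passing `logging_dir` directly to `Accelerator`, so the directories (and the checkpoint `total_limit`) now live on a `ProjectConfiguration` object handed to `Accelerator` via `project_config`. A minimal sketch of the new pattern follows; the directory values and `total_limit=3` are hypothetical stand-ins for what the real scripts read from CLI args.

import os

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Hypothetical paths for illustration; the training scripts build these from args.
output_dir = "sd-finetune-output"
logging_dir = os.path.join(output_dir, "logs")

# Old (deprecated): Accelerator(..., logging_dir=logging_dir)
# New: directories and checkpoint retention are bundled into ProjectConfiguration.
project_config = ProjectConfiguration(
    total_limit=3,  # keep at most 3 saved checkpoints (hypothetical value)
    project_dir=output_dir,
    logging_dir=logging_dir,
)

accelerator = Accelerator(
    gradient_accumulation_steps=1,
    mixed_precision="fp16",
    log_with="tensorboard",
    project_config=project_config,
)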
@@ -716,13 +716,14 @@ def collate_fn(examples):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -637,13 +637,14 @@ def parse_args(input_args=None):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -771,13 +771,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -653,13 +653,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -387,12 +387,13 @@ def main():
             ),
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -405,13 +405,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)
-    project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=project_config,
     )
...
@@ -404,13 +404,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -13,7 +13,7 @@ import torch.nn.functional as F
 import torch.utils.checkpoint
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import create_repo, upload_folder

 # TODO: remove and import from diffusers.utils when the new version of diffusers is released
@@ -363,12 +363,12 @@ def freeze_params(params):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with="tensorboard",
-        logging_dir=logging_dir,
+        log_with=args.report_to,
+        project_config=accelerator_project_config,
     )
     # If passed along, set the training seed now.
...
@@ -12,7 +12,7 @@ import torch
 import torch.nn.functional as F
 import torch.utils.checkpoint
 from accelerate import Accelerator
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import HfFolder, Repository, whoami
 from neural_compressor.utils import logger
 from packaging import version
@@ -458,11 +458,13 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
+        project_config=accelerator_project_config,
     )
     # If passed along, set the training seed now.
...
@@ -394,13 +394,14 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":
...
@@ -549,14 +549,14 @@ class TextualInversionDataset(Dataset):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -464,14 +464,13 @@ class PromptDataset(Dataset):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -422,14 +422,14 @@ def main():
             ),
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -562,14 +562,14 @@ class TextualInversionDataset(Dataset):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -289,14 +289,14 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:
 def main(args):
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with=args.logger,
-        logging_dir=logging_dir,
+        log_with=args.report_to,
         project_config=accelerator_project_config,
     )
...
@@ -427,13 +427,14 @@ def main():
        )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -366,15 +366,16 @@ DATASET_NAME_MAPPING = {
 def main():
     args = parse_args()
-    logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    logging_dir = Path(args.output_dir, args.logging_dir)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":
...
@@ -566,14 +566,13 @@ class TextualInversionDataset(Dataset):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...
@@ -287,14 +287,14 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:
 def main(args):
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.logger,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
...