OpenDAS / diffusers, commit 79fa94ea (unverified)
Authored Jun 08, 2023 by Zachary Mueller; committed by GitHub on Jun 08, 2023

Apply deprecations from Accelerate (#3714)
Apply deprecations
Parent: a06317ab
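
The change is the same in every file below: the deprecated logging_dir keyword argument of Accelerator is dropped, and the directory settings move onto accelerate.utils.ProjectConfiguration, which is passed to Accelerator via project_config. A minimal sketch of that pattern, assuming a recent accelerate release; the directory names and the total_limit value are illustrative placeholders, not taken from the diff:

import os

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

output_dir = "sd-model-output"                   # placeholder for args.output_dir
logging_dir = os.path.join(output_dir, "logs")   # placeholder for args.logging_dir

# Deprecated: passing logging_dir straight to Accelerator.
# accelerator = Accelerator(log_with="tensorboard", logging_dir=logging_dir)

# Replacement: directories and the checkpoint limit live on ProjectConfiguration.
accelerator_project_config = ProjectConfiguration(
    total_limit=3, project_dir=output_dir, logging_dir=logging_dir
)
accelerator = Accelerator(
    log_with="tensorboard",
    project_config=accelerator_project_config,
)
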
Showing 19 changed files with 61 additions and 51 deletions.

examples/controlnet/train_controlnet.py  +3 -2
examples/custom_diffusion/train_custom_diffusion.py  +3 -2
examples/dreambooth/train_dreambooth.py  +3 -2
examples/dreambooth/train_dreambooth_lora.py  +3 -2
examples/instruct_pix2pix/train_instruct_pix2pix.py  +3 -2
examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py  +3 -2
examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py  +3 -2
examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py  +4 -4
examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py  +4 -2
examples/research_projects/lora/train_text_to_image_lora.py  +3 -2
examples/research_projects/mulit_token_textual_inversion/textual_inversion.py  +3 -3
examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py  +3 -4
examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py  +3 -3
examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py  +3 -3
examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py  +4 -4
examples/text_to_image/train_text_to_image.py  +3 -2
examples/text_to_image/train_text_to_image_lora.py  +4 -3
examples/textual_inversion/textual_inversion.py  +3 -4
examples/unconditional_image_generation/train_unconditional.py  +3 -3

examples/controlnet/train_controlnet.py

@@ -716,13 +716,14 @@ def collate_fn(examples):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
 
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/custom_diffusion/train_custom_diffusion.py

@@ -637,13 +637,14 @@ def parse_args(input_args=None):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
 
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/dreambooth/train_dreambooth.py

@@ -771,13 +771,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
 
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/dreambooth/train_dreambooth_lora.py

@@ -653,13 +653,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
 
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/instruct_pix2pix/train_instruct_pix2pix.py

@@ -387,12 +387,13 @@ def main():
             ),
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py

@@ -405,13 +405,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)
 
-    project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=project_config,
     )

examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py

@@ -404,13 +404,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)
 
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py

@@ -13,7 +13,7 @@ import torch.nn.functional as F
 import torch.utils.checkpoint
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import create_repo, upload_folder
 
 # TODO: remove and import from diffusers.utils when the new version of diffusers is released

@@ -363,12 +363,12 @@ def freeze_params(params):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with="tensorboard",
-        logging_dir=logging_dir,
+        log_with=args.report_to,
+        project_config=accelerator_project_config,
     )
 
     # If passed along, set the training seed now.

examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py

@@ -12,7 +12,7 @@ import torch
 import torch.nn.functional as F
 import torch.utils.checkpoint
 from accelerate import Accelerator
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import HfFolder, Repository, whoami
 from neural_compressor.utils import logger
 from packaging import version

@@ -458,11 +458,13 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
 
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
+
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
+        project_config=accelerator_project_config,
     )
 
     # If passed along, set the training seed now.

examples/research_projects/lora/train_text_to_image_lora.py

@@ -394,13 +394,14 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":

examples/research_projects/mulit_token_textual_inversion/textual_inversion.py

@@ -549,14 +549,14 @@ class TextualInversionDataset(Dataset):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py

@@ -464,14 +464,13 @@ class PromptDataset(Dataset):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py

@@ -422,14 +422,14 @@ def main():
             ),
         )
 
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py

@@ -562,14 +562,14 @@ class TextualInversionDataset(Dataset):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py

@@ -289,14 +289,14 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:
 def main(args):
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with=args.logger,
-        logging_dir=logging_dir,
+        log_with=args.report_to,
         project_config=accelerator_project_config,
     )

examples/text_to_image/train_text_to_image.py

@@ -427,13 +427,14 @@ def main():
         )
 
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/text_to_image/train_text_to_image_lora.py

@@ -366,15 +366,16 @@ DATASET_NAME_MAPPING = {
 def main():
     args = parse_args()
-    logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    logging_dir = Path(args.output_dir, args.logging_dir)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
 
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":

examples/textual_inversion/textual_inversion.py

@@ -566,14 +566,13 @@ class TextualInversionDataset(Dataset):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/unconditional_image_generation/train_unconditional.py

@@ -287,14 +287,14 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:
 def main(args):
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
     accelerator_project_config = ProjectConfiguration(
-        total_limit=args.checkpoints_total_limit
-    )
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.logger,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )