Commit 3f9e3d8a authored by anton-l's avatar anton-l
Browse files

add EMA during training

parent c31736a4
......@@ -9,14 +9,14 @@ from accelerate import Accelerator
from datasets import load_dataset
from diffusers import DDPM, DDPMScheduler, UNetModel
from diffusers.hub_utils import init_git_repo, push_to_hub
from diffusers.modeling_utils import unwrap_model
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import logging
from torchvision.transforms import (
CenterCrop,
Compose,
InterpolationMode,
Lambda,
Normalize,
RandomHorizontalFlip,
Resize,
ToTensor,
......@@ -48,7 +48,7 @@ def main(args):
CenterCrop(args.resolution),
RandomHorizontalFlip(),
ToTensor(),
Lambda(lambda x: x * 2 - 1),
Normalize([0.5], [0.5]),
]
)
dataset = load_dataset(args.dataset, split="train")
......@@ -71,6 +71,8 @@ def main(args):
model, optimizer, train_dataloader, lr_scheduler
)
ema_model = EMAModel(model, inv_gamma=1.0, power=3 / 4)
if args.push_to_hub:
repo = init_git_repo(args, at_init=True)
......@@ -87,6 +89,7 @@ def main(args):
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
global_step = 0
for epoch in range(args.num_epochs):
model.train()
with tqdm(total=len(train_dataloader), unit="ba") as pbar:
......@@ -117,19 +120,22 @@ def main(args):
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
ema_model.step(model, global_step)
optimizer.zero_grad()
pbar.update(1)
pbar.set_postfix(loss=loss.detach().item(), lr=optimizer.param_groups[0]["lr"])
pbar.set_postfix(
loss=loss.detach().item(), lr=optimizer.param_groups[0]["lr"], ema_decay=ema_model.decay
)
global_step += 1
optimizer.step()
if is_distributed:
torch.distributed.barrier()
accelerator.wait_for_everyone()
# Generate a sample image for visual inspection
if args.local_rank in [-1, 0]:
model.eval()
if accelerator.is_main_process:
with torch.no_grad():
pipeline = DDPM(unet=unwrap_model(model), noise_scheduler=noise_scheduler)
pipeline = DDPM(
unet=accelerator.unwrap_model(ema_model.averaged_model), noise_scheduler=noise_scheduler
)
generator = torch.manual_seed(0)
# run pipeline in inference (sample random noise and denoise)
......@@ -151,8 +157,7 @@ def main(args):
push_to_hub(args, pipeline, repo, commit_message=f"Epoch {epoch}", blocking=False)
else:
pipeline.save_pretrained(args.output_dir)
if is_distributed:
torch.distributed.barrier()
accelerator.wait_for_everyone()
if __name__ == "__main__":
......
import copy
import torch
class EMAModel:
    """
    Maintains an exponential moving average (EMA) copy of a model's weights.
    """

    def __init__(
        self,
        model,
        update_after_step=0,
        inv_gamma=1.0,
        power=2 / 3,
        min_value=0.0,
        max_value=0.9999,
        device=None,
    ):
        """
        @crowsonkb's notes on EMA Warmup:
            If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are
            good values for models you plan to train for a million or more steps (reaches decay
            factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models
            you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at
            215.4k steps).
        Args:
            inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
            power (float): Exponential factor of EMA warmup. Default: 2/3.
            min_value (float): The minimum EMA decay rate. Default: 0.
        """
        # Warmup-schedule hyperparameters.
        self.update_after_step = update_after_step
        self.inv_gamma = inv_gamma
        self.power = power
        self.min_value = min_value
        self.max_value = max_value
        # Most recent decay factor actually applied (0.0 until the first update).
        self.decay = 0.0

        # The EMA shadow: a frozen deep copy of the model that `step` blends into.
        self.averaged_model = copy.deepcopy(model)
        self.averaged_model.requires_grad_(False)
        if device is not None:
            self.averaged_model = self.averaged_model.to(device=device)

    def get_decay(self, optimization_step):
        """
        Compute the warmed-up decay factor for the exponential moving average.
        """
        # Steps elapsed since averaging became active (clamped at zero).
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            # Warmup has not started yet: the shadow simply tracks the model.
            return 0.0
        warmed = 1 - (1 + step / self.inv_gamma) ** -self.power
        return max(self.min_value, min(warmed, self.max_value))

    @torch.no_grad()
    def step(self, new_model, optimization_step):
        """Blend `new_model`'s current weights into the averaged copy in place."""
        self.decay = self.get_decay(optimization_step)
        shadow = self.averaged_model.state_dict()
        merged = {}

        for name, value in new_model.named_parameters():
            if isinstance(value, dict):
                # Defensive skip of non-tensor entries.
                continue
            if name not in shadow:
                # First sighting of this parameter: seed the shadow with a copy.
                shadow[name] = value.float().clone() if value.ndim == 1 else copy.deepcopy(value)
            tracked = shadow[name]
            if value.requires_grad:
                # Standard EMA update: tracked = decay * tracked + (1 - decay) * value.
                tracked.mul_(self.decay)
                tracked.add_(value.data.to(dtype=tracked.dtype), alpha=1 - self.decay)
            else:
                # Frozen parameters are mirrored verbatim, not averaged.
                tracked.copy_(value.to(dtype=tracked.dtype).data)
            merged[name] = tracked

        # Buffers (e.g. running statistics) are copied through unaveraged.
        for name, buf in new_model.named_buffers():
            merged[name] = buf

        self.averaged_model.load_state_dict(merged, strict=False)
......@@ -25,10 +25,10 @@ from diffusers import (
BDDM,
DDIM,
DDPM,
Glide,
PNDM,
DDIMScheduler,
DDPMScheduler,
Glide,
GlideSuperResUNetModel,
GlideTextToImageUNetModel,
GradTTS,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment