# This key only exists in diffusers-layout VAE checkpoints, so its presence
# identifies the format; convert such state dicts to the internal naming
# scheme before loading. (Original fragment had these two statements
# duplicated verbatim — deduplicated here.)
if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys():  # diffusers format
    sd = diffusers_convert.convert_vae_state_dict(sd)
# Rough VRAM estimate for a VAE encode pass: constant * H * W * bytes-per-element.
# The fragment contained two conflicting revisions of this lambda; the
# dtype-aware one (constant 1767, scaled by model_management.dtype_size) is
# kept, as it supersedes the fixed-factor `2078 * 1.7` form.
# NOTE(review): assumes shape is (batch, channels, H, W) so shape[2]/shape[3]
# are the spatial dims — TODO confirm against callers.
# These are for AutoencoderKL and need tweaking (should be lower).
self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype)
# NOTE: this constant along with the one in the decode above are estimated
# from the mem usage for the VAE and could change.
# Fix: the dtype-aware form of memory_used_encode takes (shape, dtype), but
# this call passed only the shape. NOTE(review): assumes the VAE instance
# tracks its working dtype as self.vae_dtype — confirm the attribute name
# against the rest of the class, which is not visible in this fragment.
memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype)