Unverified Commit 7ad70cee authored by Ryan Dick, committed by GitHub

Model loading speed optimization (#5635)

Move an unchanging operation out of the loop for a speed benefit.
parent 60c5eb58
@@ -134,6 +134,8 @@ def load_model_dict_into_meta(model, state_dict, device=None, dtype=None, model_
     device = device or torch.device("cpu")
     dtype = dtype or torch.float32
 
+    accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys())
+
     unexpected_keys = []
     empty_state_dict = model.state_dict()
     for param_name, param in state_dict.items():
@@ -147,7 +149,6 @@ def load_model_dict_into_meta(model, state_dict, device=None, dtype=None, model_
                 f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example."
             )
 
-        accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys())
         if accepts_dtype:
             set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype)
         else:
......
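The hoisted expression is loop-invariant: `set_module_tensor_to_device` has a fixed signature, so checking it with `inspect.signature` once per parameter is wasted work that grows with the size of the state dict. Below is a minimal, self-contained sketch of the same pattern; the no-op stub and the `load_state_dict_sketch` wrapper are illustrative stand-ins, not the diffusers/accelerate implementations.

    import inspect

    def set_module_tensor_to_device(model, param_name, device, value=None, dtype=None):
        """No-op stand-in for accelerate's helper; only its signature matters here."""

    def load_state_dict_sketch(model, state_dict, device="cpu", dtype=None):
        # Loop-invariant: the helper's signature cannot change between iterations,
        # and inspect.signature() is comparatively expensive, so evaluate it once.
        accepts_dtype = "dtype" in inspect.signature(set_module_tensor_to_device).parameters

        for param_name, param in state_dict.items():
            if accepts_dtype:
                set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype)
            else:
                set_module_tensor_to_device(model, param_name, device, value=param)

As a side note, `inspect.signature(...).parameters` is already a mapping, so the membership test works directly; the `set(...keys())` wrapper in the committed code is redundant but harmless.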