Commit 73f60740 authored by comfyanonymous

Slightly cleaner code.

parent 0108616b
@@ -32,11 +32,10 @@ def load_model_from_config(config, ckpt, verbose=False, load_state_dict_to=[]):
             y = x.replace("cond_stage_model.transformer.", "cond_stage_model.transformer.text_model.")
             sd[y] = sd.pop(x)
 
-    try:
-        if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
-            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = sd['cond_stage_model.transformer.text_model.embeddings.position_ids'].round()
-    except:
-        pass
+    if 'cond_stage_model.transformer.text_model.embeddings.position_ids' in sd:
+        ids = sd['cond_stage_model.transformer.text_model.embeddings.position_ids']
+        if ids.dtype == torch.float32:
+            sd['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round()
 
     for x in load_state_dict_to:
         x.load_state_dict(sd, strict=False)
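
For context, the change above replaces a blanket try/except with an explicit dtype check: the CLIP text encoder's position_ids tensor is only rounded when a checkpoint stored it as float32. A minimal sketch of the same guard in isolation follows; the fix_position_ids helper name and the sample state dict are illustrative, not part of the commit.

import torch

def fix_position_ids(sd):
    # Hypothetical helper mirroring the hunk above: round the text encoder's
    # position_ids only when a checkpoint stored them as float32, instead of
    # hiding every possible failure behind a bare try/except.
    key = 'cond_stage_model.transformer.text_model.embeddings.position_ids'
    if key in sd:
        ids = sd[key]
        if ids.dtype == torch.float32:
            # Float-stored position ids can carry small numerical drift;
            # rounding restores exact integer indices before the state dict
            # is loaded with strict=False.
            sd[key] = ids.round()
    return sd

# Illustrative usage with a made-up state dict entry of shape (1, 77).
sd = {'cond_stage_model.transformer.text_model.embeddings.position_ids':
      torch.arange(77, dtype=torch.float32).reshape(1, -1) + 1e-4}
fix_position_ids(sd)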