Commit fa2cca05 authored by comfyanonymous's avatar comfyanonymous
Browse files

Don't initialize CLIPVision weights to default values.

parent 6b774589
from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor, modeling_utils
from .utils import load_torch_file, transformers_convert from .utils import load_torch_file, transformers_convert
import os import os
import torch import torch
...@@ -6,7 +6,8 @@ import torch ...@@ -6,7 +6,8 @@ import torch
class ClipVisionModel(): class ClipVisionModel():
def __init__(self, json_config): def __init__(self, json_config):
config = CLIPVisionConfig.from_json_file(json_config) config = CLIPVisionConfig.from_json_file(json_config)
self.model = CLIPVisionModelWithProjection(config) with modeling_utils.no_init_weights():
self.model = CLIPVisionModelWithProjection(config)
self.processor = CLIPImageProcessor(crop_size=224, self.processor = CLIPImageProcessor(crop_size=224,
do_center_crop=True, do_center_crop=True,
do_convert_rgb=True, do_convert_rgb=True,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment