Commit 92eca60e authored by comfyanonymous

Fix for new transformers version.

parent 0f5352d9
 from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
 from .utils import load_torch_file, transformers_convert
 import os
+import torch

 class ClipVisionModel():
     def __init__(self, json_config):
...
@@ -20,7 +21,8 @@ class ClipVisionModel():
         self.model.load_state_dict(sd, strict=False)

     def encode_image(self, image):
-        inputs = self.processor(images=[image[0]], return_tensors="pt")
+        img = torch.clip((255. * image[0]), 0, 255).round().int()
+        inputs = self.processor(images=[img], return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
...
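
CLIPImageProcessor rescales incoming pixel values by 1/255 (its default do_rescale behavior), so ComfyUI's float images, which are already in [0, 1], would end up scaled a second time and come out nearly black under the newer transformers version. Rounding to integer pixel values in [0, 255] before preprocessing, as the patch does, keeps the final values in the range the model expects. A minimal sketch of the conversion, assuming a ComfyUI-style image batch (float tensor, shape [batch, height, width, channels], values in [0, 1]); the default-configured processor here is a stand-in for the one the model builds from its json config:

import torch
from transformers import CLIPImageProcessor

# Hypothetical stand-in for a ComfyUI image batch: floats in [0, 1],
# laid out as [batch, height, width, channels].
image = torch.rand(1, 224, 224, 3)

# Same conversion as the patch: scale to [0, 255], clamp, and round
# to integer pixel values before preprocessing.
img = torch.clip((255. * image[0]), 0, 255).round().int()

# With do_rescale=True (the default) the processor multiplies by
# 1/255 itself, so integer input lands back in [0, 1] exactly once.
processor = CLIPImageProcessor()
inputs = processor(images=[img], return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])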