Commit 58f0c616 authored by comfyanonymous

Fix clip vision issue with old transformers versions.

parent ae270f79
@@ -25,8 +25,7 @@ class ClipVisionModel():
     def encode_image(self, image):
         img = torch.clip((255. * image), 0, 255).round().int()
-        if len(img.shape) == 3:
-            img = [img]
+        img = list(map(lambda a: a, img))
         inputs = self.processor(images=img, return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
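For context, a minimal sketch of why passing a list helps. This rests on assumptions not confirmed by the diff itself: that self.processor is a Hugging Face transformers CLIPImageProcessor, and that older transformers releases reject a single batched 4D tensor while accepting a list of individual images.

    import torch

    # A ComfyUI-style image batch: (batch, height, width, channels), floats in [0, 1].
    image = torch.rand(2, 224, 224, 3)

    # Same preprocessing as encode_image: scale to integer pixel values in 0-255.
    img = torch.clip(255. * image, 0, 255).round().int()

    # The removed code only wrapped a single 3D image in a list, so a 4D batch
    # was handed to the processor as one tensor. The added line always produces
    # a list of 3D (H, W, C) tensors, since iterating a tensor splits it along
    # its first dimension.
    img_list = list(map(lambda a: a, img))
    assert all(a.shape == (224, 224, 3) for a in img_list)

Note that list(map(lambda a: a, img)) is equivalent to list(img); both split the batch along its first dimension into separate images.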