Commit ae270f79 authored by comfyanonymous

Fix potential issue with batch size and clip vision.

parent 27b87c25
@@ -25,6 +25,8 @@ class ClipVisionModel():
     def encode_image(self, image):
         img = torch.clip((255. * image), 0, 255).round().int()
+        if len(img.shape) == 3:
+            img = [img]
         inputs = self.processor(images=img, return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
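The added check handles the case where encode_image receives a single image tensor rather than a batch, so the processor always sees a batch of at least one image. Below is a minimal sketch of that shape handling in isolation, under the assumption that a single image arrives as an (H, W, C) tensor and a batch as (B, H, W, C); the helper name wrap_if_single and the example shapes are illustrative and not part of the commit:

import torch

def wrap_if_single(img):
    # Mirrors the added check: a lone (H, W, C) image is wrapped in a list
    # so downstream code receives a batch of one instead of a bare tensor.
    if len(img.shape) == 3:
        img = [img]
    return img

single_image = torch.rand(224, 224, 3)      # len(shape) == 3 -> gets wrapped
batch_images = torch.rand(4, 224, 224, 3)   # len(shape) == 4 -> passed through

print(len(wrap_if_single(single_image)))    # 1 (a list holding one image)
print(wrap_if_single(batch_images).shape)   # torch.Size([4, 224, 224, 3])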