"vscode:/vscode.git/clone" did not exist on "5d952e3f83cd5b8a6eeab09090f102fbfb5375ff"
Unverified commit 3201903d authored by Isamu Isozaki, committed by GitHub

Retrieval Augmented Diffusion Models (#3297)



* Resetting rdm pr

* Fixed styles

* Fixed style

* Moved to rdm folder+fixed slight errors

* Removed config diff

* Started adding tests

* Adding retrieved images

* Fixed faiss import

* Fixed import errors

* Fixing tests

* Added require_faiss

* Updated dependency table

* Attempt solving consistency test

* Fixed truncation and vocab size issue

* Passed common tests

* Finished up cpu testing on pipeline

* Passed all tests locally

* Removed some slow tests

* Removed diffs from test_pipeline_common

* Remove logs

* Removed diffs from test_pipelines_common

* Fixed style

* Fully fixed styles on diffs

* Fixed name

* Proper rename

* Fixed dummies

* Fixed issue with dummyonnx

* Fixed black style

* Fixed dummies

* Changed ordering

* Fixed logging

* Fixing

* Fixing

* quality

* Debugging regex

* Fix dummies with guess

* Fixed typo

* Attempt fix dummies

* black

* ruff

* fixed ordering

* Logging

* Attempt fix

* Attempt fix dummy

* Attempt fixing styles

* Fixed faiss dependency

* Removed unnecessary deprecations

* Finished up main changes

* Added doc

* Passed tests

* Fixed tests

* Remove invisible watermark

* Fixed ruff errors

* Added prompt embed to tests

* Added tests and made retriever an optional component

* Fixed styles

* Made faiss a dependency of pipeline

* Logging

* Fixed dummies

* Make pipeline test work

* Fixed style

* Moved to research projects

* Remove diff

* Fixed style error

---------
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
parent 705c592e
## Retrieval Augmented Diffusion Models with diffusers
**This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Isamu Isozaki (isamu-isozaki) on GitHub.**
The aim of this project is to provide retrieval augmented diffusion models to diffusers. A minimal usage sketch is shown below.
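A minimal usage sketch, assuming `pipeline_rdm.py` and `retriever.py` from this folder are importable and that `path/to/rdm-checkpoint` is replaced with an RDM-style diffusers checkpoint (the checkpoint path, prompt, and output filename below are placeholders; building the retriever downloads the default `Isamu136/oxford_pets_with_l14_emb` dataset and embeds it with CLIP):

```python
import torch

from pipeline_rdm import RDMPipeline
from retriever import IndexConfig, Retriever

# Build a FAISS-backed retriever over CLIP image embeddings.
# IndexConfig defaults to the Isamu136/oxford_pets_with_l14_emb dataset;
# point dataset_name at your own image dataset if desired.
config = IndexConfig()
retriever = Retriever(config)

# "path/to/rdm-checkpoint" is a placeholder for a checkpoint that contains the
# vae / clip / tokenizer / unet / scheduler / feature_extractor components.
pipe = RDMPipeline.from_pretrained("path/to/rdm-checkpoint", torch_dtype=torch.float16)
pipe.retriever = retriever
pipe = pipe.to("cuda")

# Nearest-neighbor images for the prompt are retrieved, embedded with CLIP and
# appended to the text embedding as extra conditioning.
image = pipe("a photo of a dog", knn=10).images[0]
image.save("rdm_sample.png")
```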
import inspect
from typing import Callable, List, Optional, Union
import torch
from PIL import Image
from retriever import Retriever, normalize_images, preprocess_images
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
ImagePipelineOutput,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
logging,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import is_accelerate_available, randn_tensor
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class RDMPipeline(DiffusionPipeline):
r"""
Pipeline for text-to-image generation using Retrieval Augmented Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
clip ([`CLIPModel`]):
Frozen CLIP model. Retrieval Augmented Diffusion uses the CLIP model, specifically the
[clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
feature_extractor ([`CLIPFeatureExtractor`]):
Model that preprocesses the retrieved images before they are encoded by `clip`.
"""
def __init__(
self,
vae: AutoencoderKL,
clip: CLIPModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
],
feature_extractor: CLIPFeatureExtractor,
retriever: Optional[Retriever] = None,
):
super().__init__()
self.register_modules(
vae=vae,
clip=clip,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
feature_extractor=feature_extractor,
)
# The helper methods below are adapted from the Stable Diffusion pipeline.
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.retriever = retriever
def enable_xformers_memory_efficient_attention(self):
r"""
Enable memory efficient attention as implemented in xformers.
When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference
time. Speed up at training time is not guaranteed.
Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention
is used.
"""
self.unet.set_use_memory_efficient_attention_xformers(True)
def disable_xformers_memory_efficient_attention(self):
r"""
Disable memory efficient attention as implemented in xformers.
"""
self.unet.set_use_memory_efficient_attention_xformers(False)
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding.
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding.
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
`attention_head_dim` must be a multiple of `slice_size`.
"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
if isinstance(self.unet.config.attention_head_dim, int):
slice_size = self.unet.config.attention_head_dim // 2
else:
slice_size = self.unet.config.attention_head_dim[0] // 2
self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
r"""
Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
back to computing attention in one step.
"""
# set slice_size = `None` to disable `attention slicing`
self.enable_attention_slicing(None)
def enable_sequential_cpu_offload(self):
r"""
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the unet,
clip and vae have their state dicts saved to CPU and are then moved to a `torch.device('meta')` and loaded
to the GPU only when their specific submodule has its `forward` method called.
"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
device = torch.device("cuda")
for cpu_offloaded_model in [self.unet, self.clip, self.vae]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model, device)
@property
def _execution_device(self):
r"""
Returns the device on which the pipeline's models will be executed. After calling
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
hooks.
"""
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def _encode_prompt(self, prompt):
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
prompt_embeds = self.clip.get_text_features(text_input_ids.to(self.device))
prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True)
prompt_embeds = prompt_embeds[:, None, :]
return prompt_embeds
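# Embed the retrieved neighbor images with CLIP, L2-normalize the features, and reshape them to
# (batch_size, num_retrieved, embed_dim) so they can be concatenated with the prompt embedding.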
def _encode_image(self, retrieved_images, batch_size):
if len(retrieved_images[0]) == 0:
return None
for i in range(len(retrieved_images)):
retrieved_images[i] = normalize_images(retrieved_images[i])
retrieved_images[i] = preprocess_images(retrieved_images[i], self.feature_extractor).to(
self.clip.device, dtype=self.clip.dtype
)
_, c, h, w = retrieved_images[0].shape
retrieved_images = torch.reshape(torch.cat(retrieved_images, dim=0), (-1, c, h, w))
image_embeddings = self.clip.get_image_features(retrieved_images)
image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True)
_, d = image_embeddings.shape
image_embeddings = torch.reshape(image_embeddings, (batch_size, -1, d))
return image_embeddings
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def retrieve_images(self, retrieved_images, prompt_embeds, knn=10):
if self.retriever is not None:
additional_images = self.retriever.retrieve_imgs_batch(prompt_embeds[:, 0].cpu(), knn).total_examples
for i in range(len(retrieved_images)):
retrieved_images[i] += additional_images[i][self.retriever.config.image_column]
return retrieved_images
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
retrieved_images: Optional[List[Image.Image]] = None,
height: int = 768,
width: int = 768,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: Optional[int] = 1,
knn: Optional[int] = 10,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
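retrieved_images (`List[PIL.Image.Image]`, *optional*):
Images to condition the generation on, in addition to any neighbors fetched by the `retriever`.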
height (`int`, *optional*, defaults to 768):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 768):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
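knn (`int`, *optional*, defaults to 10):
The number of nearest-neighbor images to retrieve per prompt when a `retriever` is set.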
Returns:
[`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
`return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if retrieved_images is not None:
retrieved_images = [retrieved_images for _ in range(batch_size)]
else:
retrieved_images = [[] for _ in range(batch_size)]
device = self._execution_device
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt_embeds is None:
prompt_embeds = self._encode_prompt(prompt)
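# Retrieve nearest-neighbor images for the prompt (via the FAISS-backed retriever, if one is set),
# embed them with CLIP and append the image embeddings to the text embedding as extra conditioning
# tokens; this is the retrieval-augmentation step.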
retrieved_images = self.retrieve_images(retrieved_images, prompt_embeds, knn=knn)
image_embeddings = self._encode_image(retrieved_images, batch_size)
if image_embeddings is not None:
prompt_embeds = torch.cat([prompt_embeds, image_embeddings], dim=1)
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_embeddings = torch.zeros_like(prompt_embeds).to(prompt_embeds.device)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([uncond_embeddings, prompt_embeds])
# get the initial random noise unless the user supplied it
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
image = self.image_processor.postprocess(
image, output_type=output_type, do_denormalize=[True] * image.shape[0]
)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
import os
from typing import List
import faiss
import numpy as np
import torch
from datasets import Dataset, load_dataset
from PIL import Image
from transformers import CLIPFeatureExtractor, CLIPModel, PretrainedConfig
from diffusers import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def normalize_images(images: List[Image.Image]):
images = [np.array(image) for image in images]
images = [image / 127.5 - 1 for image in images]
return images
def preprocess_images(images: List[np.array], feature_extractor: CLIPFeatureExtractor) -> torch.FloatTensor:
"""
Preprocesses a list of images into a batch of tensors.
Args:
images (:obj:`List[np.ndarray]`):
A list of normalized images (values in `[-1, 1]`) to preprocess.
Returns:
:obj:`torch.FloatTensor`: A batch of tensors.
"""
images = [np.array(image) for image in images]
images = [(image + 1.0) / 2.0 for image in images]
images = feature_extractor(images, return_tensors="pt").pixel_values
return images
class IndexConfig(PretrainedConfig):
def __init__(
self,
clip_name_or_path="openai/clip-vit-large-patch14",
dataset_name="Isamu136/oxford_pets_with_l14_emb",
image_column="image",
index_name="embeddings",
index_path=None,
dataset_set="train",
metric_type=faiss.METRIC_L2,
faiss_device=-1,
**kwargs,
):
super().__init__(**kwargs)
self.clip_name_or_path = clip_name_or_path
self.dataset_name = dataset_name
self.image_column = image_column
self.index_name = index_name
self.index_path = index_path
self.dataset_set = dataset_set
self.metric_type = metric_type
self.faiss_device = faiss_device
class Index:
"""
Each index for a retrieval model is specific to the clip model used and the dataset used.
"""
def __init__(self, config: IndexConfig, dataset: Dataset):
self.config = config
self.dataset = dataset
self.index_initialized = False
self.index_name = config.index_name
self.index_path = config.index_path
self.init_index()
def set_index_name(self, index_name: str):
self.index_name = index_name
def init_index(self):
if not self.index_initialized:
if self.index_path and self.index_name:
try:
self.dataset.add_faiss_index(
column=self.index_name, metric_type=self.config.metric_type, device=self.config.faiss_device
)
self.index_initialized = True
except Exception as e:
print(e)
logger.info("Index not initialized")
if self.index_name in self.dataset.features:
self.dataset.add_faiss_index(column=self.index_name)
self.index_initialized = True
def build_index(
self,
model=None,
feature_extractor: CLIPFeatureExtractor = None,
torch_dtype=torch.float32,
):
if not self.index_initialized:
model = model or CLIPModel.from_pretrained(self.config.clip_name_or_path).to(dtype=torch_dtype)
feature_extractor = feature_extractor or CLIPFeatureExtractor.from_pretrained(
self.config.clip_name_or_path
)
self.dataset = get_dataset_with_emb_from_clip_model(
self.dataset,
model,
feature_extractor,
image_column=self.config.image_column,
index_name=self.config.index_name,
)
self.init_index()
def retrieve_imgs(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.get_nearest_examples(self.index_name, vec, k=k)
def retrieve_imgs_batch(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.get_nearest_examples_batch(self.index_name, vec, k=k)
def retrieve_indices(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.search(self.index_name, vec, k=k)
def retrieve_indices_batch(self, vec, k: int = 20):
vec = np.array(vec).astype(np.float32)
return self.dataset.search_batch(self.index_name, vec, k=k)
class Retriever:
def __init__(
self,
config: IndexConfig,
index: Index = None,
dataset: Dataset = None,
model=None,
feature_extractor: CLIPFeatureExtractor = None,
):
self.config = config
self.index = index or self._build_index(config, dataset, model=model, feature_extractor=feature_extractor)
@classmethod
def from_pretrained(
cls,
retriever_name_or_path: str,
index: Index = None,
dataset: Dataset = None,
model=None,
feature_extractor: CLIPFeatureExtractor = None,
**kwargs,
):
config = kwargs.pop("config", None) or IndexConfig.from_pretrained(retriever_name_or_path, **kwargs)
return cls(config, index=index, dataset=dataset, model=model, feature_extractor=feature_extractor)
@staticmethod
def _build_index(
config: IndexConfig, dataset: Dataset = None, model=None, feature_extractor: CLIPFeatureExtractor = None
):
dataset = dataset or load_dataset(config.dataset_name)
dataset = dataset[config.dataset_set]
index = Index(config, dataset)
index.build_index(model=model, feature_extractor=feature_extractor)
return index
def save_pretrained(self, save_directory):
os.makedirs(save_directory, exist_ok=True)
if self.config.index_path is None:
index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
self.index.dataset.get_index(self.config.index_name).save(index_path)
self.config.index_path = index_path
self.config.save_pretrained(save_directory)
def init_retrieval(self):
logger.info("initializing retrieval")
self.index.init_index()
def retrieve_imgs(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_imgs(embeddings, k)
def retrieve_imgs_batch(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_imgs_batch(embeddings, k)
def retrieve_indices(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_indices(embeddings, k)
def retrieve_indices_batch(self, embeddings: np.ndarray, k: int):
return self.index.retrieve_indices_batch(embeddings, k)
def __call__(
self,
embeddings,
k: int = 20,
):
return self.index.retrieve_imgs(embeddings, k)
def map_txt_to_clip_feature(clip_model, tokenizer, prompt):
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > tokenizer.model_max_length:
removed_text = tokenizer.batch_decode(text_input_ids[:, tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : tokenizer.model_max_length]
text_embeddings = clip_model.get_text_features(text_input_ids.to(clip_model.device))
text_embeddings = text_embeddings / torch.linalg.norm(text_embeddings, dim=-1, keepdim=True)
text_embeddings = text_embeddings[:, None, :]
return text_embeddings[0][0].cpu().detach().numpy()
def map_img_to_model_feature(model, feature_extractor, imgs, device):
for i, image in enumerate(imgs):
if not image.mode == "RGB":
imgs[i] = image.convert("RGB")
imgs = normalize_images(imgs)
retrieved_images = preprocess_images(imgs, feature_extractor).to(device)
image_embeddings = model(retrieved_images)
image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True)
image_embeddings = image_embeddings[None, ...]
return image_embeddings.cpu().detach().numpy()[0][0]
def get_dataset_with_emb_from_model(dataset, model, feature_extractor, image_column="image", index_name="embeddings"):
return dataset.map(
lambda example: {
index_name: map_img_to_model_feature(model, feature_extractor, [example[image_column]], model.device)
}
)
def get_dataset_with_emb_from_clip_model(
dataset, clip_model, feature_extractor, image_column="image", index_name="embeddings"
):
return dataset.map(
lambda example: {
index_name: map_img_to_model_feature(
clip_model.get_image_features, feature_extractor, [example[image_column]], clip_model.device
)
}
)
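# Minimal, hedged usage sketch for this module (the prompt and dataset below are only illustrative;
# building the index downloads the configured dataset and embeds every image with CLIP):
#
#   from transformers import CLIPModel, CLIPTokenizer
#   config = IndexConfig(dataset_name="Isamu136/oxford_pets_with_l14_emb")
#   retriever = Retriever(config)  # loads the dataset and builds the FAISS index over CLIP embeddings
#   clip_model = CLIPModel.from_pretrained(config.clip_name_or_path)
#   tokenizer = CLIPTokenizer.from_pretrained(config.clip_name_or_path)
#   query = map_txt_to_clip_feature(clip_model, tokenizer, "a photo of a cat")
#   scores, examples = retriever.retrieve_imgs(query, k=5)
#   neighbor_images = examples[config.image_column]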