Unverified commit c1e139c2 authored by Naman Garg and committed by GitHub

Adding hiera (#30356)



* initialized Structure

* Updated variable names

* Added Config class, basic HF setup, convert_to_hf

* Fixed Convert function, added hiera to HF files, Initialized test files

* better naming for x in forward pass

* Moved utils to hiera

* Change hiera -> hiera_model

* Fixed integration into transformers

* Fix: Convert Checkpoint

* added documentation for hiera

* added documentation for hiera

* added Docstrings to models, Transformers based changes

* make style and quality

* make style and quality

* Integration & Block tests running

* Fixed bugs

* Removed timm dependency

* added HieraBlock

* fixed: Model name

* added tests for HieraModel, HieraBlock

* fixed imports

* fixed quality & copies

* Fixes

* Update docs/source/en/model_doc/hiera.md

Fix name
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Update docs/source/en/model_doc/hiera.md
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Update docs/source/en/model_doc/hiera.md
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Update src/transformers/models/hiera/configuration_hiera.py
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Update src/transformers/models/hiera/configuration_hiera.py
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Update src/transformers/models/hiera/modeling_hiera.py
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Update src/transformers/models/hiera/modeling_hiera.py
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* Fixed formatting

* Code quality & Import differences

* quality and repo-consistency fix

* fixed no torch error

* Docstring fix

* Docstring fix

* doc string fix

* fixed example usage

* Resolved issues in modeling_hiera

* Removed Hiera MAE

* Added test and resolved bug

* fixed doc string

* First commit

* Finished conversion script and model forward working

* Resolved all issues

* nits

* Improving tests

* Nits

* More nits

* Improving HieraForMaskedImageModeling

* More improvements and nits

* Fixed docstrings of outputs

* More fixes

* More improvements

* Updated conversion script

* Fixed docstrings

* Improved tests

* Fixed attention outputs test

* All tests green

* Removed unnecessary file

* contribution attribution

* Resolved a few issues

* Resolved Comments

* Updated model repo id and fixed bugs

* Removed loss print

* Make tests green

* Updated docstrings

* Fix style

* Fixed num_heads in config

* Removed unnecessary video checkpoint related code in the conversion script

* Fix style

* Changed atol in conversion script

* HieraConfig

* Fix copies

* Fixed typo

* Resolved a few issues

* make

* converted conv_nd -> nn.Module

* Removed video complexities

* Removed video complexities

* fix style

* Addressing comments

* Update src/transformers/models/hiera/modeling_hiera.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* Update src/transformers/models/hiera/modeling_hiera.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* Update src/transformers/models/hiera/modeling_hiera.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* Fix style

* Fixed tests

* Fixed typo

* Fixed interpolate test

* Made torch fx compatible

* Made sure image processor is correct

* Addressed comments

* Noise directly as torch

* Remove unnecessary attr

* Added return_dict

* Update src/transformers/models/hiera/__init__.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* Updated checkpoints

* [run_slow] hiera

* Fixed device mismatch

* [run_slow] hiera

* Fixed GPU tests

* [run_slow] hiera

---------
Co-authored-by: Ubuntu <ubuntu@ip-172-31-29-50.us-east-2.compute.internal>
Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>
Co-authored-by: Eduardo Pacheco <eduardo.pach@hotmail.com>
Co-authored-by: Eduardo Pacheco <69953243+EduardoPach@users.noreply.github.com>
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
parent 574e68d5
@@ -603,6 +603,8 @@
title: FocalNet
- local: model_doc/glpn
title: GLPN
- local: model_doc/hiera
title: Hiera
- local: model_doc/imagegpt
title: ImageGPT
- local: model_doc/levit
@@ -680,6 +682,8 @@
title: CLAP
- local: model_doc/encodec
title: EnCodec
- local: model_doc/hiera
title: Hiera
- local: model_doc/hubert
title: Hubert
- local: model_doc/mctct
@@ -159,6 +159,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Grounding DINO](model_doc/grounding-dino) | ✅ | ❌ | ❌ |
| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ |
| [HerBERT](model_doc/herbert) | ✅ | ✅ | ✅ |
| [Hiera](model_doc/hiera) | ✅ | ❌ | ❌ |
| [Hubert](model_doc/hubert) | ✅ | ✅ | ❌ |
| [I-BERT](model_doc/ibert) | ✅ | ❌ | ❌ |
| [IDEFICS](model_doc/idefics) | ✅ | ✅ | ❌ |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Hiera
## Overview
Hiera was proposed in [Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles](https://arxiv.org/abs/2306.00989) by Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, Jitendra Malik, Yanghao Li, and Christoph Feichtenhofer.
The paper introduces "Hiera," a hierarchical Vision Transformer that simplifies the architecture of modern hierarchical vision transformers by removing unnecessary components without compromising on accuracy or efficiency. Unlike traditional transformers that add complex vision-specific components to improve supervised classification performance, Hiera demonstrates that such additions, often termed "bells-and-whistles," are not essential for high accuracy. By leveraging a strong visual pretext task (MAE) for pretraining, Hiera retains simplicity and achieves superior accuracy and speed both in inference and training across various image and video recognition tasks. The approach suggests that spatial biases required for vision tasks can be effectively learned through proper pretraining, eliminating the need for added architectural complexity.
The abstract from the paper is the following:
*Modern hierarchical vision transformers have added several vision-specific components in the pursuit of supervised classification performance. While these components lead to effective accuracies and attractive FLOP counts, the added complexity actually makes these transformers slower than their vanilla ViT counterparts. In this paper, we argue that this additional bulk is unnecessary. By pretraining with a strong visual pretext task (MAE), we can strip out all the bells-and-whistles from a state-of-the-art multi-stage vision transformer without losing accuracy. In the process, we create Hiera, an extremely simple hierarchical vision transformer that is more accurate than previous models while being significantly faster both at inference and during training. We evaluate Hiera on a variety of tasks for image and video recognition. Our code and models are available at https://github.com/facebookresearch/hiera.*
This model was a joint contribution by [EduardoPacheco](https://huggingface.co/EduardoPacheco) and [namangarg110](https://huggingface.co/namangarg110). The original code can be found [here](https://github.com/facebookresearch/hiera).
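## Usage example

The snippet below is a minimal sketch of feature extraction with `HieraModel`. The checkpoint name `facebook/hiera-base-224` follows the reference in [`HieraConfig`] and is an assumption here (the published checkpoint names may differ); the image processor for Hiera resolves to `BitImageProcessor` through `AutoImageProcessor`.

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, HieraModel

# checkpoint name assumed from the HieraConfig docstring
checkpoint = "facebook/hiera-base-224"
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = HieraModel.from_pretrained(checkpoint)

# same COCO test image used by the conversion script
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# hidden states of the last stage, flattened over the spatial dimensions
print(outputs.last_hidden_state.shape)
```

For image classification, `HieraForImageClassification` can be loaded the same way from a checkpoint fine-tuned on ImageNet-1k.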
## HieraConfig
[[autodoc]] HieraConfig
## HieraModel
[[autodoc]] HieraModel
- forward
## HieraForPreTraining
[[autodoc]] HieraForPreTraining
- forward
## HieraForImageClassification
[[autodoc]] HieraForImageClassification
- forward
@@ -462,6 +462,7 @@ _import_structure = {
"GroupViTVisionConfig",
],
"models.herbert": ["HerbertTokenizer"],
"models.hiera": ["HieraConfig"],
"models.hubert": ["HubertConfig"],
"models.ibert": ["IBertConfig"],
"models.idefics": ["IdeficsConfig"],
@@ -2285,6 +2286,15 @@ else:
"GroupViTVisionModel",
]
)
_import_structure["models.hiera"].extend(
[
"HieraBackbone",
"HieraForImageClassification",
"HieraForPreTraining",
"HieraModel",
"HieraPreTrainedModel",
]
)
_import_structure["models.hubert"].extend(
[
"HubertForCTC",
@@ -5112,6 +5122,7 @@ if TYPE_CHECKING:
GroupViTVisionConfig,
)
from .models.herbert import HerbertTokenizer
from .models.hiera import HieraConfig
from .models.hubert import HubertConfig
from .models.ibert import IBertConfig
from .models.idefics import (
@@ -6795,6 +6806,13 @@ if TYPE_CHECKING:
GroupViTTextModel,
GroupViTVisionModel,
)
from .models.hiera import (
HieraBackbone,
HieraForImageClassification,
HieraForPreTraining,
HieraModel,
HieraPreTrainedModel,
)
from .models.hubert import (
HubertForCTC,
HubertForSequenceClassification,
@@ -105,6 +105,7 @@ from . import (
grounding_dino,
groupvit,
herbert,
hiera,
hubert,
ibert,
idefics,
@@ -122,6 +122,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
("graphormer", "GraphormerConfig"),
("grounding-dino", "GroundingDinoConfig"),
("groupvit", "GroupViTConfig"),
("hiera", "HieraConfig"),
("hubert", "HubertConfig"),
("ibert", "IBertConfig"),
("idefics", "IdeficsConfig"),
@@ -403,6 +404,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
("grounding-dino", "Grounding DINO"),
("groupvit", "GroupViT"),
("herbert", "HerBERT"),
("hiera", "Hiera"),
("hubert", "Hubert"),
("ibert", "I-BERT"),
("idefics", "IDEFICS"),
@@ -85,6 +85,7 @@ else:
("glpn", ("GLPNImageProcessor",)),
("grounding-dino", ("GroundingDinoImageProcessor",)),
("groupvit", ("CLIPImageProcessor",)),
("hiera", ("BitImageProcessor",)),
("idefics", ("IdeficsImageProcessor",)),
("idefics2", ("Idefics2ImageProcessor",)),
("imagegpt", ("ImageGPTImageProcessor",)),
@@ -119,6 +119,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
("graphormer", "GraphormerModel"),
("grounding-dino", "GroundingDinoModel"),
("groupvit", "GroupViTModel"),
("hiera", "HieraModel"),
("hubert", "HubertModel"),
("ibert", "IBertModel"),
("idefics", "IdeficsModel"),
@@ -295,6 +296,7 @@ MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
("gpt2", "GPT2LMHeadModel"),
("gpt_bigcode", "GPTBigCodeForCausalLM"),
("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
("hiera", "HieraForPreTraining"),
("ibert", "IBertForMaskedLM"),
("idefics", "IdeficsForVisionText2Text"),
("idefics2", "Idefics2ForConditionalGeneration"),
@@ -535,6 +537,7 @@ MODEL_FOR_IMAGE_MAPPING_NAMES = OrderedDict(
("efficientnet", "EfficientNetModel"),
("focalnet", "FocalNetModel"),
("glpn", "GLPNModel"),
("hiera", "HieraModel"),
("imagegpt", "ImageGPTModel"),
("levit", "LevitModel"),
("mobilenet_v1", "MobileNetV1Model"),
@@ -610,6 +613,7 @@ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
),
("efficientnet", "EfficientNetForImageClassification"),
("focalnet", "FocalNetForImageClassification"),
("hiera", "HieraForImageClassification"),
("imagegpt", "ImageGPTForImageClassification"),
(
"levit",
@@ -1258,6 +1262,7 @@ MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict(
("dinat", "DinatBackbone"),
("dinov2", "Dinov2Backbone"),
("focalnet", "FocalNetBackbone"),
("hiera", "HieraBackbone"),
("maskformer-swin", "MaskFormerSwinBackbone"),
("nat", "NatBackbone"),
("pvt_v2", "PvtV2Backbone"),
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {"configuration_hiera": ["HieraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_hiera"] = [
"HieraForImageClassification",
"HieraForPreTraining",
"HieraBackbone",
"HieraModel",
"HieraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_hiera import HieraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_hiera import (
HieraBackbone,
HieraForImageClassification,
HieraForPreTraining,
HieraModel,
HieraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hiera model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class HieraConfig(BackboneConfigMixin, PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`HieraModel`]. It is used to instantiate a Hiera
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Hiera
[facebook/hiera-base-224](https://huggingface.co/facebook/hiera-base-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
image_size (`list(int)`, *optional*, defaults to `[224, 224]`):
The size (resolution) of each input image in the format (height, width).
patch_size (`list(int)`, *optional*, defaults to `[7, 7]`):
The size (resolution) of each patch.
patch_stride (`list(int)`, *optional*, defaults to `[4, 4]`):
The stride of the patch.
patch_padding (`list(int)`, *optional*, defaults to `[3, 3]`):
The padding of the patch.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of mlp hidden dim to embedding dim.
depths (`list(int)`, *optional*, defaults to `[2, 3, 16, 3]`):
Number of layers in each stage of the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[1, 2, 4, 8]`):
Number of attention heads in each stage of the Transformer encoder.
embed_dim_multiplier (`float`, *optional*, defaults to 2.0):
The multiplier applied to the patch embedding dimensionality at each stage of the Transformer encoder.
num_query_pool (`int`, *optional*, defaults to 3):
The number of query pool stages.
query_stride (`list(int)`, *optional*, defaults to `[2, 2]`):
The stride of the query pool.
masked_unit_size (`list(int)`, *optional*, defaults to `[8, 8]`):
The size of the masked unit.
masked_unit_attention (`list(bool)`, *optional*, defaults to `[True, True, False, False]`):
Whether to use masked unit attention in each layer of the Transformer encoder.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The drop path rate.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices and
the zero_initializer for initializing all bias vectors.
layer_norm_init (`float`, *optional*, defaults to 1.0):
The initial weight value for layer normalization layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*):
Dimensionality of decoder embeddings for MAE pretraining.
decoder_depth (`int`, *optional*):
Depth of the decoder for MAE pretraining.
decoder_num_heads (`int`, *optional*):
Number of attention heads in each layer of the decoder for MAE pretraining.
normalize_pixel_loss (`bool`, *optional*, defaults to `True`):
Whether to normalize the pixel loss by the number of pixels.
mask_ratio (`float`, *optional*, defaults to 0.6):
The ratio of masked tokens in the input.
out_features (`List[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`List[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import HieraConfig, HieraModel
>>> # Initializing a Hiera hiera-base-224 style configuration
>>> configuration = HieraConfig()
>>> # Initializing a model (with random weights) from the hiera-base-224 style configuration
>>> model = HieraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "hiera"
attribute_map = {"num_hidden_layers": "num_layers"}
def __init__(
self,
embed_dim=96,
image_size=[224, 224],
patch_size=[7, 7],
patch_stride=[4, 4],
patch_padding=[3, 3],
mlp_ratio=4.0,
depths=[2, 3, 16, 3],
num_heads=[1, 2, 4, 8],
embed_dim_multiplier=2.0,
num_query_pool=3,
query_stride=[2, 2],
masked_unit_size=[8, 8],
masked_unit_attention=[True, True, False, False],
drop_path_rate=0.0,
num_channels=3,
hidden_act="gelu",
initializer_range=0.02,
layer_norm_init=1.0,
layer_norm_eps=1e-6,
decoder_hidden_size=None,
decoder_depth=None,
decoder_num_heads=None,
normalize_pixel_loss=True,
mask_ratio=0.6,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
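# sanity check on the mask-unit geometry; e.g. with the default values masked_unit_size[0] = 8 and query_stride[0] ** (len(depths) - 1) = 2 ** 3 = 8, so 8 % 8 == 0 and the check passes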
if masked_unit_size[0] % query_stride[0] ** (len(depths) - 1) != 0:
raise ValueError(
f"masked_unit_size[0] ({masked_unit_size[0]}) must be divisible by query_stride[0] ({query_stride[0]}) "
f"raised to the power of the number of layers ({len(depths) - 1})"
)
if num_query_pool >= len(depths):
raise ValueError(
f"num_query_pool ({num_query_pool}) must be less than the number of layers ({len(depths)})"
)
self.embed_dim = embed_dim
self.image_size = image_size
self.patch_size = patch_size
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.mlp_ratio = mlp_ratio
self.depths = depths
self.num_heads = num_heads
self.num_layers = len(depths)
self.embed_dim_multiplier = embed_dim_multiplier
self.num_query_pool = num_query_pool
self.query_stride = query_stride
self.masked_unit_size = masked_unit_size
self.masked_unit_attention = masked_unit_attention
self.drop_path_rate = drop_path_rate
self.num_channels = num_channels
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_init = layer_norm_init
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.decoder_depth = decoder_depth
self.decoder_num_heads = decoder_num_heads
self.normalize_pixel_loss = normalize_pixel_loss
self.mask_ratio = mask_ratio
# we set the hidden_size attribute in order to make Hiera work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * embed_dim_multiplier ** (len(depths) - 1))
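# with the default values this gives int(96 * 2.0 ** 3) = 768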
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Hiera checkpoints from the original repository.
URL: https://github.com/facebookresearch/hiera
"""
import argparse
import json
import math
from typing import Dict, Tuple
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, HieraConfig, HieraForImageClassification, HieraForPreTraining, HieraModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config: HieraConfig, base_model: bool, mae_model: bool):
rename_keys = []
# fmt: off
num_stages = len(config.depths)
# embedding dimensions for input and stages
dims = [config.embed_dim] + [int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(num_stages)]
global_layer_idx = 0
for stage_idx in range(num_stages):
dim_in = dims[stage_idx]
dim_out = dims[stage_idx + 1]
for layer_idx in range(config.depths[stage_idx]):
rename_keys.append((f"blocks.{global_layer_idx}.norm1.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_before.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.norm1.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_before.bias"))
rename_keys.append((f"blocks.{global_layer_idx}.attn.qkv.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.qkv.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.attn.qkv.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.qkv.bias"))
rename_keys.append((f"blocks.{global_layer_idx}.attn.proj.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.proj.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.attn.proj.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.proj.bias"))
rename_keys.append((f"blocks.{global_layer_idx}.norm2.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_after.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.norm2.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_after.bias"))
rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc1.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc1.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc1.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc1.bias"))
rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc2.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc2.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc2.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc2.bias"))
# a projection layer is only present in the first layer of each stage where the embedding dimension changes (i.e. every stage except the first)
if dim_out != dim_in and layer_idx == 0:
rename_keys.append((f"blocks.{global_layer_idx}.proj.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.proj.weight"))
rename_keys.append((f"blocks.{global_layer_idx}.proj.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.proj.bias"))
global_layer_idx += 1
# projection layer + position embeddings
rename_keys.extend(
[
("patch_embed.proj.weight", "hiera.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "hiera.embeddings.patch_embeddings.projection.bias")
]
)
rename_keys.append(("pos_embed", "hiera.embeddings.position_embeddings"))
if base_model:
# layernorm + pooler
rename_keys.extend([("norm.weight", "pooler.layernorm.weight"), ("norm.bias", "pooler.layernorm.bias")])
# if just the base model, we should remove "hiera" from all keys that start with "hiera"
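# e.g. "hiera.embeddings.patch_embeddings.projection.weight" -> "embeddings.patch_embeddings.projection.weight"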
rename_keys = [(pair[0], pair[1][6:]) if pair[1].startswith("hiera") else pair for pair in rename_keys]
elif mae_model:
rename_keys.extend(
[
("encoder_norm.weight", "encoder_norm.weight"),
("encoder_norm.bias", "encoder_norm.bias"),
("mask_token", "decoder.mask_token"),
("decoder_pos_embed", "decoder.decoder_position_embeddings"),
("decoder_norm.weight", "decoder.decoder_norm.weight"),
("decoder_norm.bias", "decoder.decoder_norm.bias"),
("decoder_pred.weight", "decoder.decoder_pred.weight"),
("decoder_pred.bias", "decoder.decoder_pred.bias"),
("decoder_embed.weight", "decoder.decoder_embeddings.weight"),
("decoder_embed.bias", "decoder.decoder_embeddings.bias")
]
)
for i in range(config.decoder_depth):
rename_keys.extend(
[
(f"decoder_blocks.{i}.norm1.weight", f"decoder.decoder_block.layers.{i}.layernorm_before.weight"),
(f"decoder_blocks.{i}.norm1.bias", f"decoder.decoder_block.layers.{i}.layernorm_before.bias"),
(f"decoder_blocks.{i}.attn.qkv.weight", f"decoder.decoder_block.layers.{i}.attn.qkv.weight"),
(f"decoder_blocks.{i}.attn.qkv.bias", f"decoder.decoder_block.layers.{i}.attn.qkv.bias"),
(f"decoder_blocks.{i}.attn.proj.weight", f"decoder.decoder_block.layers.{i}.attn.proj.weight"),
(f"decoder_blocks.{i}.attn.proj.bias", f"decoder.decoder_block.layers.{i}.attn.proj.bias"),
(f"decoder_blocks.{i}.norm2.weight", f"decoder.decoder_block.layers.{i}.layernorm_after.weight"),
(f"decoder_blocks.{i}.norm2.bias", f"decoder.decoder_block.layers.{i}.layernorm_after.bias"),
(f"decoder_blocks.{i}.mlp.fc1.weight", f"decoder.decoder_block.layers.{i}.mlp.fc1.weight"),
(f"decoder_blocks.{i}.mlp.fc1.bias", f"decoder.decoder_block.layers.{i}.mlp.fc1.bias"),
(f"decoder_blocks.{i}.mlp.fc2.weight", f"decoder.decoder_block.layers.{i}.mlp.fc2.weight"),
(f"decoder_blocks.{i}.mlp.fc2.bias", f"decoder.decoder_block.layers.{i}.mlp.fc2.bias"),
]
)
for i in range(config.num_query_pool):
rename_keys.extend(
[
(f"multi_scale_fusion_heads.{i}.weight", f"multiscale_fusion.multi_scale_fusion_heads.{i}.weight"),
(f"multi_scale_fusion_heads.{i}.bias", f"multiscale_fusion.multi_scale_fusion_heads.{i}.bias")
]
)
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "hiera.pooler.layernorm.weight"),
("norm.bias", "hiera.pooler.layernorm.bias"),
("head.projection.weight", "classifier.weight"),
("head.projection.bias", "classifier.bias"),
]
)
# fmt: on
return rename_keys
def remove_classification_head_(state_dict):
ignore_keys = ["head.projection.weight", "head.projection.bias"]
for k in ignore_keys:
state_dict.pop(k, None)
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
def get_labels_for_classifier(model_name: str) -> Tuple[Dict[int, str], Dict[str, int], int]:
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)
return id2label, label2id, num_labels
def get_hiera_config(model_name: str, base_model: bool, mae_model: bool) -> HieraConfig:
if model_name == "hiera-tiny-224":
config = HieraConfig(depths=[1, 2, 7, 2])
elif model_name == "hiera-small-224":
config = HieraConfig(depths=[1, 2, 11, 2])
elif model_name == "hiera-base-224":
config = HieraConfig()
elif model_name == "hiera-base-plus-224":
config = HieraConfig(embed_dim=112, num_heads=[2, 4, 8, 16])
elif model_name == "hiera-large-224":
config = HieraConfig(embed_dim=144, num_heads=[2, 4, 8, 16], depths=[2, 6, 36, 4])
elif model_name == "hiera-huge-224":
config = HieraConfig(embed_dim=256, num_heads=[4, 8, 16, 32], depths=[2, 6, 36, 4])
else:
raise ValueError(f"Unrecognized model name: {model_name}")
if base_model:
pass
elif mae_model:
config.num_query_pool = 2
config.decoder_hidden_size = 512
config.decoder_depth = 8
config.decoder_num_heads = 16
# Table 3b from Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
config.mask_ratio = 0.6
else:
id2label, label2id, num_labels = get_labels_for_classifier(model_name)
config.id2label = id2label
config.label2id = label2id
config.num_labels = num_labels
return config
@torch.no_grad()
def convert_hiera_checkpoint(args):
model_name = args.model_name
base_model = args.base_model
pytorch_dump_folder_path = args.pytorch_dump_folder_path
push_to_hub = args.push_to_hub
mae_model = args.mae_model
config = get_hiera_config(model_name, base_model, mae_model)
# Load original hiera model
original_model_name = model_name.replace("-", "_")
original_model_name = f"mae_{original_model_name}" if mae_model else original_model_name
original_checkpoint_name = "mae_in1k_ft_in1k" if not (base_model or mae_model) else "mae_in1k"
original_model = torch.hub.load(
"facebookresearch/hiera",
model=original_model_name,
pretrained=True,
checkpoint=original_checkpoint_name,
)
original_model.eval()
original_state_dict = original_model.state_dict()
# No need to remove the classification head for MAE checkpoints because the original implementation doesn't include one
if base_model:
remove_classification_head_(original_state_dict)
# Rename keys
new_state_dict = original_state_dict.copy()
rename_keys = create_rename_keys(config, base_model, mae_model)
for src, dest in rename_keys:
rename_key(new_state_dict, src, dest)
# Load HF hiera model
if base_model:
model = HieraModel(config)
elif mae_model:
model = HieraForPreTraining(config)
else:
model = HieraForImageClassification(config)
model.eval()
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
print("Missing keys:", missing_keys)
print("Unexpected keys:", unexpected_keys)
input_image = prepare_img()
original_image_preprocessor = transforms.Compose(
[
transforms.Resize(int((256 / 224) * 224), interpolation=transforms.functional.InterpolationMode.BICUBIC),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
]
)
image_processor = BitImageProcessor(
image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, size={"shortest_edge": 256}
)
inputs = image_processor(images=input_image, return_tensors="pt")
expected_pixel_values = original_image_preprocessor(input_image).unsqueeze(0)
assert torch.allclose(inputs.pixel_values, expected_pixel_values, atol=1e-4)
print("Pixel values look good!")
print(f"{inputs.pixel_values[0, :3, :3, :3]=}")
# If this is an MAE model, we pass noise to generate a random mask
mask_spatial_shape = [
i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size)
]
num_windows = math.prod(mask_spatial_shape)
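# with the default config this is math.prod([224 // 4 // 8, 224 // 4 // 8]) = 7 * 7 = 49 mask units, one noise value per unit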
torch.manual_seed(2)
noise = torch.rand(1, num_windows)
outputs = model(**inputs) if not mae_model else model(noise=noise, **inputs)
# original implementation returns logits.softmax(dim=-1)
if base_model:
expected_prob, expected_intermediates = original_model(expected_pixel_values, return_intermediates=True)
expected_last_hidden = expected_intermediates[-1]
batch_size, _, _, hidden_dim = expected_last_hidden.shape
expected_last_hidden = expected_last_hidden.reshape(batch_size, -1, hidden_dim)
assert torch.allclose(outputs.last_hidden_state, expected_last_hidden, atol=1e-3)
print("Base Model looks good as hidden states match original implementation!")
print(f"{outputs.last_hidden_state[0, :3, :3]=}")
elif mae_model:
# get mask from noise to be able to compare outputs
mask, _ = model.hiera.embeddings.patch_embeddings.random_masking(expected_pixel_values, noise)
expected_loss, _, _, _ = original_model(expected_pixel_values, mask=mask.bool())
assert torch.allclose(outputs.loss, expected_loss, atol=1e-3)
print("MAE Model looks good as loss matches original implementation!")
else:
expected_prob = original_model(expected_pixel_values)
assert torch.allclose(outputs.logits.softmax(dim=-1), expected_prob, atol=1e-3)
print("Classifier looks good as probs match original implementation")
print(f"{outputs.logits[:, :5]=}")
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor for {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
hub_name = model_name
if base_model:
hub_name = model_name
elif mae_model:
hub_name = f"{model_name}-mae"
else:
hub_name = f"{model_name}-in1k"
repo_id = f"EduardoPacheco/{hub_name}"
print(f"Pushing model and processor for {model_name} to hub at {repo_id}")
model.push_to_hub(repo_id)
image_processor.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model-name",
default="hiera-tiny-224",
type=str,
choices=[
"hiera-tiny-224",
"hiera-small-224",
"hiera-base-224",
"hiera-base-plus-224",
"hiera-large-224",
"hiera-huge-224",
],
help="Name of the Hiera model you'd like to convert.",
)
parser.add_argument(
"--pytorch-dump-folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--verify-logits",
action="store_true",
help="Whether or not to verify the logits against the original implementation.",
)
parser.add_argument(
"--push-to-hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
parser.add_argument(
"--base-model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.add_argument(
"--mae-model", action="store_true", help="Whether to convert to MAE checkpoint to HieraForPreTraining."
)
args = parser.parse_args()
convert_hiera_checkpoint(args)
@@ -4583,6 +4583,41 @@ class GroupViTVisionModel(metaclass=DummyObject):
requires_backends(self, ["torch"])
class HieraBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HieraForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HieraForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HieraModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HieraPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertForCTC(metaclass=DummyObject):
_backends = ["torch"]
@@ -138,6 +138,7 @@ _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [
"gpt2",
"gpt_neo",
"gptj",
"hiera",
"hubert",
"layoutlm",
"llama",
@@ -16,6 +16,7 @@ import collections
import copy
import gc
import inspect
import math
import os
import os.path
import random
@@ -55,6 +56,7 @@ from transformers.models.auto.modeling_auto import (
MODEL_FOR_MASKED_LM_MAPPING_NAMES,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
MODEL_FOR_PRETRAINING_MAPPING_NAMES,
MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
@@ -194,6 +196,14 @@ class ModelTesterMixin:
}
elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES):
inputs_dict.pop("attention_mask")
elif model_class.__name__ == MODEL_FOR_PRETRAINING_MAPPING_NAMES["hiera"]:
config = self.model_tester.get_config()
mask_spatial_shape = [
i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size)
]
num_windows = math.prod(mask_spatial_shape)
torch.manual_seed(0)
inputs_dict["noise"] = torch.rand(self.model_tester.batch_size, num_windows)
if return_labels:
if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
@@ -1163,6 +1173,7 @@ class ModelTesterMixin:
"token_type_ids",
"visual_feats",
"visual_pos",
"noise",
]
labels = inputs.get("labels", None)
@@ -997,6 +997,7 @@ SHOULD_HAVE_THEIR_OWN_PAGE = [
"DinatBackbone",
"Dinov2Backbone",
"FocalNetBackbone",
"HieraBackbone",
"MaskFormerSwinBackbone",
"MaskFormerSwinConfig",
"MaskFormerSwinModel",