Unverified commit 651408a0 authored by Arthur, committed by GitHub

[`Styling`] stylify using ruff (#27144)



* try to stylify using ruff

* might need to remove these changes?

* use ruff format and ruff check

* use isinstance instead of type comparison (see the first sketch after this list)

* use # fmt: skip (see the second sketch after this list)

* use # fmt: skip

* nits

* some styling changes

* update ci job

* nits isinstance

* more files update

* nits

* more nits

* small nits

* check and format

* revert wrong changes

* actually use formatter instead of checker

* nits

* well docbuilder is overwriting this commit

* revert notebook changes

* try to nuke docbuilder

* style

* fix feature extraction test

* remove `indent-width = 4`

* fixup

* more nits

* update the ruff version that we use

* style

* nuke docbuilder styling

* leave the print for detected changes

* nits

* Remove file I/O
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>

* style

* nits

* revert notebook changes

* Add # fmt skip when possible

* Add # fmt skip when possible

* Fix

* More `  # fmt: skip` usage

* More `  # fmt: skip` usage

* More `  # fmt: skip` usage

* Nits

* more fixes

* fix tapas

* Another way to skip

* Recommended way

* Fix two more files

* Remove asynch
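For context on the isinstance commits above: ruff's E721 check flags comparisons like `type(x) == SomeClass`. A minimal sketch of the rewrite pattern, with illustrative names that are not taken from this diff:

```python
def is_token_list(value):
    """Return True if `value` is a list of tokens (illustrative helper)."""
    # Before (flagged by ruff's E721): type(value) == list
    # After: isinstance, which also accepts subclasses of list
    return isinstance(value, list)
```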

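And for the `# fmt: skip` commits: the trailing comment tells ruff's formatter (like black's) to leave that one statement exactly as written, which this PR uses to keep hand-aligned literals readable. A hedged sketch with made-up values:

```python
# Without `# fmt: skip`, the formatter would collapse the extra spaces that
# visually group this mask into chunks (the values are illustrative).
TOKEN_TYPE_MASK = [0, 0, 0, 0,   1, 1, 1, 1,   0, 0]  # fmt: skip
```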
---------
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>
parent acb5b4af
@@ -36,6 +36,7 @@ class Speech2TextProcessor(ProcessorMixin):
        tokenizer (`Speech2TextTokenizer`):
            An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.
    """
+
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
...
@@ -86,6 +86,7 @@ class Speech2Text2Config(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
...
@@ -35,6 +35,7 @@ class Speech2Text2Processor(ProcessorMixin):
        tokenizer (`Speech2Text2Tokenizer`):
            An instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input.
    """
+
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "Speech2Text2Tokenizer"
...
@@ -194,6 +194,7 @@ class SpeechT5Config(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "speecht5"
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers"}
@@ -398,6 +399,7 @@ class SpeechT5HifiGanConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "hifigan"

    def __init__(
...
@@ -30,6 +30,7 @@ class SpeechT5Processor(ProcessorMixin):
        tokenizer (`SpeechT5Tokenizer`):
            An instance of [`SpeechT5Tokenizer`]. The tokenizer is a required input.
    """
+
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
...
@@ -88,6 +88,7 @@ class SplinterConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "splinter"

    def __init__(
...
@@ -109,6 +109,7 @@ class SqueezeBertConfig(PretrainedConfig):
    Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained
        checkpoints.
    """
+
    pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "squeezebert"
...
@@ -266,8 +266,8 @@ class SqueezeBertTokenizer(PreTrainedTokenizer):
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT
-        sequence pair mask has the following format:
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence
+        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
...
@@ -182,8 +182,8 @@ class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT
-        sequence pair mask has the following format:
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence
+        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
...
@@ -85,6 +85,7 @@ class SwiftFormerConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "swiftformer"

    def __init__(
...
@@ -105,6 +105,7 @@ class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "swin"

    attribute_map = {
...
@@ -97,6 +97,7 @@ class Swin2SRConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "swin2sr"

    attribute_map = {
...
@@ -89,6 +89,7 @@ class Swinv2Config(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "swinv2"

    attribute_map = {
...
@@ -94,6 +94,7 @@ class SwitchTransformersConfig(PretrainedConfig):
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """
+
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
...
@@ -77,6 +77,7 @@ class T5Config(PretrainedConfig):
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """
+
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
...
@@ -132,6 +132,7 @@ class TableTransformerConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
...
@@ -65,10 +65,9 @@ TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
# Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
    """
-    Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to
-    BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output
-    of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary
-    decoding losses.
+    Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,
+    namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them
+    gone through a layernorm. This is useful when training the model with auxiliary decoding losses.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
@@ -97,10 +96,9 @@ class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
# Copied from transformers.models.detr.modeling_detr.DetrModelOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerModelOutput(Seq2SeqModelOutput):
    """
-    Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to
-    Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder
-    layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding
-    losses.
+    Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput,
+    namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them
+    gone through a layernorm. This is useful when training the model with auxiliary decoding losses.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
@@ -153,8 +151,8 @@ class TableTransformerObjectDetectionOutput(ModelOutput):
        pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
-            possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to
-            retrieve the unnormalized bounding boxes.
+            possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to retrieve the
+            unnormalized bounding boxes.
        auxiliary_outputs (`list[Dict]`, *optional*):
            Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
            and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
@@ -1583,15 +1581,15 @@ def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
# Copied from transformers.models.detr.modeling_detr.DetrLoss with Detr->TableTransformer,detr->table_transformer
class TableTransformerLoss(nn.Module):
    """
-    This class computes the losses for TableTransformerForObjectDetection/TableTransformerForSegmentation. The process
-    happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2)
-    we supervise each pair of matched ground-truth / prediction (supervise class and box).
+    This class computes the losses for TableTransformerForObjectDetection/TableTransformerForSegmentation. The process happens in two steps: 1)
+    we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair
+    of matched ground-truth / prediction (supervise class and box).

-    A note on the `num_classes` argument (copied from original repo in table_transformer.py): "the naming of the
-    `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where
-    `max_obj_id` is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass
-    `num_classes` to be 91. As another example, for a dataset that has a single class with `id` 1, you should pass
-    `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion
+    A note on the `num_classes` argument (copied from original repo in table_transformer.py): "the naming of the `num_classes`
+    parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is
+    the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to
+    be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2
+    (`max_obj_id` + 1). For more details on this, check the following discussion
    https://github.com/facebookresearch/table_transformer/issues/108#issuecomment-650269223"
...
@@ -127,6 +127,7 @@ class TimeSeriesTransformerConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
...
@@ -85,6 +85,7 @@ class TimesformerConfig(PretrainedConfig):
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
+
    model_type = "timesformer"

    def __init__(
...
@@ -60,6 +60,7 @@ class TimmBackboneConfig(PretrainedConfig):
    >>> configuration = model.config
    ```
    """
+
    model_type = "timm_backbone"

    def __init__(
...