Unverified Commit 651408a0 authored by Arthur, committed by GitHub

[`Styling`] stylify using ruff (#27144)



* try to stylify using ruff

* might need to remove these changes?

* use ruff format and ruff check (see the sketch below)

* use isinstance instead of type comparison

* use # fmt: skip

* use # fmt: skip

* nits

* some styling changes

* update ci job

* nits isinstance

* more files update

* nits

* more nits

* small nits

* check and format

* revert wrong changes

* actually use formatter instead of checker

* nits

* well docbuilder is overwriting this commit

* revert notebook changes

* try to nuke docbuilder

* style

* fix feature extraction test

* remove `indent-width = 4`

* fixup

* more nits

* update the ruff version that we use

* style

* nuke docbuilder styling

* leave the print for detected changes

* nits

* Remove file I/O
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>

* style

* nits

* revert notebook changes

* Add # fmt skip when possible

* Add # fmt skip when possible

* Fix

* More `  # fmt: skip` usage

* More `  # fmt: skip` usage

* More `  # fmt: skip` usage

* Nits

* more fixes

* fix tapas

* Another way to skip

* Recommended way

* Fix two more files

* Remove asynch

---------
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>
parent acb5b4af
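The commits above standardize on running both the ruff linter and the ruff formatter. As a rough sketch of that "check and format" flow (an assumed invocation only; the exact flags and paths used by the updated CI job are not shown on this page, and ruff must be installed and on PATH):

```python
# Hypothetical wrapper for the two-step flow this commit adopts:
# `ruff check --fix` (lint + autofix), then `ruff format` (code formatting).
import subprocess
import sys


def run_style(paths: list[str]) -> int:
    for cmd in (["ruff", "check", "--fix"], ["ruff", "format"]):
        result = subprocess.run(cmd + paths)
        if result.returncode != 0:
            return result.returncode  # stop at the first failing step
    return 0


if __name__ == "__main__":
    sys.exit(run_style(sys.argv[1:] or ["."]))
```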
......
@@ -168,6 +168,7 @@ class Data2VecAudioConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "data2vec-audio"

     def __init__(
......
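This hunk, and most of the one-line hunks below, apply a single mechanical rule: ruff format, like black, inserts a blank line between a class docstring and the first statement of the class body. A minimal illustration with a made-up class:

```python
class ExampleConfig:
    """A hypothetical config class, shown only to illustrate the rule."""

    model_type = "example"  # the formatter inserts the blank line above
```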
@@ -95,6 +95,7 @@ class Data2VecTextConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "data2vec-text"

     def __init__(
......
@@ -111,6 +111,7 @@ class Data2VecVisionConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "data2vec-vision"

     def __init__(
......
@@ -289,8 +289,8 @@ class Data2VecVisionSelfAttention(nn.Module):
 # Copied from transformers.models.beit.modeling_beit.BeitSelfOutput with Beit->Data2VecVision
 class Data2VecVisionSelfOutput(nn.Module):
     """
-    The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due
-    to the layernorm applied before each block.
+    The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the
+    layernorm applied before each block.
     """

     def __init__(self, config: Data2VecVisionConfig) -> None:
......
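One caveat on hunks like the one above: ruff format does not re-wrap prose inside docstrings, so these re-wraps come from the repository's doc-styling pass instead (the commits above mention docbuilder overwriting exactly this kind of line). For illustration, the formatter leaves even an oddly wrapped docstring alone:

```python
def example():
    """This oddly
    wrapped docstring is left as written by ruff format."""
```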
@@ -105,6 +105,7 @@ class DebertaConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "deberta"

     def __init__(
......
@@ -148,7 +149,7 @@ class DebertaConfig(PretrainedConfig):
         self.position_biased_input = position_biased_input

         # Backwards compatibility
-        if type(pos_att_type) == str:
+        if isinstance(pos_att_type, str):
             pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

         self.pos_att_type = pos_att_type
......
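The `type(...) ==` to `isinstance(...)` rewrite above is what `ruff check` reports as E721 (type comparison). Besides satisfying the linter, `isinstance` also accepts subclasses, which the exact-type equality check does not; a small sketch:

```python
class ConfigStr(str):
    """Hypothetical str subclass, for illustration only."""


s = ConfigStr("c2p|p2c")

print(type(s) == str)      # False: compares exact types, flagged as E721
print(isinstance(s, str))  # True: subclasses count as instances
```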
@@ -107,6 +107,7 @@ class DebertaV2Config(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "deberta-v2"

     def __init__(
......
@@ -150,7 +151,7 @@ class DebertaV2Config(PretrainedConfig):
         self.position_biased_input = position_biased_input

         # Backwards compatibility
-        if type(pos_att_type) == str:
+        if isinstance(pos_att_type, str):
             pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

         self.pos_att_type = pos_att_type
......
@@ -780,15 +780,11 @@ class DisentangledSelfAttention(nn.Module):
         if "c2p" in self.pos_att_type:
             pos_key_layer = self.transpose_for_scores(
                 self.pos_key_proj(rel_embeddings), self.num_attention_heads
-            ).repeat(
-                query_layer.size(0) // self.num_attention_heads, 1, 1
-            )  # .split(self.all_head_size, dim=-1)
+            ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)
         if "p2c" in self.pos_att_type:
             pos_query_layer = self.transpose_for_scores(
                 self.pos_query_proj(rel_embeddings), self.num_attention_heads
-            ).repeat(
-                query_layer.size(0) // self.num_attention_heads, 1, 1
-            )  # .split(self.all_head_size, dim=-1)
+            ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)

         score = 0
         # content->position
......
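Several commits above reach for `# fmt: skip` where hand-tuned layout (like the trailing `.split` reminder comments in this hunk) should survive the formatter. A sketch of both escape hatches, with made-up values:

```python
# `# fmt: skip` exempts a single statement from formatting:
lookup = {0: "zero",  1: "one",  2: "two"}  # fmt: skip

# `# fmt: off` / `# fmt: on` exempt a whole region:
# fmt: off
identity = [
    1, 0,
    0, 1,
]
# fmt: on
```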
@@ -143,6 +143,7 @@ class DeformableDetrConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "deformable_detr"
     attribute_map = {
         "hidden_size": "d_model",
......
@@ -91,6 +91,7 @@ class DeiTConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "deit"

     def __init__(
......
@@ -114,6 +114,7 @@ class MCTCTConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "mctct"

     def __init__(
......
@@ -34,6 +34,7 @@ class MCTCTProcessor(ProcessorMixin):
         tokenizer (`AutoTokenizer`):
             An instance of [`AutoTokenizer`]. The tokenizer is a required input.
     """
+
     feature_extractor_class = "MCTCTFeatureExtractor"
     tokenizer_class = "AutoTokenizer"
......
@@ -90,6 +90,7 @@ class OpenLlamaConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "open-llama"

     def __init__(
......
@@ -72,6 +72,7 @@ class RetriBertConfig(PretrainedConfig):
         projection_dim (`int`, *optional*, defaults to 128):
             Final dimension of the query and document representation after projection
     """
+
     model_type = "retribert"

     def __init__(
......
@@ -100,6 +100,7 @@ class TrajectoryTransformerConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "trajectory_transformer"
     keys_to_ignore_at_inference = ["past_key_values"]
     attribute_map = {
......
@@ -77,6 +77,7 @@ class VanConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "van"

     def __init__(
......
@@ -124,6 +124,7 @@ class DetaConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "deta"
     attribute_map = {
         "hidden_size": "d_model",
......
@@ -70,8 +70,8 @@ DETA_PRETRAINED_MODEL_ARCHIVE_LIST = [
 # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderOutput with DeformableDetr->Deta
 class DetaDecoderOutput(ModelOutput):
     """
-    Base class for outputs of the DetaDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions,
-    namely:
+    Base class for outputs of the DetaDecoder. This class adds two attributes to
+    BaseModelOutputWithCrossAttentions, namely:
     - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
     - a stacked tensor of intermediate reference points.
......
@@ -132,6 +132,7 @@ class DetrConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "detr"
     keys_to_ignore_at_inference = ["past_key_values"]
     attribute_map = {
......
@@ -94,6 +94,7 @@ class DinatConfig(BackboneConfigMixin, PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "dinat"
     attribute_map = {
......
@@ -105,6 +105,7 @@ class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "dinov2"

     def __init__(
......
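A formatting-only commit of this size should not change behavior. One quick spot check (an assumed helper, not part of this PR) is to compare ASTs before and after restyling, the same idea behind black's internal safety check:

```python
import ast


def same_ast(before: str, after: str) -> bool:
    """True when two sources parse to the same AST, i.e. differ only in style."""
    return ast.dump(ast.parse(before)) == ast.dump(ast.parse(after))


print(same_ast("x=(1,2)", "x = (1, 2)"))  # True: whitespace-only difference
```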