Unverified commit 651408a0 authored by Arthur, committed by GitHub

[`Styling`] stylify using ruff (#27144)



* try to stylify using ruff

* might need to remove these changes?

* use ruff format and ruff check

* use isinstance instead of type comparison (see the sketch after the EsmFold hunks below)

* use # fmt: skip (see the sketch right after this commit list)

* use # fmt: skip

* nits

* some styling changes

* update ci job

* nits isinstance

* more files update

* nits

* more nits

* small nits

* check and format

* revert wrong changes

* actually use formatter instead of checker

* nits

* well docbuilder is overwriting this commit

* revert notebook changes

* try to nuke docbuilder

* style

* fix feature extraction test

* remove `indent-width = 4`

* fixup

* more nits

* update the ruff version that we use

* style

* nuke docbuilder styling

* leave the print for detected changes

* nits

* Remove file I/O
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>

* style

* nits

* revert notebook changes

* Add # fmt skip when possible

* Add # fmt skip when possible

...
parent acb5b4af
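For reference on the `# fmt: skip` commits flagged above: it is a trailing comment that `ruff format` (like black) honors, telling the formatter to leave that one statement's manual layout untouched. A minimal, hypothetical sketch:

```python
# Hand-aligned assignments: without the trailing comments,
# `ruff format` would collapse the extra spaces around `=`.
small  = 128  # fmt: skip
medium = 256  # fmt: skip
large  = 512  # fmt: skip
```

`# fmt: off` / `# fmt: on` pairs do the same for whole regions.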
@@ -98,6 +98,7 @@ class DistilBertConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "distilbert"
     attribute_map = {
         "hidden_size": "dim",
...
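This hunk, and most of the config hunks below, are the same mechanical change: the formatter this PR switches to inserts one blank line between a class docstring and the first class-level attribute, which black did not. A sketch on a made-up config class (hypothetical name):

```python
from transformers import PretrainedConfig


class ExampleConfig(PretrainedConfig):  # hypothetical class for illustration
    """Docstring ends here."""

    model_type = "example"  # ruff format wants the blank line above this attribute
```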
@@ -85,6 +85,7 @@ class DonutSwinConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "donut-swin"
     attribute_map = {
...
@@ -37,6 +37,7 @@ class DonutProcessor(ProcessorMixin):
         tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
             An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
     """
+
     attributes = ["image_processor", "tokenizer"]
     image_processor_class = "AutoImageProcessor"
     tokenizer_class = "AutoTokenizer"
...
@@ -109,6 +109,7 @@ class DPRConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "dpr"

     def __init__(
...
@@ -126,6 +126,7 @@ class DPTConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "dpt"

     def __init__(
...
@@ -100,6 +100,7 @@ class EfficientNetConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "efficientnet"

     def __init__(
...
@@ -130,6 +130,7 @@ class ElectraConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "electra"

     def __init__(
...
@@ -1196,6 +1196,7 @@ class FlaxElectraSequenceSummary(nn.Module):
         - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
         - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
     """
+
     config: ElectraConfig
     dtype: jnp.dtype = jnp.float32
...
@@ -280,8 +280,8 @@ class ElectraTokenizer(PreTrainedTokenizer):
         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
     ) -> List[int]:
         """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Electra
-        sequence pair mask has the following format:
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Electra sequence
+        pair mask has the following format:

         ```
         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
...
@@ -201,8 +201,8 @@ class ElectraTokenizerFast(PreTrainedTokenizerFast):
         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
     ) -> List[int]:
         """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ELECTRA
-        sequence pair mask has the following format:
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ELECTRA sequence
+        pair mask has the following format:

         ```
         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
...
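The two tokenizer hunks above only rewrap the docstring to the new line length; the mask format is unchanged. For context, a short usage sketch of the documented method (the checkpoint name is just an example):

```python
from transformers import ElectraTokenizer

tok = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
ids_a = tok.encode("first sequence", add_special_tokens=False)
ids_b = tok.encode("second one", add_special_tokens=False)

# 0s cover `[CLS] first sequence [SEP]`, 1s cover `second one [SEP]`
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))
```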
@@ -108,6 +108,7 @@ class EncodecConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "encodec"

     def __init__(
...
@@ -68,6 +68,7 @@ class EncoderDecoderConfig(PretrainedConfig):
     >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
     >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
     ```"""
+
     model_type = "encoder-decoder"
     is_composition = True
...
@@ -174,6 +174,7 @@ class EncoderDecoderModel(PreTrainedModel):
     :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
     :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
     """
+
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
     main_input_name = "input_ids"
...
@@ -306,6 +306,7 @@ class FlaxEncoderDecoderModel(FlaxPreTrainedModel):
     decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the
     encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder.
     """
+
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
     module_class = FlaxEncoderDecoderModule
...
@@ -197,6 +197,7 @@ class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
     [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
     method for the decoder.
     """
+
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
     load_weight_prefix = "tf_encoder_decoder_model"
...
@@ -109,6 +109,7 @@ class ErnieConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "ernie"

     def __init__(
...
@@ -78,6 +78,7 @@ class ErnieMConfig(PretrainedConfig):
             A normal_initializer initializes weight matrices as normal distributions. See
             `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
     """
+
     model_type = "ernie_m"
     attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
...
@@ -97,6 +97,7 @@ class EsmConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "esm"

     def __init__(
...
@@ -229,7 +229,7 @@ def dict_multimap(fn, dicts):
     new_dict = {}
     for k, v in first.items():
         all_v = [d[k] for d in dicts]
-        if type(v) is dict:
+        if isinstance(v, dict):
             new_dict[k] = dict_multimap(fn, all_v)
         else:
             new_dict[k] = fn(all_v)
@@ -1060,7 +1060,7 @@ class EsmFoldDropout(nn.Module):
         super().__init__()
         self.r = r
-        if type(batch_dim) == int:
+        if isinstance(batch_dim, int):
             batch_dim = [batch_dim]
         self.batch_dim = batch_dim
         self.dropout = nn.Dropout(self.r)
@@ -2254,7 +2254,7 @@ class EsmForProteinFolding(EsmPreTrainedModel):
         seqs: Union[str, List[str]],
         position_ids=None,
     ):
-        if type(seqs) is str:
+        if isinstance(seqs, str):
             lst = [seqs]
         else:
             lst = seqs
@@ -2312,7 +2312,7 @@ class EsmForProteinFolding(EsmPreTrainedModel):
     def infer_pdb(self, seqs, *args, **kwargs) -> str:
         """Returns the pdb (file) string from the model given an input sequence."""
-        assert type(seqs) is str
+        assert isinstance(seqs, str)
         output = self.infer(seqs, *args, **kwargs)
         return self.output_to_pdb(output)[0]
...
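The four EsmFold hunks above are the `isinstance` cleanup called out in the commit list: `ruff check` flags `==`-style type comparisons (pycodestyle's E721), and the `is`-style checks are converted too, since `isinstance` is the idiomatic form and also accepts subclasses. A minimal illustration:

```python
from collections import OrderedDict

d = OrderedDict()
print(type(d) is dict)      # False: exact-type check rejects subclasses
print(isinstance(d, dict))  # True: OrderedDict subclasses dict
```

This is a mild behavior change wherever subclass instances reach these checks, e.g. an `OrderedDict` value now recurses into `dict_multimap`.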
@@ -104,6 +104,7 @@ class FalconConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "falcon"
     keys_to_ignore_at_inference = ["past_key_values"]
...