Unverified commit 651408a0, authored by Arthur and committed by GitHub

[`Styling`] stylify using ruff (#27144)



* try to stylify using ruff

* might need to remove these changes?

* use ruff format and ruff check

* use isinstance instead of type comparison (illustrated in the sketch after this list)

* use # fmt: skip

* use # fmt: skip

* nits

* some styling changes

* update ci job

* nits isinstance

* more files update

* nits

* more nits

* small nits

* check and format

* revert wrong changes

* actually use formatter instead of checker

* nits

* well docbuilder is overwriting this commit

* revert notebook changes

* try to nuke docbuilder

* style

* fix feature extraction test

* remove `indent-width = 4`

* fixup

* more nits

* update the ruff version that we use

* style

* nuke docbuilder styling

* leave the print for detected changes

* nits

* Remove file I/O
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>

* style

* nits

* revert notebook changes

* Add `# fmt: skip` when possible

* Add `# fmt: skip` when possible

* Fix

* More `  # fmt: skip` usage

* More `  # fmt: skip` usage

* More `  # fmt: skip` usage

* Nits

* more fixes

* fix tapas

* Another way to skip

* Recommended way

* Fix two more files

* Remove asynch

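For reference, a minimal sketch of the two recurring mechanical changes named in the commits above. The variable names below are invented for illustration and do not come from the diff:

```python
value = {"hidden_size": 768}

# Before: exact-type comparison, which only matches dict itself.
if type(value) is dict:
    print("exact dict only")

# After: isinstance, the form this PR standardizes on; it also accepts dict subclasses.
if isinstance(value, dict):
    print("dict or any dict subclass")

# Hand-aligned lines are exempted from `ruff format` with a trailing directive:
token_type_ids = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]  # fmt: skip
```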
---------
Co-authored-by: charliermarsh <charlie.r.marsh@gmail.com>
parent acb5b4af
@@ -98,6 +98,7 @@ class DistilBertConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "distilbert"
     attribute_map = {
         "hidden_size": "dim",
@@ -85,6 +85,7 @@ class DonutSwinConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "donut-swin"
     attribute_map = {
@@ -37,6 +37,7 @@ class DonutProcessor(ProcessorMixin):
         tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
             An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
     """
+
     attributes = ["image_processor", "tokenizer"]
     image_processor_class = "AutoImageProcessor"
     tokenizer_class = "AutoTokenizer"
@@ -109,6 +109,7 @@ class DPRConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "dpr"

     def __init__(
@@ -126,6 +126,7 @@ class DPTConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "dpt"

     def __init__(
@@ -100,6 +100,7 @@ class EfficientNetConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "efficientnet"

     def __init__(
@@ -130,6 +130,7 @@ class ElectraConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "electra"

     def __init__(
@@ -1196,6 +1196,7 @@ class FlaxElectraSequenceSummary(nn.Module):
             - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
             - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
     """
+
     config: ElectraConfig
     dtype: jnp.dtype = jnp.float32
@@ -280,8 +280,8 @@ class ElectraTokenizer(PreTrainedTokenizer):
         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
     ) -> List[int]:
         """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Electra
-        sequence pair mask has the following format:
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Electra sequence
+        pair mask has the following format:

         ```
         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
@@ -201,8 +201,8 @@ class ElectraTokenizerFast(PreTrainedTokenizerFast):
         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
     ) -> List[int]:
         """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ELECTRA
-        sequence pair mask has the following format:
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ELECTRA sequence
+        pair mask has the following format:

         ```
         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
@@ -108,6 +108,7 @@ class EncodecConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "encodec"

     def __init__(
@@ -68,6 +68,7 @@ class EncoderDecoderConfig(PretrainedConfig):
     >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
     >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
     ```"""
+
     model_type = "encoder-decoder"
     is_composition = True
@@ -174,6 +174,7 @@ class EncoderDecoderModel(PreTrainedModel):
     :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
     :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
     """
+
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
     main_input_name = "input_ids"
@@ -306,6 +306,7 @@ class FlaxEncoderDecoderModel(FlaxPreTrainedModel):
     decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the
     encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder.
     """
+
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
     module_class = FlaxEncoderDecoderModule
@@ -197,6 +197,7 @@ class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
     [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
     method for the decoder.
     """
+
     config_class = EncoderDecoderConfig
     base_model_prefix = "encoder_decoder"
     load_weight_prefix = "tf_encoder_decoder_model"
@@ -109,6 +109,7 @@ class ErnieConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "ernie"

     def __init__(
@@ -78,6 +78,7 @@ class ErnieMConfig(PretrainedConfig):
             A normal_initializer initializes weight matrices as normal distributions. See
             `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
     """
+
     model_type = "ernie_m"
     attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
@@ -97,6 +97,7 @@ class EsmConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "esm"

     def __init__(
@@ -229,7 +229,7 @@ def dict_multimap(fn, dicts):
     new_dict = {}
     for k, v in first.items():
         all_v = [d[k] for d in dicts]
-        if type(v) is dict:
+        if isinstance(v, dict):
             new_dict[k] = dict_multimap(fn, all_v)
         else:
             new_dict[k] = fn(all_v)
@@ -1060,7 +1060,7 @@ class EsmFoldDropout(nn.Module):
         super().__init__()

         self.r = r
-        if type(batch_dim) == int:
+        if isinstance(batch_dim, int):
             batch_dim = [batch_dim]
         self.batch_dim = batch_dim
         self.dropout = nn.Dropout(self.r)
@@ -2254,7 +2254,7 @@ class EsmForProteinFolding(EsmPreTrainedModel):
         seqs: Union[str, List[str]],
         position_ids=None,
     ):
-        if type(seqs) is str:
+        if isinstance(seqs, str):
             lst = [seqs]
         else:
             lst = seqs
@@ -2312,7 +2312,7 @@ class EsmForProteinFolding(EsmPreTrainedModel):
     def infer_pdb(self, seqs, *args, **kwargs) -> str:
         """Returns the pdb (file) string from the model given an input sequence."""
-        assert type(seqs) is str
+        assert isinstance(seqs, str)
         output = self.infer(seqs, *args, **kwargs)
         return self.output_to_pdb(output)[0]
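Worth noting about the `isinstance` conversions above: the two forms are not strictly equivalent, since an exact-type check rejects subclasses while `isinstance` accepts them. A small self-contained illustration (not taken from the diff):

```python
from collections import OrderedDict

d = OrderedDict(a=1)

# The exact-type check fails for subclasses of dict...
print(type(d) is dict)       # False

# ...while isinstance accepts them, so behavior can change for subclass inputs.
print(isinstance(d, dict))   # True
```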
@@ -104,6 +104,7 @@ class FalconConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
+
     model_type = "falcon"
     keys_to_ignore_at_inference = ["past_key_values"]