Unverified commit b8e5cd53 authored by Sai-Suraj-27, committed by GitHub

Refactor: Removed unnecessary `object` base class (#32230)

* Refactored to remove the unnecessary `object` base class.

* small fix.
parent 1c7ebf1d
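For context, the change is purely cosmetic: in Python 3 every class is a new-style class that inherits from `object` implicitly, so the explicit base adds nothing. A minimal sketch (hypothetical example, not part of this diff) illustrating the equivalence:

# Hypothetical illustration, not from this commit: both spellings define
# equivalent classes in Python 3, because `object` is always the implicit base.

class WithExplicitBase(object):
    pass


class WithImplicitBase:
    pass


# Both classes end up with the same method resolution order: (cls, object).
assert WithExplicitBase.__mro__[1:] == (object,)
assert WithImplicitBase.__mro__[1:] == (object,)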
@@ -511,7 +511,7 @@ class TransfoXLTokenizer(PreTrainedTokenizer):
return symbols
-class LMOrderedIterator(object):
+class LMOrderedIterator:
def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
"""
data -- LongTensor -- the LongTensor is strictly ordered
@@ -570,7 +570,7 @@ class LMOrderedIterator(object):
return self.get_fixlen_iter()
-class LMShuffledIterator(object):
+class LMShuffledIterator:
def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
@@ -679,7 +679,7 @@ class LMMultiFileIterator(LMShuffledIterator):
yield batch
-class TransfoXLCorpus(object):
+class TransfoXLCorpus:
@classmethod
@torch_only_method
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
@@ -2292,7 +2292,7 @@ def _max_by_axis(the_list):
return maxes
-class NestedTensor(object):
+class NestedTensor:
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
@@ -295,7 +295,7 @@ class DistilBertTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -457,7 +457,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -200,7 +200,7 @@ name_to_url = {
def get_original_pixel_values(image):
-class CenterPadding(object):
+class CenterPadding:
def __init__(self, multiple):
super().__init__()
self.multiple = multiple
@@ -284,7 +284,7 @@ class ElectraTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -446,7 +446,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -315,7 +315,7 @@ class FunnelTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -477,7 +477,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -192,7 +192,7 @@ class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
return vocab_file, emoji_file
-class SubWordJapaneseTokenizer(object):
+class SubWordJapaneseTokenizer:
"""
https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT License according to the
original repository.
@@ -2617,7 +2617,7 @@ def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: f
# Copied from transformers.models.detr.modeling_detr.NestedTensor
-class NestedTensor(object):
+class NestedTensor:
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
@@ -113,7 +113,7 @@ def whitespace_tokenize(text):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -285,7 +285,7 @@ class LayoutLMTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -447,7 +447,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -1323,7 +1323,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -1485,7 +1485,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -284,7 +284,7 @@ class LxmertTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -446,7 +446,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -286,7 +286,7 @@ class MobileBertTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -448,7 +448,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -310,7 +310,7 @@ class MPNetTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -472,7 +472,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -43,7 +43,7 @@ def whitespace_tokenize(text):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -38,7 +38,7 @@ def whitespace_tokenize(text):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -200,7 +200,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -889,7 +889,7 @@ class RoCBertTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer with BasicTokenizer->RoCBertBasicTokenizer
-class RoCBertBasicTokenizer(object):
+class RoCBertBasicTokenizer:
"""
Constructs a RoCBertBasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -1051,7 +1051,7 @@ class RoCBertBasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer with WordpieceTokenizer->RoCBertWordpieceTokenizer
-class RoCBertWordpieceTokenizer(object):
+class RoCBertWordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -51,7 +51,7 @@ def whitespace_tokenize(text):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -213,7 +213,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -2452,7 +2452,7 @@ def _max_by_axis(the_list):
# Copied from transformers.models.detr.modeling_detr.NestedTensor
-class NestedTensor(object):
+class NestedTensor:
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
@@ -555,7 +555,7 @@ class XSoftmax(torch.autograd.Function):
# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
-class DropoutContext(object):
+class DropoutContext:
def __init__(self):
self.dropout = 0
self.mask = None