Unverified Commit b8e5cd53 authored by Sai-Suraj-27, committed by GitHub

Refactor: Removed unnecessary `object` base class (#32230)

* Refactored to remove the unnecessary `object` base class.

* small fix.
parent 1c7ebf1d
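For context: explicitly inheriting from `object` is a Python 2 holdover. In Python 3 every class is already a new-style class that inherits from `object`, so dropping the explicit base changes nothing about behavior. Below is a minimal sketch (illustrative only, not part of this commit) showing that the two spellings define equivalent classes:

class Explicit(object):  # Python 2-style spelling
    pass

class Implicit:  # Python 3 spelling; `object` is still the implicit base
    pass

# Both classes have the same method resolution order, ending in `object`,
# so descriptors, super(), properties, etc. behave identically.
assert Explicit.__mro__ == (Explicit, object)
assert Implicit.__mro__ == (Implicit, object)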
@@ -557,7 +557,7 @@ class MultiHeadedAttention(nn.Module):
        return context
-class DecoderState(object):
+class DecoderState:
    """Interface for grouping together the current state of a recurrent
    decoder. In the simplest case just represents the hidden state of
    the model. But can also be used for implementing various forms of
@@ -694,7 +694,7 @@ def build_predictor(args, tokenizer, symbols, model, logger=None):
    return translator
-class GNMTGlobalScorer(object):
+class GNMTGlobalScorer:
    """
    NMT re-ranking score from
    "Google's Neural Machine Translation System" :cite:`wu2016google`
@@ -717,7 +717,7 @@ class GNMTGlobalScorer(object):
        return normalized_probs
-class PenaltyBuilder(object):
+class PenaltyBuilder:
    """
    Returns the Length and Coverage Penalty function for Beam Search.
@@ -763,7 +763,7 @@ class PenaltyBuilder(object):
        return logprobs
-class Translator(object):
+class Translator:
    """
    Uses a model to translate a batch of sentences.
@@ -1002,7 +1002,7 @@ def tile(x, count, dim=0):
#
-class BertSumOptimizer(object):
+class BertSumOptimizer:
    """Specific optimizer for BertSum.
    As described in [1], the authors fine-tune BertSum for abstractive
......
@@ -3,7 +3,7 @@ import torch
from transformers import AutoTokenizer
-class FSNERTokenizerUtils(object):
+class FSNERTokenizerUtils:
    def __init__(self, pretrained_model_name_or_path):
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
......
@@ -417,7 +417,7 @@ class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride
        return super().__new__(cls, channels, height, width, stride)
-class Box2BoxTransform(object):
+class Box2BoxTransform:
    """
    This R-CNN transformation scales the box's width and height
    by exp(dw), exp(dh) and shifts a box's center by the offset
@@ -519,7 +519,7 @@ class Box2BoxTransform(object):
        return pred_boxes
-class Matcher(object):
+class Matcher:
    """
    This class assigns to each predicted "element" (e.g., a box) a ground-truth
    element. Each predicted element will have exactly zero or one matches; each
@@ -622,7 +622,7 @@ class Matcher(object):
        match_labels[pred_inds_with_highest_quality] = 1
-class RPNOutputs(object):
+class RPNOutputs:
    def __init__(
        self,
        box2box_transform,
@@ -1132,7 +1132,7 @@ class ROIPooler(nn.Module):
        return output
-class ROIOutputs(object):
+class ROIOutputs:
    def __init__(self, cfg, training=False):
        self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA
        self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
......
@@ -108,7 +108,7 @@ class TopKBinarizer(autograd.Function):
        return gradOutput, None
-class MagnitudeBinarizer(object):
+class MagnitudeBinarizer:
    """
    Magnitude Binarizer.
    Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
......
@@ -284,7 +284,7 @@ def make_fast_generalized_attention(
    return attention_fn
-class RandomMatrix(object):
+class RandomMatrix:
    r"""
    Abstract class providing a method for constructing 2D random arrays. Class is responsible for constructing 2D
    random arrays.
@@ -348,7 +348,7 @@ class GaussianOrthogonalRandomMatrix(RandomMatrix):
        return jnp.matmul(jnp.diag(multiplier), final_matrix)
-class FastAttention(object):
+class FastAttention:
    r"""
    Abstract class providing a method for fast attention. Class is responsible for providing a method
    <dot_product_attention> for fast approximate attention.
......
@@ -417,7 +417,7 @@ class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride
        return super().__new__(cls, channels, height, width, stride)
-class Box2BoxTransform(object):
+class Box2BoxTransform:
    """
    This R-CNN transformation scales the box's width and height
    by exp(dw), exp(dh) and shifts a box's center by the offset
@@ -519,7 +519,7 @@ class Box2BoxTransform(object):
        return pred_boxes
-class Matcher(object):
+class Matcher:
    """
    This class assigns to each predicted "element" (e.g., a box) a ground-truth
    element. Each predicted element will have exactly zero or one matches; each
@@ -622,7 +622,7 @@ class Matcher(object):
        match_labels[pred_inds_with_highest_quality] = 1
-class RPNOutputs(object):
+class RPNOutputs:
    def __init__(
        self,
        box2box_transform,
@@ -1132,7 +1132,7 @@ class ROIPooler(nn.Module):
        return output
-class ROIOutputs(object):
+class ROIOutputs:
    def __init__(self, cfg, training=False):
        self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA
        self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
......
@@ -281,7 +281,7 @@ class BertTokenizer(PreTrainedTokenizer):
        return (vocab_file,)
-class BasicTokenizer(object):
+class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -442,7 +442,7 @@ class BasicTokenizer(object):
        return "".join(output)
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
......
@@ -691,7 +691,7 @@ class CharacterTokenizer:
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -853,7 +853,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
@@ -910,7 +910,7 @@ class WordpieceTokenizer(object):
        return output_tokens
-class SentencepieceTokenizer(object):
+class SentencepieceTokenizer:
    """
    Runs sentencepiece tokenization. Based on transformers.models.albert.tokenization_albert.AlbertTokenizer.
    """
......
@@ -90,7 +90,7 @@ def whitespace_tokenize(text):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
......
@@ -2596,7 +2596,7 @@ def _max_by_axis(the_list):
# Copied from transformers.models.detr.modeling_detr.NestedTensor
-class NestedTensor(object):
+class NestedTensor:
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask
......
@@ -285,7 +285,7 @@ class ConvBertTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
-class BasicTokenizer(object):
+class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -447,7 +447,7 @@ class BasicTokenizer(object):
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
......
@@ -44,7 +44,7 @@ def load_vocab(vocab_file):
    return vocab
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
......
@@ -138,7 +138,7 @@ class XSoftmax(torch.autograd.Function):
        return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
-class DropoutContext(object):
+class DropoutContext:
    def __init__(self):
        self.dropout = 0
        self.mask = None
......
@@ -133,7 +133,7 @@ class XSoftmax(torch.autograd.Function):
# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
-class DropoutContext(object):
+class DropoutContext:
    def __init__(self):
        self.dropout = 0
        self.mask = None
......
@@ -2491,7 +2491,7 @@ def _max_by_axis(the_list):
# Copied from transformers.models.detr.modeling_detr.NestedTensor
-class NestedTensor(object):
+class NestedTensor:
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask
......
@@ -2516,7 +2516,7 @@ def nonzero_tuple(x):
# from https://github.com/facebookresearch/detectron2/blob/9921a2caa585d4fa66c4b534b6fab6e74d89b582/detectron2/modeling/matcher.py#L9
-class DetaMatcher(object):
+class DetaMatcher:
    """
    This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will
    have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements.
......
@@ -365,7 +365,7 @@ class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
        )
-class SubWordJapaneseTokenizer(object):
+class SubWordJapaneseTokenizer:
    """
    This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
    - Decoding byte0~byte255 tokens correctly
......
@@ -21,7 +21,7 @@ from ....utils import logging
logger = logging.get_logger(__name__)
-class MMBTConfig(object):
+class MMBTConfig:
    """
    This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
    model according to the specified arguments, defining the model architecture.
......
@@ -354,7 +354,7 @@ class RealmTokenizer(PreTrainedTokenizer):
        return (vocab_file,)
-class BasicTokenizer(object):
+class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -503,7 +503,7 @@ class BasicTokenizer(object):
        return "".join(output)
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
......
@@ -283,7 +283,7 @@ class RetriBertTokenizer(PreTrainedTokenizer):
        return (vocab_file,)
-class BasicTokenizer(object):
+class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
@@ -444,7 +444,7 @@ class BasicTokenizer(object):
        return "".join(output)
-class WordpieceTokenizer(object):
+class WordpieceTokenizer:
    """Runs WordPiece tokenization."""
    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
......
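A cleanup like this one can also be checked (or repeated) mechanically. Here is a minimal sketch, assuming a local checkout whose sources live under `src/` (a hypothetical path, not from the commit), that uses Python's `ast` module to flag any class that still lists `object` explicitly among its bases:

import ast
from pathlib import Path

# Walk every Python file under src/ and report `class X(object):`-style
# definitions, printing the file, line number, and class name.
for path in Path("src").rglob("*.py"):
    tree = ast.parse(path.read_text(encoding="utf-8"))
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            if any(isinstance(b, ast.Name) and b.id == "object" for b in node.bases):
                print(f"{path}:{node.lineno}: class {node.name}(object)")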