# Commit 0cd65242 authored by Mandeep Singh Baines
# Initial commit (no parents)
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .. import Tensor, _size
from typing import Any, Optional, Tuple, Dict, List, Callable
from .common_types import _ratio_any_t
# 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
# It is standards-track but not in `typing` yet. We leave this here to be uncommented once the feature
# is wide-spread.
# from mypy_extensions import TypedDict
# GRID_SAMPLE_INTERPOLATION_MODES = TypedDict('GRID_SAMPLE_INTERPOLATION_MODES', {'bilinear': int, 'nearest': int})
# GRID_SAMPLE_PADDING_MODES = TypedDict('GRID_SAMPLE_PADDING_MODES', {'zeros': int, 'border': int, 'reflection': int})
GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int]
GRID_SAMPLE_PADDING_MODES = Dict[str, int]
# These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning.
#
# The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate.
# This was necessary since the JIT uses BroadcastingList* types but static checking with mypy etc requires a `Sequence`
# type. There is no way to express the expected lengths of these lists in the current Python typing system.
#
# Functions created via `_add_docstr` in `functional.py` were merely typed as `Any` by `stubgen`, so those were
# deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code
# generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system
# to encode the type semantics of `_add_docstr`, should that system ever become widespread.
# Pooling stubs that also return the argmax indices alongside the pooled
# output. `_size` stands in for the JIT's BroadcastingList types (see the
# header note above); all defaults are elided with `...` per stub convention.
def fractional_max_pool2d_with_indices(input: Tensor, kernel_size: _size, output_size: Optional[_size] = ...,
                                       output_ratio: Optional[_ratio_any_t] = ..., return_indices: bool = ...,
                                       _random_samples: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ...
def fractional_max_pool3d_with_indices(input: Tensor, kernel_size: _size, output_size: Optional[_size] = ...,
                                       output_ratio: Optional[_ratio_any_t] = ..., return_indices: bool = ...,
                                       _random_samples: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ...
def max_pool1d_with_indices(input: Tensor, kernel_size: _size, stride: Optional[_size] = ..., padding: _size = ...,
                            dilation: _size = ..., ceil_mode: bool = ..., return_indices: bool = ...) -> Tuple[
    Tensor, Tensor]: ...
def max_pool2d_with_indices(input: Tensor, kernel_size: _size, stride: Optional[_size] = ..., padding: _size = ...,
                            dilation: _size = ..., ceil_mode: bool = ..., return_indices: bool = ...) -> Tuple[
    Tensor, Tensor]: ...
def max_pool3d_with_indices(input: Tensor, kernel_size: _size, stride: Optional[_size] = ..., padding: _size = ...,
                            dilation: _size = ..., ceil_mode: bool = ..., return_indices: bool = ...) -> Tuple[
    Tensor, Tensor]: ...
# Unpooling (inverse of the *_with_indices pools above) and Lp-norm pooling.
def max_unpool1d(input: Tensor, indices: Tensor, kernel_size: _size, stride: Optional[_size] = ...,
                 padding: _size = ..., output_size: Optional[_size] = ...) -> Tensor: ...
def max_unpool2d(input: Tensor, indices: Tensor, kernel_size: _size, stride: Optional[_size] = ...,
                 padding: _size = ..., output_size: Optional[_size] = ...) -> Tensor: ...
def max_unpool3d(input: Tensor, indices: Tensor, kernel_size: _size, stride: Optional[_size] = ...,
                 padding: _size = ..., output_size: Optional[_size] = ...) -> Tensor: ...
# NOTE(review): lp_pool2d is declared before lp_pool1d here, mirroring the
# generated stub order; both take a plain int kernel_size.
def lp_pool2d(input: Tensor, norm_type: float, kernel_size: int, stride: Optional[_size] = ...,
              ceil_mode: bool = ...) -> Tensor: ...
def lp_pool1d(input: Tensor, norm_type: float, kernel_size: int, stride: Optional[_size] = ...,
              ceil_mode: bool = ...) -> Tensor: ...
# Adaptive pooling: the caller specifies the desired output size rather than
# a kernel size. The max variants also return argmax indices.
def adaptive_max_pool1d_with_indices(input: Tensor, output_size: _size, return_indices: bool = ...) -> Tuple[
    Tensor, Tensor]: ...
def adaptive_max_pool2d_with_indices(input: Tensor, output_size: _size, return_indices: bool = ...) -> Tuple[
    Tensor, Tensor]: ...
def adaptive_max_pool3d_with_indices(input: Tensor, output_size: _size, return_indices: bool = ...) -> Tuple[
    Tensor, Tensor]: ...
def adaptive_avg_pool2d(input: Tensor, output_size: _size) -> Tensor: ...
def adaptive_avg_pool3d(input: Tensor, output_size: _size) -> Tensor: ...
# Dropout family: all variants share the (input, p, training, inplace)
# signature and return a Tensor.
def dropout(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ...
def alpha_dropout(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ...
def dropout2d(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ...
def dropout3d(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ...
def feature_alpha_dropout(input: Tensor, p: float = ..., training: bool = ..., inplace: bool = ...) -> Tensor: ...
# Elementwise activation functions with precise Tensor signatures.
def threshold(input: Tensor, threshold: float, value: float, inplace: bool = ...) -> Tensor: ...
def relu(input: Tensor, inplace: bool = ...) -> Tensor: ...
def glu(input: Tensor, dim: int = ...) -> Tensor: ...
def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., inplace: bool = ...) -> Tensor: ...
def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ...
def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
def selu(input: Tensor, inplace: bool = ...) -> Tensor: ...
def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
def leaky_relu(input: Tensor, negative_slope: float = ..., inplace: bool = ...) -> Tensor: ...
def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
def rrelu(input: Tensor, lower: float = ..., upper: float = ..., training: bool = ...,
          inplace: bool = ...) -> Tensor: ...
# NOTE(review): stubgen left these as untyped `(input: Any)` with no return
# annotation; tightened to Tensor -> Tensor here, matching the rest of this
# module's elementwise activations — confirm against functional.py.
def gelu(input: Tensor) -> Tensor: ...
def hardshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
def tanhshrink(input: Tensor) -> Tensor: ...
def softsign(input: Tensor) -> Tensor: ...
# Softmax-style reductions along `dim`; `_stacklevel` is an internal
# warning-reporting detail, `dtype` optionally casts before the op.
def softmin(input: Tensor, dim: Optional[int] = ..., _stacklevel: int = ..., dtype: Optional[int] = ...) -> Tensor: ...
def softmax(input: Tensor, dim: Optional[int] = ..., _stacklevel: int = ..., dtype: Optional[int] = ...) -> Tensor: ...
def gumbel_softmax(logits: Tensor, tau: float = ..., hard: bool = ..., eps: float = ..., dim: int = ...) -> Tensor: ...
def log_softmax(input: Tensor, dim: Optional[int] = ..., _stacklevel: int = ...,
                dtype: Optional[int] = ...) -> Tensor: ...
def tanh(input: Tensor) -> Tensor: ...
def sigmoid(input: Tensor) -> Tensor: ...
# Linear / bilinear maps and embedding lookups.
def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = ...) -> Tensor: ...
def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = ...) -> Tensor: ...
def embedding(input: Tensor, weight: Tensor, padding_idx: Optional[int] = ..., max_norm: Optional[float] = ...,
              norm_type: float = ..., scale_grad_by_freq: bool = ..., sparse: bool = ...) -> Tensor: ...
def embedding_bag(input: Tensor, weight: Tensor, offsets: Optional[Tensor] = ..., max_norm: Optional[float] = ...,
                  norm_type: float = ..., scale_grad_by_freq: bool = ..., mode: str = ...,
                  sparse: bool = ...) -> Tensor: ...
# Normalization functions. batch_norm takes running statistics positionally;
# instance_norm accepts them as optional keyword-style arguments.
def batch_norm(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor],
               weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ..., training: bool = ...,
               momentum: float = ..., eps: float = ...) -> Tensor: ...
def instance_norm(input: Tensor, running_mean: Optional[Tensor] = ..., running_var: Optional[Tensor] = ...,
                  weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ..., use_input_stats: bool = ...,
                  momentum: float = ..., eps: float = ...) -> Tensor: ...
def layer_norm(input: Tensor, normalized_shape: List[int], weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ...,
               eps: float = ...) -> Tensor: ...
def group_norm(input: Tensor, num_groups: int, weight: Optional[Tensor] = ..., bias: Optional[Tensor] = ...,
               eps: float = ...) -> Tensor: ...
def local_response_norm(input: Tensor, size: int, alpha: float = ..., beta: float = ..., k: float = ...) -> Tensor: ...
# Loss-function stubs. `size_average`/`reduce` appear alongside `reduction`
# throughout — presumably the legacy pre-`reduction` reduction flags kept for
# backward compatibility; confirm against functional.py.
def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: int = ...,
             reduction: str = ..., zero_infinity: bool = ...) -> Tensor: ...
def nll_loss(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., size_average: Optional[bool] = ...,
             ignore_index: int = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def poisson_nll_loss(input: Tensor, target: Tensor, log_input: bool = ..., full: bool = ...,
                     size_average: Optional[bool] = ..., eps: float = ..., reduce: Optional[bool] = ...,
                     reduction: str = ...) -> Tensor: ...
def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
           reduction: str = ...) -> Tensor: ...
def cross_entropy(input: Tensor, target: Tensor, weight: Optional[Tensor] = ..., size_average: Optional[bool] = ...,
                  ignore_index: int = ..., reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def binary_cross_entropy(input: Tensor, target: Tensor, weight: Optional[Tensor] = ...,
                         size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                         reduction: str = ...) -> Tensor: ...
def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = ...,
                                     size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                                     reduction: str = ..., pos_weight: Optional[Tensor] = ...) -> Tensor: ...
def smooth_l1_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                   reduction: str = ...) -> Tensor: ...
def l1_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
            reduction: str = ...) -> Tensor: ...
def mse_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
             reduction: str = ...) -> Tensor: ...
def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ...,
                        size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                        reduction: str = ...) -> Tensor: ...
def hinge_embedding_loss(input: Tensor, target: Tensor, margin: float = ..., size_average: Optional[bool] = ...,
                         reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def multilabel_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ...,
                           reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def soft_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                     reduction: str = ...) -> Tensor: ...
def multilabel_soft_margin_loss(input: Tensor, target: Tensor, weight: Optional[Tensor] = ...,
                                size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                                reduction: str = ...) -> Tensor: ...
def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float = ...,
                          size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                          reduction: str = ...) -> Tensor: ...
def multi_margin_loss(input: Tensor, target: Tensor, p: int = ..., margin: float = ..., weight: Optional[Tensor] = ...,
                      size_average: Optional[bool] = ..., reduce: Optional[bool] = ...,
                      reduction: str = ...) -> Tensor: ...
# Resampling / padding. These are kept loosely typed (`Any`) because `size`
# and `scale_factor` accept several shapes (int, tuple, ...) that the stub
# does not pin down.
def upsample(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ..., mode: str = ...,
             align_corners: Optional[Any] = ...): ...
def interpolate(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ..., mode: str = ...,
                align_corners: Optional[Any] = ...): ...
def upsample_nearest(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ...): ...
def upsample_bilinear(input: Any, size: Optional[Any] = ..., scale_factor: Optional[Any] = ...): ...
def grid_sample(input: Tensor, grid: Tensor, mode: str = ..., padding_mode: str = ...) -> Tensor: ...
def affine_grid(theta: Tensor, size: List[int]) -> Tensor: ...
def pad(input: Tensor, pad: List[int], mode: str = ..., value: float = ...) -> Tensor: ...
# Distance metrics, normalization, and the fold/unfold patch-extraction pair.
def pairwise_distance(x1: Tensor, x2: Tensor, p: float = ..., eps: float = ..., keepdim: bool = ...) -> Tensor: ...
def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: float = ..., p: float = ...,
                        eps: float = ..., swap: bool = ..., size_average: Optional[bool] = ...,
                        reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ...
def normalize(input: Tensor, p: float = ..., dim: int = ..., eps: float = ...,
              out: Optional[Tensor] = ...) -> Tensor: ...
# Internal validation helper; raises on bad input rather than returning.
def assert_int_or_pair(arg: Any, arg_name: Any, message: Any) -> None: ...
def unfold(input: Tensor, kernel_size: _size, dilation: _size = ..., padding: _size = ...,
           stride: _size = ...) -> Tensor: ...
def fold(input: Tensor, output_size: _size, kernel_size: _size, dilation: _size = ..., padding: _size = ...,
         stride: _size = ...) -> Tensor: ...
from .. import conv1d as conv1d
from .. import conv2d as conv2d
from .. import conv3d as conv3d
from .. import conv_transpose1d as conv_transpose1d
from .. import conv_transpose2d as conv_transpose2d
from .. import conv_transpose3d as conv_transpose3d
from .. import conv_tbc as conv_tbc
from .. import avg_pool1d as avg_pool1d
from .. import relu_ as relu_
from .. import selu_ as selu_
from .. import celu_ as celu_
from .. import rrelu_ as rrelu_
from .. import pixel_shuffle as pixel_shuffle
from .. import pdist as pdist
from .. import cosine_similarity as cosine_similarity
# Functions whose exact signatures could not be generated for this stub; they
# are declared loosely as bare `Callable` instead.
fractional_max_pool2d: Callable
fractional_max_pool3d: Callable
max_pool1d: Callable
max_pool2d: Callable
max_pool3d: Callable
adaptive_max_pool1d: Callable
adaptive_max_pool2d: Callable
adaptive_max_pool3d: Callable
avg_pool2d: Callable
avg_pool3d: Callable
hardtanh_: Callable
elu_: Callable
leaky_relu_: Callable
logsigmoid: Callable
softplus: Callable
softshrink: Callable
one_hot: Callable
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module as Module
from .activation import CELU as CELU, ELU as ELU, GLU as GLU, GELU as GELU, Hardshrink as Hardshrink, \
Hardtanh as Hardtanh, LeakyReLU as LeakyReLU, LogSigmoid as LogSigmoid, LogSoftmax as LogSoftmax, PReLU as PReLU, \
RReLU as RReLU, ReLU as ReLU, ReLU6 as ReLU6, SELU as SELU, Sigmoid as Sigmoid, Softmax as Softmax, \
Softmax2d as Softmax2d, Softmin as Softmin, Softplus as Softplus, Softshrink as Softshrink, Softsign as Softsign, \
Tanh as Tanh, Tanhshrink as Tanhshrink, Threshold as Threshold
from .adaptive import AdaptiveLogSoftmaxWithLoss as AdaptiveLogSoftmaxWithLoss
from .batchnorm import BatchNorm1d as BatchNorm1d, BatchNorm2d as BatchNorm2d, BatchNorm3d as BatchNorm3d, \
SyncBatchNorm as SyncBatchNorm
from .container import Container as Container, ModuleDict as ModuleDict, ModuleList as ModuleList, \
ParameterDict as ParameterDict, ParameterList as ParameterList, Sequential as Sequential
from .conv import Conv1d as Conv1d, Conv2d as Conv2d, Conv3d as Conv3d, ConvTranspose1d as ConvTranspose1d, \
ConvTranspose2d as ConvTranspose2d, ConvTranspose3d as ConvTranspose3d
from .distance import CosineSimilarity as CosineSimilarity, PairwiseDistance as PairwiseDistance
from .dropout import AlphaDropout as AlphaDropout, Dropout as Dropout, Dropout2d as Dropout2d, Dropout3d as Dropout3d, \
FeatureAlphaDropout as FeatureAlphaDropout
from .fold import Fold as Fold, Unfold as Unfold
from .instancenorm import InstanceNorm1d as InstanceNorm1d, InstanceNorm2d as InstanceNorm2d, \
InstanceNorm3d as InstanceNorm3d
from .linear import Bilinear as Bilinear, Identity as Identity, Linear as Linear
from .loss import BCELoss as BCELoss, BCEWithLogitsLoss as BCEWithLogitsLoss, CTCLoss as CTCLoss, \
CosineEmbeddingLoss as CosineEmbeddingLoss, CrossEntropyLoss as CrossEntropyLoss, \
HingeEmbeddingLoss as HingeEmbeddingLoss, KLDivLoss as KLDivLoss, L1Loss as L1Loss, MSELoss as MSELoss, \
MarginRankingLoss as MarginRankingLoss, MultiLabelMarginLoss as MultiLabelMarginLoss, \
MultiLabelSoftMarginLoss as MultiLabelSoftMarginLoss, MultiMarginLoss as MultiMarginLoss, NLLLoss as NLLLoss, \
NLLLoss2d as NLLLoss2d, PoissonNLLLoss as PoissonNLLLoss, SmoothL1Loss as SmoothL1Loss, \
SoftMarginLoss as SoftMarginLoss, TripletMarginLoss as TripletMarginLoss
from .module import Module as Module
from .normalization import CrossMapLRN2d as CrossMapLRN2d, GroupNorm as GroupNorm, LayerNorm as LayerNorm, \
LocalResponseNorm as LocalResponseNorm
from .padding import ConstantPad1d as ConstantPad1d, ConstantPad2d as ConstantPad2d, ConstantPad3d as ConstantPad3d, \
ReflectionPad1d as ReflectionPad1d, ReflectionPad2d as ReflectionPad2d, ReplicationPad1d as ReplicationPad1d, \
ReplicationPad2d as ReplicationPad2d, ReplicationPad3d as ReplicationPad3d, ZeroPad2d as ZeroPad2d
from .pixelshuffle import PixelShuffle as PixelShuffle
from .pooling import AdaptiveAvgPool1d as AdaptiveAvgPool1d, AdaptiveAvgPool2d as AdaptiveAvgPool2d, \
AdaptiveAvgPool3d as AdaptiveAvgPool3d, AdaptiveMaxPool1d as AdaptiveMaxPool1d, \
AdaptiveMaxPool2d as AdaptiveMaxPool2d, AdaptiveMaxPool3d as AdaptiveMaxPool3d, AvgPool1d as AvgPool1d, \
AvgPool2d as AvgPool2d, AvgPool3d as AvgPool3d, FractionalMaxPool2d as FractionalMaxPool2d, \
FractionalMaxPool3d as FractionalMaxPool3d, LPPool1d as LPPool1d, LPPool2d as LPPool2d, MaxPool1d as MaxPool1d, \
MaxPool2d as MaxPool2d, MaxPool3d as MaxPool3d, MaxUnpool1d as MaxUnpool1d, MaxUnpool2d as MaxUnpool2d, \
MaxUnpool3d as MaxUnpool3d
from .rnn import GRU as GRU, GRUCell as GRUCell, LSTM as LSTM, LSTMCell as LSTMCell, RNN as RNN, RNNBase as RNNBase, \
RNNCell as RNNCell, RNNCellBase as RNNCellBase
from .sparse import Embedding as Embedding, EmbeddingBag as EmbeddingBag
from .upsampling import Upsample as Upsample, UpsamplingBilinear2d as UpsamplingBilinear2d, \
UpsamplingNearest2d as UpsamplingNearest2d
#MODIFIED BY TORCHGPIPE
from .modules.flatten import Flatten as Flatten
#END
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .. import Parameter
from .module import Module
from typing import Any, Optional
# Activation-module stubs. Each class re-declares forward/__call__ with a
# precise Tensor -> Tensor signature; the trailing `# type: ignore` silences
# the signature mismatch with the generic Module base.
class Threshold(Module):
    threshold: float = ...  # values <= threshold are replaced
    value: float = ...      # replacement value
    inplace: bool = ...
    def __init__(self, threshold: float, value: float, inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# ReLU is expressed as a Threshold subclass, so it inherits forward/__call__.
class ReLU(Threshold):
    def __init__(self, inplace: bool = ...) -> None: ...
class RReLU(Module):
    lower: float = ...
    upper: float = ...
    inplace: bool = ...
    def __init__(self, lower: float = ..., upper: float = ..., inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Hardtanh(Module):
    min_val: float = ...
    max_val: float = ...
    inplace: bool = ...
    def __init__(self, min_val: float = ..., max_val: float = ..., inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# ReLU6 specializes Hardtanh, inheriting its forward/__call__.
class ReLU6(Hardtanh):
    def __init__(self, inplace: bool = ...) -> None: ...
# Parameter-free and simple parameterized elementwise activations.
class Sigmoid(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Tanh(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class ELU(Module):
    alpha: float = ...
    inplace: bool = ...
    def __init__(self, alpha: float = ..., inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class CELU(Module):
    alpha: float = ...
    inplace: bool = ...
    def __init__(self, alpha: float = ..., inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class SELU(Module):
    inplace: bool = ...
    def __init__(self, inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class GLU(Module):
    dim: int = ...  # dimension along which the input is halved/gated
    def __init__(self, dim: int = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class GELU(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Shrinkage / leaky / learned activations.
class Hardshrink(Module):
    lambd: float = ...  # shrinkage threshold
    def __init__(self, lambd: float = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class LeakyReLU(Module):
    negative_slope: float = ...
    inplace: bool = ...
    def __init__(self, negative_slope: float = ..., inplace: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class LogSigmoid(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Softplus(Module):
    beta: float = ...
    threshold: float = ...
    def __init__(self, beta: float = ..., threshold: float = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Softshrink(Module):
    lambd: float = ...
    def __init__(self, lambd: float = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# PReLU is the only module here with a learnable Parameter.
class PReLU(Module):
    num_parameters: int = ...
    weight: Parameter = ...
    def __init__(self, num_parameters: int = ..., init: float = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Softsign(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Tanhshrink(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Softmax-family modules: reduce along `dim` (Optional in __init__, stored
# as a plain int attribute).
class Softmin(Module):
    dim: int = ...
    def __init__(self, dim: Optional[int] = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Softmax(Module):
    dim: int = ...
    def __init__(self, dim: Optional[int] = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Softmax2d(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class LogSoftmax(Module):
    dim: int = ...
    def __init__(self, dim: Optional[int] = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .module import Module
from .linear import Linear
from collections import namedtuple
from typing import List, Sequence
from .container import ModuleList
# Named return type of AdaptiveLogSoftmaxWithLoss.forward: (output, loss).
_ASMoutput = namedtuple('ASMoutput', ['output', 'loss'])
class AdaptiveLogSoftmaxWithLoss(Module):
    in_features: int = ...
    n_classes: int = ...
    cutoffs: List[int] = ...    # class-index boundaries between head and tail clusters
    div_value: float = ...
    head_bias: bool = ...
    head: Linear = ...          # head projection
    tail: ModuleList = ...      # per-cluster tail projections
    def __init__(self, in_features: int, n_classes: int, cutoffs: Sequence[int], div_value: float = ...,
                 head_bias: bool = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    # forward needs the target to compute the loss; log_prob/predict do not.
    def forward(self, input: Tensor, target: Tensor) -> _ASMoutput: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> _ASMoutput: ... # type: ignore
    def log_prob(self, input: Tensor) -> List[float]: ...
    def predict(self, input: Tensor) -> Tensor: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .. import Parameter
from .module import Module
from typing import Any, Optional
# Shared base for the BatchNorm1d/2d/3d stubs.
class _BatchNorm(Module):
    num_features: int = ...
    eps: float = ...
    momentum: float = ...
    affine: bool = ...
    track_running_stats: bool = ...
    weight: Parameter = ...
    bias: Parameter = ...
    #MODIFIED BY TORCHGPIPE
    # torchgpipe addition: expose the running-statistics buffers and allow
    # momentum=None in __init__ (hence Optional[float]).
    running_mean: Tensor
    running_var: Tensor
    num_batches_tracked: Tensor
    def __init__(self, num_features: int, eps: float = ..., momentum: Optional[float] = ..., affine: bool = ...,
                 track_running_stats: bool = ...) -> None: ...
    #END
    def reset_running_stats(self) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class BatchNorm1d(_BatchNorm): ...
class BatchNorm2d(_BatchNorm): ...
class BatchNorm3d(_BatchNorm): ...
class SyncBatchNorm(_BatchNorm):
    # TODO set process_group to the right type once torch.distributed is stubbed
    def __init__(self, num_features: int, eps: float = ..., momentum: float = ..., affine: bool = ...,
                 track_running_stats: bool = ..., process_group: Optional[Any] = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from typing import Any, Optional, Union, overload, TypeVar, Iterable, Tuple, Mapping, Iterator
from collections import OrderedDict
from ... import Tensor
from .. import Parameter
# Legacy container base; accepts arbitrary keyword submodules.
class Container(Module):
    def __init__(self, **kwargs: Any) -> None: ...
# Type variable used so slicing a container subclass returns that subclass.
T = TypeVar('T')
class Sequential(Module):
    # Overloaded constructor: positional modules, or one OrderedDict of
    # name -> module. Integer indexing yields a Module; slicing yields a
    # Sequential of the same concrete type (via T).
    @overload
    def __init__(self, *args: Module) -> None: ...
    @overload
    def __init__(self, arg: OrderedDict[str, Module]) -> None: ...
    @overload
    def __getitem__(self, idx: int) -> Module: ...
    @overload
    def __getitem__(self: T, idx: slice) -> T: ...
    def __setitem__(self, idx: Union[int], module: Module) -> None: ...
    def __delitem__(self, idx: Union[slice, int]) -> None: ...
    def __len__(self) -> int: ...
    #MODIFIED BY TORCHGPIPE
    # torchgpipe widens forward/__call__ to carry either a single Tensor or
    # a tuple of Tensors, and declares Sequential as iterable over modules.
    TensorOrTensors = Union[Tensor, Tuple[Tensor, ...]]
    def forward(self, input: TensorOrTensors) -> TensorOrTensors: ... # type: ignore
    def __call__(self, input: TensorOrTensors) -> TensorOrTensors: ... # type: ignore
    from typing import Iterator
    def __iter__(self) -> Iterator[Module]: ...
    #END
# List-like container of registered submodules.
class ModuleList(Module):
    def __init__(self, modules: Optional[Iterable[Module]] = ...) -> None: ...
    @overload
    def __getitem__(self, idx: int) -> Module: ...
    @overload
    def __getitem__(self: T, idx: slice) -> T: ...  # slicing preserves the subclass type
    def __setitem__(self, idx: int, module: Module) -> None: ...
    def __delitem__(self, idx: Union[int, slice]) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[Module]: ...
    def __iadd__(self: T, modules: Iterable[Module]) -> T: ...
    def insert(self, index: int, module: Module) -> None: ...
    # append/extend return self to allow chaining.
    def append(self: T, module: Module) -> T: ...
    def extend(self: T, modules: Iterable[Module]) -> T: ...
# Dict-like container of registered submodules, keyed by str.
class ModuleDict(Module):
    def __init__(self, modules: Optional[Mapping[str, Module]] = ...) -> None: ...
    def __getitem__(self, key: str): ...  # untyped return, matching the original stub
    def __setitem__(self, key: str, module: Module) -> None: ...
    def __delitem__(self, key: str) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def __contains__(self, key: str) -> bool: ...
    def clear(self) -> None: ...
    def pop(self, key: str): ...
    def keys(self) -> Iterable[str]: ...
    def items(self) -> Iterable[Tuple[str, Module]]: ...
    def values(self) -> Iterable[Module]: ...
    def update(self, modules: Mapping[str, Module]) -> None: ...
# List-like container of Parameters; mirrors ModuleList's interface.
class ParameterList(Module):
    def __init__(self, parameters: Optional[Iterable[Parameter]] = ...) -> None: ...
    @overload
    def __getitem__(self, idx: int) -> Parameter: ...
    @overload
    def __getitem__(self: T, idx: slice) -> T: ...
    def __setitem__(self, idx: int, param: Parameter) -> None: ...
    def __delitem__(self, idx: Union[int, slice]) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[Parameter]: ...
    def __iadd__(self: T, parameters: Iterable[Parameter]) -> T: ...
    def insert(self, index: int, parameter: Parameter) -> None: ...
    def append(self: T, parameter: Parameter) -> T: ...
    def extend(self: T, parameters: Iterable[Parameter]) -> T: ...
# Dict-like container of Parameters; mirrors ModuleDict's interface.
class ParameterDict(Module):
    def __init__(self, parameters: Optional[Mapping[str, Parameter]] = ...) -> None: ...
    def __getitem__(self, key: str): ...  # untyped return, matching the original stub
    def __setitem__(self, key: str, param: Parameter) -> None: ...
    def __delitem__(self, key: str) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def __contains__(self, key: str) -> bool: ...
    def clear(self) -> None: ...
    def pop(self, key: str): ...
    def keys(self) -> Iterable[str]: ...
    def items(self) -> Iterable[Tuple[str, Parameter]]: ...
    def values(self) -> Iterable[Parameter]: ...
    def update(self, parameters: Mapping[str, Parameter]) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from typing import Any, Optional, List, Tuple, Union
from ... import Tensor
from ..common_types import _size_1_t, _size_2_t, _size_3_t
# Shared base of all convolution module stubs; holds the common attributes.
class _ConvNd(Module):
    in_channels: int = ...
    out_channels: int = ...
    kernel_size: Tuple[int, ...] = ...
    stride: Tuple[int, ...] = ...
    padding: Tuple[int, ...] = ...
    dilation: Tuple[int, ...] = ...
    transposed: bool = ...
    output_padding: Tuple[int, ...] = ...
    groups: int = ...
    padding_mode: str = ...
    weight: Tensor = ...
    bias: Tensor = ...
    # padding_mode can only be one of an enumerated set of strings. Python typing will eventually support precisely
    # typing this with the `Literal` type.
    def __init__(self, in_channels: Any, out_channels: Any, kernel_size: Any, stride: Any, padding: Any, dilation: Any,
                 transposed: Any, output_padding: Any, groups: Any, bias: Any, padding_mode: Any) -> None: ...
    def reset_parameters(self) -> None: ...
# Concrete convolutions: identical shape except for the _size_{1,2,3}_t
# dimension-specific argument types.
class Conv1d(_ConvNd):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t = ...,
                 padding: _size_1_t = ..., dilation: _size_1_t = ..., groups: int = ..., bias: bool = ...,
                 padding_mode: str = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Conv2d(_ConvNd):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t = ...,
                 padding: _size_2_t = ..., dilation: _size_2_t = ..., groups: int = ..., bias: bool = ...,
                 padding_mode: str = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Conv3d(_ConvNd):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t = ...,
                 padding: _size_3_t = ..., dilation: _size_3_t = ..., groups: int = ..., bias: bool = ...,
                 padding_mode: str = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class _ConvTransposeMixin:
def forward(self, input: Tensor, output_size: Optional[List[int]] = ...): ... # type: ignore
def __call__(self, input: Tensor, output_size: Optional[List[int]] = ...): ... # type: ignore
# We need a '# type: ignore' at the end of the declaration of each class that inherits from
# `_ConvTransposeMixin` since the `forward` method declared in `_ConvTransposeMixin` is
# incompatible with the `forward` method declared in `Module`.
class ConvTranspose1d(_ConvTransposeMixin, _ConvNd): # type: ignore
def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t = ...,
padding: _size_1_t = ..., output_padding: _size_1_t = ..., groups: int = ..., bias: bool = ...,
dilation: int = ..., padding_mode: str = ...) -> None: ...
def forward(self, input: Tensor, output_size: Optional[List[int]] = ...) -> Tensor: ... # type: ignore
def __call__(self, input: Tensor, output_size: Optional[List[int]] = ...) -> Tensor: ... # type: ignore
class ConvTranspose2d(_ConvTransposeMixin, _ConvNd): # type: ignore
    # Typed stub for torch.nn.ConvTranspose2d.
    # `dilation` accepts an int or a 2-tuple, so it is typed `_size_2_t`
    # rather than plain `int` (consistent with the Conv2d stub).
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t = ...,
                 padding: _size_2_t = ..., output_padding: _size_2_t = ..., groups: int = ..., bias: bool = ...,
                 dilation: _size_2_t = ..., padding_mode: str = ...) -> None: ...
    # Narrows the mixin's forward/__call__ to return Tensor.
    def forward(self, input: Tensor, output_size: Optional[List[int]] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, output_size: Optional[List[int]] = ...) -> Tensor: ... # type: ignore
class ConvTranspose3d(_ConvTransposeMixin, _ConvNd): # type: ignore
    # Typed stub for torch.nn.ConvTranspose3d.
    # `dilation` accepts an int or a 3-tuple, so it is typed `_size_3_t`
    # rather than plain `int` (consistent with the Conv3d stub).
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t = ...,
                 padding: _size_3_t = ..., output_padding: _size_3_t = ..., groups: int = ..., bias: bool = ...,
                 dilation: _size_3_t = ..., padding_mode: str = ...) -> None: ...
    # Narrows the mixin's forward/__call__ to return Tensor.
    def forward(self, input: Tensor, output_size: Optional[List[int]] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, output_size: Optional[List[int]] = ...) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .module import Module
class PairwiseDistance(Module):
    # Typed stub for torch.nn.PairwiseDistance. The `p` constructor argument is
    # stored as the `norm` attribute (naming difference is in the real class).
    norm: float
    eps: float
    keepdim: bool
    def __init__(self, p: float = ..., eps: float = ..., keepdim: bool = ...) -> None: ...
    # Takes two input tensors, unlike most Module subclasses in these stubs.
    def forward(self, x1: Tensor, x2: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, x1: Tensor, x2: Tensor) -> Tensor: ... # type: ignore
class CosineSimilarity(Module):
    # Typed stub for torch.nn.CosineSimilarity: two-tensor forward, reduced
    # along `dim`, with `eps` guarding against division by zero.
    dim: int
    eps: float
    def __init__(self, dim: int = ..., eps: float = ...) -> None: ...
    def forward(self, x1: Tensor, x2: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, x1: Tensor, x2: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .module import Module
class _DropoutNd(Module):
    # Shared base for the dropout stubs: holds the drop probability `p` and the
    # `inplace` flag; the constructor is declared only once, here.
    p: float
    inplace: bool
    def __init__(self, p: float = ..., inplace: bool = ...) -> None: ...
    def extra_repr(self): ...
# Each concrete dropout variant only narrows forward/__call__ to
# Tensor -> Tensor; constructor and attributes come from _DropoutNd.
class Dropout(_DropoutNd):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Dropout2d(_DropoutNd):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Dropout3d(_DropoutNd):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AlphaDropout(_DropoutNd):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class FeatureAlphaDropout(_DropoutNd):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any
from .module import Module
class Flatten(Module):
    # Typed stub for torch.nn.Flatten: flattens the dims from `start_dim`
    # through `end_dim`. Attribute annotations narrowed from Any to int to
    # match the __init__ parameters.
    __constants__: Any = ...
    start_dim: int = ...
    end_dim: int = ...
    def __init__(self, start_dim: int = ..., end_dim: int = ...) -> None: ...
    # This stub section only imports Any (no Tensor), so forward stays loosely typed.
    def forward(self, input: Any): ... # type: ignore
    def __call__(self, input: Any): ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from ... import Tensor
from ..common_types import _size_any_t
class Fold(Module):
    # Typed stub for torch.nn.Fold. All geometry parameters accept int or
    # tuple, hence `_size_any_t`.
    output_size: _size_any_t = ...
    kernel_size: _size_any_t = ...
    dilation: _size_any_t = ...
    padding: _size_any_t = ...
    stride: _size_any_t = ...
    def __init__(self, output_size: _size_any_t, kernel_size: _size_any_t, dilation: _size_any_t = ...,
                 padding: _size_any_t = ..., stride: _size_any_t = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Unfold(Module):
    # Typed stub for torch.nn.Unfold — same parameters as Fold minus `output_size`.
    kernel_size: _size_any_t = ...
    dilation: _size_any_t = ...
    padding: _size_any_t = ...
    stride: _size_any_t = ...
    def __init__(self, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ...,
                 stride: _size_any_t = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .batchnorm import _BatchNorm
class _InstanceNorm(_BatchNorm):
    # Shared base for the InstanceNorm stubs; re-declares __init__ with the
    # instance-norm constructor signature on top of _BatchNorm.
    def __init__(self, num_features: int, eps: float = ..., momentum: float = ..., affine: bool = ...,
                 track_running_stats: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# The dimensional variants add no members at the stub level.
class InstanceNorm1d(_InstanceNorm): ...
class InstanceNorm2d(_InstanceNorm): ...
class InstanceNorm3d(_InstanceNorm): ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from .. import Parameter
from ... import Tensor
class Identity(Module):
    # Typed stub for torch.nn.Identity: argument-less pass-through module.
    def __init__(self) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Linear(Module):
    # Typed stub for torch.nn.Linear. `weight` and `bias` are learnable Parameters.
    in_features: int = ...
    out_features: int = ...
    weight: Parameter = ...
    bias: Parameter = ...
    def __init__(self, in_features: int, out_features: int, bias: bool = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class Bilinear(Module):
    # Typed stub for torch.nn.Bilinear — note forward takes two input tensors.
    in1_features: int = ...
    in2_features: int = ...
    out_features: int = ...
    weight: Parameter = ...
    bias: Parameter = ...
    def __init__(self, in1_features: int, in2_features: int, out_features: int, bias: bool = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input1: Tensor, input2: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input1: Tensor, input2: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Optional
from .module import Module
from ... import Tensor
# The deprecated `size_average` and `reduce` arguments are not included in the stubs
# Every loss class re-declares forward/__call__ so type checkers see the exact
# argument list instead of Module's variadic signature.
class _Loss(Module):
    # Base for all loss stubs; stores only the `reduction` mode.
    reduction: str = ...
    def __init__(self, reduction: str = ...) -> None: ...
class _WeightedLoss(_Loss):
    # Base for losses that additionally accept a rescaling `weight` tensor.
    def __init__(self, weight: Optional[Any] = ..., reduction: str = ...) -> None: ...
class L1Loss(_Loss):
    def __init__(self, reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class NLLLoss(_WeightedLoss):
    ignore_index: int = ...
    def __init__(self, weight: Optional[Any] = ..., ignore_index: int = ..., reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class NLLLoss2d(NLLLoss):
    # Deprecated alias-like subclass kept for compatibility; same signature as NLLLoss.
    def __init__(self, weight: Optional[Any] = ..., ignore_index: int = ..., reduction: str = ...) -> None: ...
class PoissonNLLLoss(_Loss):
    log_input: bool = ...
    full: bool = ...
    eps: float = ...
    def __init__(self, log_input: bool = ..., full: bool = ..., eps: float = ..., reduction: str = ...) -> None: ...
    def forward(self, log_input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, log_input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class KLDivLoss(_Loss):
    def __init__(self, reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class MSELoss(_Loss):
    def __init__(self, reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class BCELoss(_WeightedLoss):
    def __init__(self, weight: Optional[Any] = ..., reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class BCEWithLogitsLoss(_Loss):
    # Takes `weight` directly (not via _WeightedLoss) plus `pos_weight`.
    def __init__(self, weight: Optional[Any] = ..., reduction: str = ..., pos_weight: Optional[Any] = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class HingeEmbeddingLoss(_Loss):
    # Annotation narrowed from Any to float to match the __init__ parameter.
    margin: float = ...
    def __init__(self, margin: float = ..., reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class MultiLabelMarginLoss(_Loss):
    def __init__(self, reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class SmoothL1Loss(_Loss):
    def __init__(self, reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class SoftMarginLoss(_Loss):
    def __init__(self, reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class CrossEntropyLoss(_WeightedLoss):
    ignore_index: int = ...
    def __init__(self, weight: Optional[Any] = ..., ignore_index: int = ..., reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class MultiLabelSoftMarginLoss(_WeightedLoss):
    def __init__(self, weight: Optional[Any] = ..., reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class CosineEmbeddingLoss(_Loss):
    # Three-tensor forward: two inputs plus a target.
    margin: float = ...
    def __init__(self, margin: float = ..., reduction: str = ...) -> None: ...
    def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class MarginRankingLoss(_Loss):
    margin: float = ...
    def __init__(self, margin: float = ..., reduction: str = ...) -> None: ...
    def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class MultiMarginLoss(_WeightedLoss):
    p: int = ...
    margin: float = ...
    def __init__(self, p: int = ..., margin: float = ..., weight: Optional[Any] = ...,
                 reduction: str = ...) -> None: ...
    def forward(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, target: Tensor) -> Tensor: ... # type: ignore
class TripletMarginLoss(_Loss):
    margin: float = ...
    p: int = ...
    eps: float = ...
    swap: bool = ...
    def __init__(self, margin: float = ..., p: int = ..., eps: float = ..., swap: bool = ...,
                 reduction: str = ...) -> None: ...
    # forward takes the (anchor, positive, negative) triplet.
    def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor: ... # type: ignore
class CTCLoss(_Loss):
    blank: int = ...
    zero_infinity: bool = ...
    def __init__(self, blank: int = ..., reduction: str = ..., zero_infinity: bool = ...) -> None: ...
    def forward(self, log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor, device, dtype
from .. import Parameter
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, Generic
from collections import OrderedDict
from ...utils.hooks import RemovableHandle
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
# the type of the subclass, not the looser type of `Module`.
T = TypeVar('T')
# We parameterize modules by the return type of their `forward` (and therefore `__call__`) method. This allows
# type inference to infer that the return value of calling a module in the canonical way (via `__call__`) is the
# same as that of the submodule's custom `forward` function. Submodules that wish to opt in to this functionality
# must be defined as e.g. class ReturnsTwoTensors(Module[Tuple[Tensor, Tensor]]): ...
T_co = TypeVar('T_co', covariant=True)
class Module(Generic[T_co]):
    # Typed stub for torch.nn.Module, parameterized by the return type `T_co`
    # of `forward`/`__call__` (see the comment on the T_co TypeVar above).
    def __init__(self) -> None: ...
    def forward(self, *input: Any, **kwargs: Any) -> T_co: ... # type: ignore
    def __call__(self, *input: Any, **kwargs: Any) -> T_co: ... # type: ignore
    def register_buffer(self, name: str, tensor: Tensor) -> None: ...
    def register_parameter(self, name: str, param: Parameter) -> None: ...
    def add_module(self, name: str, module: 'Module') -> None: ...
    # Methods that return `self` are annotated with the `T` TypeVar so that a
    # subclass keeps its precise type through chained calls (see comment above).
    def apply(self: T, fn: Callable[['Module'], None]) -> T: ...
    def cuda(self: T, device: Optional[Union[int, device]] = ...) -> T: ...
    def cpu(self: T) -> T: ...
    def type(self: T, dst_type: Union[dtype, str]) -> T: ...
    def float(self: T) -> T: ...
    def double(self: T) -> T: ...
    def half(self: T) -> T: ...
    # `to` has three call shapes, expressed as overloads.
    @overload
    def to(self: T, device: Optional[Union[int, device]] = ..., dtype: Optional[Union[dtype, str]] = ...,
           non_blocking: bool = ...) -> T: ...
    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...
    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...
    def register_backward_hook(self, hook: Callable[
        ['Module', _grad_t, _grad_t], Union[None, Tensor]]) -> RemovableHandle: ...
    # The hook takes a module as a first argument and variadic arguments after
    # that, but there is no way to express that in the type system.
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle: ...
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle: ...
    def __getattr__(self, name: str) -> Union[Tensor, 'Module']: ...
    # TODO double-check this
    def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None: ...
    # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns
    # back that same object. But if they pass nothing, an `OrderedDict` is created and returned.
    T_destination = TypeVar('T_destination', bound=Mapping[str, Tensor])
    @overload
    def state_dict(self, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: ...
    @overload
    def state_dict(self, prefix: str = ..., keep_vars: bool = ...) -> OrderedDict[str, Tensor]: ...
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], OrderedDict[str, Tensor]], strict: bool = ...): ...
    def parameters(self, recurse: bool = ...) -> Iterator[Parameter]: ...
    def named_parameters(self, prefix: str = ..., recurse: bool = ...) -> Iterator[Tuple[str, Parameter]]: ...
    def buffers(self, recurse: bool = ...) -> Iterator[Tensor]: ...
    def named_buffers(self, prefix: str = ..., recurse: bool = ...) -> Iterator[Tuple[str, Tensor]]: ...
    def children(self) -> Iterator['Module']: ...
    def named_children(self) -> Iterator[Tuple[str, 'Module']]: ...
    def modules(self) -> Iterator['Module']: ...
    def named_modules(self, memo: Optional[Set['Module']] = ..., prefix: str = ...) -> Iterator[
        Tuple[str, 'Module']]: ...
    def train(self: T, mode: bool = ...) -> T: ...
    def eval(self: T) -> T: ...
    def zero_grad(self) -> None: ...
    def share_memory(self: T) -> T: ...
    def extra_repr(self) -> str: ...
    # MODIFIED BY TORCHGPIPE: `training` is declared here so type checkers
    # accept reads/writes of the training flag on Module instances.
    training: bool
    # END
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from typing import Any, Union, List
from ... import Tensor, Size
from .. import Parameter
class LocalResponseNorm(Module):
    # Typed stub for torch.nn.LocalResponseNorm.
    size: int = ...
    alpha: float = ...
    beta: float = ...
    k: float = ...
    def __init__(self, size: int, alpha: float = ..., beta: float = ..., k: float = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class CrossMapLRN2d(Module):
    # Same attribute set and constructor signature as LocalResponseNorm.
    size: int = ...
    alpha: float = ...
    beta: float = ...
    k: float = ...
    def __init__(self, size: int, alpha: float = ..., beta: float = ..., k: float = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Shape specifier accepted by LayerNorm: a single dim, a list of dims, or a Size.
_shape_t = Union[int, List[int], Size]
class LayerNorm(Module):
    normalized_shape: _shape_t = ...
    eps: float = ...
    elementwise_affine: bool = ...
    weight: Parameter = ...
    bias: Parameter = ...
    def __init__(self, normalized_shape: _shape_t, eps: float = ..., elementwise_affine: bool = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class GroupNorm(Module):
    num_groups: int = ...
    num_channels: int = ...
    eps: float = ...
    affine: bool = ...
    weight: Parameter = ...
    bias: Parameter = ...
    def __init__(self, num_groups: int, num_channels: int, eps: float = ..., affine: bool = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from ... import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
class _ConstantPadNd(Module):
    # Base stub for constant padding; only the fill `value` lives here, the
    # `padding` attribute is declared per-dimension in the subclasses below.
    value: float
    def __init__(self, value: float) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Padding tuples have 2 entries per spatial dim, hence _size_2/4/6_t for 1d/2d/3d.
class ConstantPad1d(_ConstantPadNd):
    padding: _size_2_t = ...
    def __init__(self, padding: _size_2_t, value: float) -> None: ...
class ConstantPad2d(_ConstantPadNd):
    padding: _size_4_t = ...
    def __init__(self, padding: _size_4_t, value: float) -> None: ...
class ConstantPad3d(_ConstantPadNd):
    padding: _size_6_t = ...
    def __init__(self, padding: _size_6_t, value: float) -> None: ...
class _ReflectionPadNd(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
    def extra_repr(self): ...
class ReflectionPad1d(_ReflectionPadNd):
    padding: _size_2_t = ...
    def __init__(self, padding: _size_2_t) -> None: ...
class ReflectionPad2d(_ReflectionPadNd):
    padding: _size_4_t = ...
    def __init__(self, padding: _size_4_t) -> None: ...
class _ReplicationPadNd(Module):
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
    def extra_repr(self): ...
class ReplicationPad1d(_ReplicationPadNd):
    padding: _size_2_t = ...
    def __init__(self, padding: _size_2_t) -> None: ...
class ReplicationPad2d(_ReplicationPadNd):
    padding: _size_4_t = ...
    def __init__(self, padding: _size_4_t) -> None: ...
class ReplicationPad3d(_ReplicationPadNd):
    padding: _size_6_t = ...
    def __init__(self, padding: _size_6_t) -> None: ...
class ZeroPad2d(ConstantPad2d):
    # Constant padding specialized to value 0, so __init__ drops `value`.
    padding: _size_4_t = ...
    def __init__(self, padding: _size_4_t) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from ... import Tensor
class PixelShuffle(Module):
    # Typed stub for torch.nn.PixelShuffle.
    upscale_factor: int = ...
    def __init__(self, upscale_factor: int) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from typing import Optional
from ... import Tensor, _size
from ..common_types import _size_any_t, _maybe_indices_t, _size_1_t, _size_2_t, _size_3_t, _ratio_3_t, _ratio_2_t
class _MaxPoolNd(Module):
    # Shared constructor for the max-pool stubs; geometry attributes are
    # declared per-dimension in the subclasses.
    return_indices: bool = ...
    ceil_mode: bool = ...
    def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = ..., padding: _size_any_t = ...,
                 dilation: _size_any_t = ..., return_indices: bool = ..., ceil_mode: bool = ...) -> None: ...
# forward returns `_maybe_indices_t` because the result is either the pooled
# tensor or a (tensor, indices) pair depending on `return_indices`.
class MaxPool1d(_MaxPoolNd):
    kernel_size: _size_1_t = ...
    stride: _size_1_t = ...
    padding: _size_1_t = ...
    dilation: _size_1_t = ...
    def forward(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
    def __call__(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
class MaxPool2d(_MaxPoolNd):
    kernel_size: _size_2_t = ...
    stride: _size_2_t = ...
    padding: _size_2_t = ...
    dilation: _size_2_t = ...
    def forward(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
    def __call__(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
class MaxPool3d(_MaxPoolNd):
    kernel_size: _size_3_t = ...
    stride: _size_3_t = ...
    padding: _size_3_t = ...
    dilation: _size_3_t = ...
    def forward(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
    def __call__(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
class _MaxUnpoolNd(Module):
    # Empty marker base for the unpooling stubs.
    ...
# Unpooling takes the `indices` produced by the matching MaxPool layer plus an
# optional explicit `output_size`.
class MaxUnpool1d(_MaxUnpoolNd):
    kernel_size: _size_1_t = ...
    stride: _size_1_t = ...
    padding: _size_1_t = ...
    def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = ..., padding: _size_1_t = ...) -> None: ...
    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[_size] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, indices: Tensor, output_size: Optional[_size] = ...) -> Tensor: ... # type: ignore
class MaxUnpool2d(_MaxUnpoolNd):
    kernel_size: _size_2_t = ...
    stride: _size_2_t = ...
    padding: _size_2_t = ...
    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = ..., padding: _size_2_t = ...) -> None: ...
    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[_size] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, indices: Tensor, output_size: Optional[_size] = ...) -> Tensor: ... # type: ignore
class MaxUnpool3d(_MaxUnpoolNd):
    kernel_size: _size_3_t = ...
    stride: _size_3_t = ...
    padding: _size_3_t = ...
    def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = ..., padding: _size_3_t = ...) -> None: ...
    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[_size] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, indices: Tensor, output_size: Optional[_size] = ...) -> Tensor: ... # type: ignore
class _AvgPoolNd(Module):
    # Empty marker base for the average-pool stubs.
    ...
class AvgPool1d(_AvgPoolNd):
    kernel_size: _size_1_t = ...
    stride: _size_1_t = ...
    padding: _size_1_t = ...
    ceil_mode: bool = ...
    count_include_pad: bool = ...
    def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = ..., padding: _size_1_t = ...,
                 ceil_mode: bool = ..., count_include_pad: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AvgPool2d(_AvgPoolNd):
    kernel_size: _size_2_t = ...
    stride: _size_2_t = ...
    padding: _size_2_t = ...
    ceil_mode: bool = ...
    count_include_pad: bool = ...
    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = ..., padding: _size_2_t = ...,
                 ceil_mode: bool = ..., count_include_pad: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AvgPool3d(_AvgPoolNd):
    kernel_size: _size_3_t = ...
    stride: _size_3_t = ...
    padding: _size_3_t = ...
    ceil_mode: bool = ...
    count_include_pad: bool = ...
    def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = ..., padding: _size_3_t = ...,
                 ceil_mode: bool = ..., count_include_pad: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class FractionalMaxPool2d(Module):
    # The output extent is given either as an absolute `output_size` or as an
    # `output_ratio` of the input; forward may also return indices.
    kernel_size: _size_2_t = ...
    return_indices: bool = ...
    output_size: _size_2_t = ...
    output_ratio: _ratio_2_t = ...
    def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = ...,
                 output_ratio: Optional[_ratio_2_t] = ..., return_indices: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
    def __call__(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
class FractionalMaxPool3d(Module):
    # 3-D variant of FractionalMaxPool2d; same parameter scheme.
    kernel_size: _size_3_t = ...
    return_indices: bool = ...
    output_size: _size_3_t = ...
    output_ratio: _ratio_3_t = ...
    def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = ...,
                 output_ratio: Optional[_ratio_3_t] = ..., return_indices: bool = ...) -> None: ...
    def forward(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
    def __call__(self, input: Tensor) -> _maybe_indices_t: ... # type: ignore
class _LPPoolNd(Module):
    # Shared constructor for power-average pooling; `norm_type` is the exponent p.
    norm_type: float = ...
    ceil_mode: bool = ...
    def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = ...,
                 ceil_mode: bool = ...) -> None: ...
class LPPool1d(_LPPoolNd):
    kernel_size: _size_1_t = ...
    stride: _size_1_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class LPPool2d(_LPPoolNd):
    kernel_size: _size_2_t = ...
    stride: _size_2_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class _AdaptiveMaxPoolNd(Module):
    # Adaptive pooling takes only the target output size (plus return_indices here).
    return_indices: bool = ...
    def __init__(self, output_size: _size_any_t, return_indices: bool = ...) -> None: ...
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
    output_size: _size_1_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    output_size: _size_2_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
    output_size: _size_3_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class _AdaptiveAvgPoolNd(Module):
    def __init__(self, output_size: _size_any_t) -> None: ...
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
    output_size: _size_1_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
    output_size: _size_2_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
    output_size: _size_3_t = ...
    def forward(self, input: Tensor) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ... # type: ignore
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ..parameter import Parameter
from .module import Module
from typing import Any, Optional, Tuple, List
from ... import Tensor
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = ...) -> Tensor: ...
class RNNBase(Module):
    # Common stub shared by the RNN/LSTM/GRU classes; `mode` selects the variant.
    mode: str = ...
    input_size: int = ...
    hidden_size: int = ...
    num_layers: int = ...
    bias: bool = ...
    batch_first: bool = ...
    dropout: float = ...
    bidirectional: bool = ...
    def __init__(self, mode: str, input_size: int, hidden_size: int, num_layers: int = ..., bias: bool = ...,
                 batch_first: bool = ..., dropout: float = ..., bidirectional: bool = ...) -> None: ...
    def flatten_parameters(self) -> List[Parameter]: ...
    def reset_parameters(self) -> None: ...
    def get_flat_weights(self): ...
    def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: ...
    def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: ...
    def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int], msg: str = ...) -> None: ...
    def check_forward_args(self, input: Any, hidden: Any, batch_sizes: Optional[Tensor]) -> None: ...
    def permute_hidden(self, hx: Any, permutation: Any): ...
    # Loosely typed here; subclasses narrow `hx` and the return type.
    def forward(self, input: Tensor, hx: Optional[Any] = ...) -> Any: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Any] = ...) -> Any: ... # type: ignore
    @property
    def all_weights(self) -> List[Parameter]: ...
class RNN(RNNBase):
    # Typed stub for torch.nn.RNN; `nonlinearity` chooses the cell activation.
    def __init__(self, input_size: int, hidden_size: int, num_layers: int = ..., bias: bool = ...,
                 batch_first: bool = ..., dropout: float = ..., bidirectional: bool = ...,
                 nonlinearity: str = ...) -> None: ...
    # torch.nn.RNN.forward returns the pair (output, h_n), not a single Tensor,
    # so the return type is Tuple[Tensor, Tensor] (matching the LSTM stub's style).
    def forward(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ... # type: ignore
class LSTM(RNNBase):
    # Typed stub for torch.nn.LSTM. Unlike RNN, torch.nn.LSTM does not accept a
    # `nonlinearity` constructor argument, so it is not declared here.
    def __init__(self, input_size: int, hidden_size: int, num_layers: int = ..., bias: bool = ...,
                 batch_first: bool = ..., dropout: float = ..., bidirectional: bool = ...) -> None: ...
    def check_forward_args(self, input: Tensor, hidden: Tuple[Tensor, Tensor],
                           batch_sizes: Optional[Tensor]) -> None: ...
    def permute_hidden(self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]) -> Tuple[Tensor, Tensor]: ...
    def forward_impl(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]], batch_sizes: Optional[Tensor],
                     max_batch_size: int, sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: ...
    def forward_tensor(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = ...) -> Tuple[
        Tensor, Tuple[Tensor, Tensor]]: ...
    # Packed-sequence variant; the 4-tuple mirrors PackedSequence's fields.
    def forward_packed(self, input: Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]],
                       hx: Optional[Tuple[Tensor, Tensor]] = ...) -> Tuple[
        Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Tuple[Tensor, Tensor]]: ...
    # Returns (output, (h_n, c_n)).
    def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = ...) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = ...) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: ... # type: ignore
class GRU(RNNBase):
    # Typed stub for torch.nn.GRU. torch.nn.GRU does not accept a
    # `nonlinearity` constructor argument, so it is not declared here.
    def __init__(self, input_size: int, hidden_size: int, num_layers: int = ..., bias: bool = ...,
                 batch_first: bool = ..., dropout: float = ..., bidirectional: bool = ...) -> None: ...
    # torch.nn.GRU.forward returns the pair (output, h_n), not a single Tensor.
    def forward(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tuple[Tensor, Tensor]: ... # type: ignore
class RNNCellBase(Module):
    # Shared stub for the single-step cell classes (RNNCell/LSTMCell/GRUCell).
    input_size: int = ...
    hidden_size: int = ...
    bias: bool = ...
    # Input-to-hidden and hidden-to-hidden weights and biases.
    weight_ih: Parameter = ...
    weight_hh: Parameter = ...
    bias_ih: Parameter = ...
    bias_hh: Parameter = ...
    # `num_chunks` is how many gate blocks the weight matrices are split into.
    def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int) -> None: ...
    def check_forward_input(self, input: Tensor) -> None: ...
    def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = ...) -> None: ...
    def reset_parameters(self) -> None: ...
class RNNCell(RNNCellBase):
    # Single-step RNN cell; `nonlinearity` chooses the activation.
    nonlinearity: str = ...
    def __init__(self, input_size: int, hidden_size: int, bias: bool = ..., nonlinearity: str = ...) -> None: ...
    def forward(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tensor: ... # type: ignore
class LSTMCell(RNNCellBase):
    # Single-step LSTM cell; hidden state is the (h, c) pair.
    def __init__(self, input_size: int, hidden_size: int, bias: bool = ...) -> None: ...
    def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = ...) -> Tuple[Tensor, Tensor]: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = ...) -> Tuple[Tensor, Tensor]: ... # type: ignore
class GRUCell(RNNCellBase):
    # Single-step GRU cell.
    def __init__(self, input_size: int, hidden_size: int, bias: bool = ...) -> None: ...
    def forward(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tensor: ... # type: ignore
    def __call__(self, input: Tensor, hx: Optional[Tensor] = ...) -> Tensor: ... # type: ignore
# NOTE(review): removed trailing lines scraped from the GitLab commit page UI
# ("Markdown is supported", comment-box prompts, etc.) — they were not part of
# the source file and made it syntactically invalid.