Commit 0cd65242 authored by Mandeep Singh Baines's avatar Mandeep Singh Baines
Browse files

Initial commit

parents
This diff is collapsed.
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module as Module
from .activation import CELU as CELU, ELU as ELU, GLU as GLU, GELU as GELU, Hardshrink as Hardshrink, \
Hardtanh as Hardtanh, LeakyReLU as LeakyReLU, LogSigmoid as LogSigmoid, LogSoftmax as LogSoftmax, PReLU as PReLU, \
RReLU as RReLU, ReLU as ReLU, ReLU6 as ReLU6, SELU as SELU, Sigmoid as Sigmoid, Softmax as Softmax, \
Softmax2d as Softmax2d, Softmin as Softmin, Softplus as Softplus, Softshrink as Softshrink, Softsign as Softsign, \
Tanh as Tanh, Tanhshrink as Tanhshrink, Threshold as Threshold
from .adaptive import AdaptiveLogSoftmaxWithLoss as AdaptiveLogSoftmaxWithLoss
from .batchnorm import BatchNorm1d as BatchNorm1d, BatchNorm2d as BatchNorm2d, BatchNorm3d as BatchNorm3d, \
SyncBatchNorm as SyncBatchNorm
from .container import Container as Container, ModuleDict as ModuleDict, ModuleList as ModuleList, \
ParameterDict as ParameterDict, ParameterList as ParameterList, Sequential as Sequential
from .conv import Conv1d as Conv1d, Conv2d as Conv2d, Conv3d as Conv3d, ConvTranspose1d as ConvTranspose1d, \
ConvTranspose2d as ConvTranspose2d, ConvTranspose3d as ConvTranspose3d
from .distance import CosineSimilarity as CosineSimilarity, PairwiseDistance as PairwiseDistance
from .dropout import AlphaDropout as AlphaDropout, Dropout as Dropout, Dropout2d as Dropout2d, Dropout3d as Dropout3d, \
FeatureAlphaDropout as FeatureAlphaDropout
from .fold import Fold as Fold, Unfold as Unfold
from .instancenorm import InstanceNorm1d as InstanceNorm1d, InstanceNorm2d as InstanceNorm2d, \
InstanceNorm3d as InstanceNorm3d
from .linear import Bilinear as Bilinear, Identity as Identity, Linear as Linear
from .loss import BCELoss as BCELoss, BCEWithLogitsLoss as BCEWithLogitsLoss, CTCLoss as CTCLoss, \
CosineEmbeddingLoss as CosineEmbeddingLoss, CrossEntropyLoss as CrossEntropyLoss, \
HingeEmbeddingLoss as HingeEmbeddingLoss, KLDivLoss as KLDivLoss, L1Loss as L1Loss, MSELoss as MSELoss, \
MarginRankingLoss as MarginRankingLoss, MultiLabelMarginLoss as MultiLabelMarginLoss, \
MultiLabelSoftMarginLoss as MultiLabelSoftMarginLoss, MultiMarginLoss as MultiMarginLoss, NLLLoss as NLLLoss, \
NLLLoss2d as NLLLoss2d, PoissonNLLLoss as PoissonNLLLoss, SmoothL1Loss as SmoothL1Loss, \
SoftMarginLoss as SoftMarginLoss, TripletMarginLoss as TripletMarginLoss
from .module import Module as Module
from .normalization import CrossMapLRN2d as CrossMapLRN2d, GroupNorm as GroupNorm, LayerNorm as LayerNorm, \
LocalResponseNorm as LocalResponseNorm
from .padding import ConstantPad1d as ConstantPad1d, ConstantPad2d as ConstantPad2d, ConstantPad3d as ConstantPad3d, \
ReflectionPad1d as ReflectionPad1d, ReflectionPad2d as ReflectionPad2d, ReplicationPad1d as ReplicationPad1d, \
ReplicationPad2d as ReplicationPad2d, ReplicationPad3d as ReplicationPad3d, ZeroPad2d as ZeroPad2d
from .pixelshuffle import PixelShuffle as PixelShuffle
from .pooling import AdaptiveAvgPool1d as AdaptiveAvgPool1d, AdaptiveAvgPool2d as AdaptiveAvgPool2d, \
AdaptiveAvgPool3d as AdaptiveAvgPool3d, AdaptiveMaxPool1d as AdaptiveMaxPool1d, \
AdaptiveMaxPool2d as AdaptiveMaxPool2d, AdaptiveMaxPool3d as AdaptiveMaxPool3d, AvgPool1d as AvgPool1d, \
AvgPool2d as AvgPool2d, AvgPool3d as AvgPool3d, FractionalMaxPool2d as FractionalMaxPool2d, \
FractionalMaxPool3d as FractionalMaxPool3d, LPPool1d as LPPool1d, LPPool2d as LPPool2d, MaxPool1d as MaxPool1d, \
MaxPool2d as MaxPool2d, MaxPool3d as MaxPool3d, MaxUnpool1d as MaxUnpool1d, MaxUnpool2d as MaxUnpool2d, \
MaxUnpool3d as MaxUnpool3d
from .rnn import GRU as GRU, GRUCell as GRUCell, LSTM as LSTM, LSTMCell as LSTMCell, RNN as RNN, RNNBase as RNNBase, \
RNNCell as RNNCell, RNNCellBase as RNNCellBase
from .sparse import Embedding as Embedding, EmbeddingBag as EmbeddingBag
from .upsampling import Upsample as Upsample, UpsamplingBilinear2d as UpsamplingBilinear2d, \
UpsamplingNearest2d as UpsamplingNearest2d
#MODIFIED BY TORCHGPIPE
from .modules.flatten import Flatten as Flatten
#END
This diff is collapsed.
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .module import Module
from .linear import Linear
from collections import namedtuple
from typing import List, Sequence
from .container import ModuleList
_ASMoutput = namedtuple('ASMoutput', ['output', 'loss'])
class AdaptiveLogSoftmaxWithLoss(Module):
    """Type stub for ``nn.AdaptiveLogSoftmaxWithLoss``, the efficient softmax
    approximation for large label spaces (Grave et al., 2017).

    ``forward`` returns an ``ASMoutput`` named tuple with fields ``output``
    (per-sample target log-probability) and ``loss``.
    """

    in_features: int = ...      # size of each input sample
    n_classes: int = ...        # total number of classes in the dataset
    cutoffs: List[int] = ...    # ascending class-index cutoffs; last entry equals n_classes
    div_value: float = ...      # divisor used to shrink successive tail-cluster projections
    head_bias: bool = ...       # whether the head Linear carries a bias term
    head: Linear = ...          # shortlist + cluster-head projection
    tail: ModuleList = ...      # one small projection stack per tail cluster

    def __init__(self, in_features: int, n_classes: int, cutoffs: Sequence[int], div_value: float = ...,
                 head_bias: bool = ...) -> None: ...

    def reset_parameters(self) -> None: ...

    def forward(self, input: Tensor, target: Tensor) -> _ASMoutput: ...  # type: ignore

    def __call__(self, input: Tensor, target: Tensor) -> _ASMoutput: ...  # type: ignore

    # BUGFIX: was annotated ``List[float]``; the PyTorch implementation
    # returns a Tensor of shape (N, n_classes) holding log-probabilities.
    def log_prob(self, input: Tensor) -> Tensor: ...

    def predict(self, input: Tensor) -> Tensor: ...
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any
from .module import Module
class Flatten(Module):
    """Type stub for ``nn.Flatten``: flattens a contiguous range of input
    dimensions, from ``start_dim`` through ``end_dim``, into one dimension."""

    __constants__: Any = ...  # attribute names TorchScript treats as constants
    # FIX: these were loosely annotated ``Any``; __init__ below already
    # declares them as int, and the implementation stores plain ints
    # (upstream defaults are start_dim=1, end_dim=-1 — per PyTorch docs).
    start_dim: int = ...  # first dimension to flatten
    end_dim: int = ...    # last dimension to flatten

    def __init__(self, start_dim: int = ..., end_dim: int = ...) -> None: ...

    def forward(self, input: Any): ...  # type: ignore

    def __call__(self, input: Any): ...  # type: ignore
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment