"vscode:/vscode.git/clone" did not exist on "48ea88ab9f3f8eb1e799d1d711da0181d95e574d"
Commit 0cd65242 authored by Mandeep Singh Baines's avatar Mandeep Singh Baines
Browse files

Initial commit

parents
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .module import Module
from typing import Optional
from .. import Parameter
from ... import Tensor
class Embedding(Module):
    num_embeddings: int = ...
    embedding_dim: int = ...
    padding_idx: int = ...
    max_norm: float = ...
    norm_type: float = ...
    scale_grad_by_freq: bool = ...
    weight: Parameter = ...
    sparse: bool = ...
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = ...,
                 max_norm: Optional[float] = ..., norm_type: float = ..., scale_grad_by_freq: bool = ...,
                 sparse: bool = ..., _weight: Optional[Tensor] = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ...  # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ...  # type: ignore
    @classmethod
    def from_pretrained(cls, embeddings: Tensor, freeze: bool = ..., padding_idx: Optional[int] = ...,
                        max_norm: Optional[float] = ..., norm_type: float = ..., scale_grad_by_freq: bool = ...,
                        sparse: bool = ...): ...

class EmbeddingBag(Module):
    num_embeddings: int = ...
    embedding_dim: int = ...
    max_norm: float = ...
    norm_type: float = ...
    scale_grad_by_freq: bool = ...
    weight: Parameter = ...
    mode: str = ...
    sparse: bool = ...
    def __init__(self, num_embeddings: int, embedding_dim: int, max_norm: Optional[float] = ..., norm_type: float = ...,
                 scale_grad_by_freq: bool = ..., mode: str = ..., sparse: bool = ...,
                 _weight: Optional[Tensor] = ...) -> None: ...
    def reset_parameters(self) -> None: ...
    def forward(self, input: Tensor, offsets: Optional[Tensor] = ...) -> Tensor: ...  # type: ignore
    def __call__(self, input: Tensor, offsets: Optional[Tensor] = ...) -> Tensor: ...  # type: ignore
    @classmethod
    def from_pretrained(cls, embeddings: Tensor, freeze: bool = ..., max_norm: Optional[float] = ...,
                        norm_type: float = ..., scale_grad_by_freq: bool = ..., mode: str = ...,
                        sparse: bool = ...): ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ... import Tensor
from .module import Module
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t
class Upsample(Module):
    name: str = ...
    size: _size_any_t = ...
    scale_factor: _ratio_any_t = ...
    mode: str = ...
    align_corners: bool = ...
    def __init__(self, size: Optional[_size_any_t] = ..., scale_factor: Optional[_ratio_any_t] = ..., mode: str = ...,
                 align_corners: Optional[bool] = ...) -> None: ...
    def forward(self, input: Tensor) -> Tensor: ...  # type: ignore
    def __call__(self, input: Tensor) -> Tensor: ...  # type: ignore

class UpsamplingNearest2d(Upsample):
    def __init__(self, size: Optional[_size_2_t] = ..., scale_factor: Optional[_ratio_2_t] = ...) -> None: ...

class UpsamplingBilinear2d(Upsample):
    def __init__(self, size: Optional[_size_2_t] = ..., scale_factor: Optional[_ratio_2_t] = ...) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .data_parallel import DataParallel as DataParallel, data_parallel as data_parallel
from .distributed import DistributedDataParallel as DistributedDataParallel
from .parallel_apply import parallel_apply as parallel_apply
from .replicate import replicate as replicate
from .scatter_gather import gather as gather, scatter as scatter
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Union, Sequence
from ... import device
_device_t = Union[int, device]
_devices_t = Sequence[_device_t]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Optional, TypeVar
from .common_types import _devices_t, _device_t
from ..modules import Module
from ... import device, Tensor
T_co = TypeVar('T_co', covariant=True)
class DataParallel(Module[T_co]):
    module: Module = ...
    device_ids: _devices_t = ...
    dim: int = ...
    output_device: _device_t = ...
    src_device_obj: device = ...
    def __init__(self, module: Module[T_co], device_ids: Optional[_devices_t] = ..., output_device: Optional[_device_t] = ...,
                 dim: int = ...) -> None: ...
    def forward(self, *inputs: Any, **kwargs: Any) -> T_co: ...
    def __call__(self, *inputs: Any, **kwargs: Any) -> T_co: ...

def data_parallel(module: Module, inputs: Any, device_ids: Optional[_devices_t] = ...,
                  output_device: Optional[_device_t] = ..., dim: int = ...,
                  module_kwargs: Optional[Any] = ...) -> Tensor: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from ..modules import Module
from typing import Any, Optional, TypeVar
from .common_types import _devices_t, _device_t
T_co = TypeVar('T_co', covariant=True)
def get_rank(group: Any): ...
class DistributedDataParallel(Module[T_co]):
    process_group: Any = ...
    dim: int = ...
    module: Module[T_co] = ...
    device_ids: _devices_t = ...
    output_device: _device_t = ...
    broadcast_buffers: bool = ...
    check_reduction: bool = ...
    broadcast_bucket_size: float = ...
    bucket_bytes_cap: float = ...
    # TODO type process_group once `distributed` module is stubbed
    def __init__(self, module: Module[T_co], device_ids: Optional[_devices_t] = ...,
                 output_device: Optional[_device_t] = ..., dim: int = ...,
                 broadcast_buffers: bool = ..., process_group: Optional[Any] = ..., bucket_cap_mb: float = ...,
                 check_reduction: bool = ...) -> None: ...
    def forward(self, *inputs: Any, **kwargs: Any) -> T_co: ...
    def __call__(self, *inputs: Any, **kwargs: Any) -> T_co: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Optional, Sequence, List
from .common_types import _devices_t
from ..modules import Module
def parallel_apply(modules: Sequence[Module], inputs: Sequence[Any], kwargs_tup: Optional[Any] = ...,
                   devices: Optional[_devices_t] = ...) -> List[Any]: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List, Union, Sequence, TypeVar
from ..modules import Module
from .common_types import _devices_t
T = TypeVar('T')
def replicate(network: Module[T], devices: Union[_devices_t, Sequence[_devices_t]], detach: bool = ...) -> List[Module[T]]: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Dict, List, Tuple, overload, TypeVar
from ... import Tensor
from .common_types import _device_t, _devices_t
T = TypeVar('T', Dict, List, Tuple)
# For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise.
@overload
def scatter(inputs: Tensor, target_gpus: _devices_t, dim: int = ...) -> Tuple[Tensor, ...]: ...
# flake8 will raise a spurious error here since `torch/__init__.pyi` has not been generated yet
# so mypy will interpret `Tensor` as `Any` since it is an import from what it believes to be an
# untyped module. Thus to mypy, the first definition of `scatter` looks strictly more general
# than this overload.
@overload
def scatter(inputs: T, target_gpus: _devices_t, dim: int = ...) -> List[T]: ... # type: ignore
# TODO More precise types here.
def scatter_kwargs(inputs: Any, kwargs: Any, target_gpus: _devices_t, dim: int = ...) -> Any: ...
def gather(outputs: Any, target_device: _device_t, dim: int = ...) -> Any: ...
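# --- Illustrative note (not part of the stub above) ---
# The two `scatter` overloads encode the behaviour described in the comment above: a bare
# Tensor input is scattered into a tuple of chunks (one per device), while a list/tuple/dict
# input is scattered into a list with one entry per device. A minimal, hedged sketch of how
# the annotated API would be exercised, assuming at least one CUDA device is available:
import torch
from torch.nn.parallel.scatter_gather import scatter, gather

if torch.cuda.is_available():
    devices = list(range(torch.cuda.device_count()))
    x = torch.randn(8, 4)
    chunks = scatter(x, devices)        # Tuple[Tensor, ...]: first overload
    parts = scatter([x, x], devices)    # List[...]: second overload, one entry per device
    merged = gather(list(chunks), devices[0])  # chunks concatenated back on the first device
# --- End illustrative note ---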
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .. import Tensor
import builtins
class Parameter(Tensor):
    def __init__(self, data: Tensor, requires_grad: builtins.bool): ...
    ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .sgd import SGD as SGD
from .adam import Adam as Adam
from . import lr_scheduler as lr_scheduler
from .optimizer import Optimizer as Optimizer
#MODIFIED BY TORCHGPIPE
from .rmsprop import RMSprop as RMSprop
#END
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Tuple
from .optimizer import _params_t, Optimizer
class Adam(Optimizer):
    def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=..., weight_decay: float=..., amsgrad: bool = ...) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Iterable, Any, Optional, Callable
from .optimizer import Optimizer
class _LRScheduler:
    def __init__(self, optimizer: Optimizer, last_epoch: int=...) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict) -> None: ...
    #MODIFIED BY TORCHGPIPE
    from typing import List
    def get_lr(self) -> List[float]: ...
    def step(self, epoch: Optional[int] = ...) -> None: ...
    #END

class LambdaLR(_LRScheduler):
    #MODIFIED BY TORCHGPIPE
    from typing import Callable, List, Union
    def __init__(self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], last_epoch: int=...) -> None: ...
    #END

class StepLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, step_size: int, gamma: float=..., last_epoch: int=...) -> None: ...

class MultiStepLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, milestones: Iterable[int], gamma: float=..., last_epoch: int=...) -> None: ...

class ExponentialLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, gamma: float, last_epoch: int=...) -> None: ...

class CosineAnnealingLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float, last_epoch: int=...) -> None: ...

class ReduceLROnPlateau:
    in_cooldown: bool
    def __init__(self, optimizer: Optimizer, mode: str=..., factor: float=..., patience: int=..., verbose: bool=..., threshold: float=..., threshold_mode: str=..., cooldown: int=..., min_lr: float=..., eps: float=...) -> None: ...
    def step(self, metrics: Any, epoch: Optional[int]=...) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict): ...

class CyclicLR(_LRScheduler):
    def __init__(self, optimizer: Optimizer, base_lr: float=..., max_lr: float=..., step_size_up: int=..., step_size_down: int=..., mode: str=..., gamma: float=..., scale_fn: Optional[Callable[[float], float]]=..., scale_mode: str=..., cycle_momentum: bool=..., base_momentum: float=..., max_momentum: float=..., last_epoch: int=...) -> None: ...

class CosineAnnealingWarmRestarts(_LRScheduler):
    def __init__(self, optimizer: Optimizer, T_0: int=..., T_mult: int=..., eta_min: int=..., last_epoch: int=...) -> None: ...
    def step(self, epoch: Optional[int] = ...) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, List, Iterable, Union, Callable, Optional
from .. import Tensor
_params_t = Union[Iterable[Tensor], Iterable[dict]]
class Optimizer(object):
    param_groups: List[dict]
    def __init__(self, params: _params_t, defaults: dict) -> None: ...
    def state_dict(self) -> dict: ...
    def load_state_dict(self, state_dict: dict) -> None: ...
    def zero_grad(self) -> None: ...
    def step(self, closure: Optional[Callable[[], float]]=...) -> Optional[float]: ...
    def add_param_group(self, param_group: dict) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .optimizer import _params_t, Optimizer
class SGD(Optimizer):
    def __init__(self, params: _params_t, lr: float, momentum: float=..., dampening: float=..., weight_decay: float=..., nesterov: bool=...) -> None: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#MODIFIED BY TORCHGPIPE
from contextlib import contextmanager
from typing import Any, Generator, Iterable, Union
from torch import ByteTensor, device
def set_rng_state(new_state: ByteTensor) -> None: ...
def get_rng_state() -> ByteTensor: ...
def manual_seed(seed: int) -> Any: ...
def seed() -> int: ...
def initial_seed() -> int: ...
@contextmanager
def fork_rng(devices: Iterable[Union[device, str, int]] = ..., enabled: bool = ...) -> Generator[None, None, None]: ...
#END
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .sampler import Sampler as Sampler, SequentialSampler as SequentialSampler, RandomSampler as RandomSampler, \
    SubsetRandomSampler as SubsetRandomSampler, WeightedRandomSampler as WeightedRandomSampler, BatchSampler as BatchSampler
from .distributed import DistributedSampler as DistributedSampler
from .dataset import Dataset as Dataset, TensorDataset as TensorDataset, ConcatDataset as ConcatDataset, \
    Subset as Subset, random_split as random_split
from .dataloader import DataLoader as DataLoader
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Any, Callable, TypeVar, Generic, overload, Sequence, List, Optional
from . import Dataset, Sampler
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
_worker_init_fn_t = Callable[[int], None]
# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that
# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.
# See https://github.com/python/mypy/issues/3737.
_collate_fn_t = Callable[[List[T]], Any]
class DataLoader(Generic[T_co]):
    dataset: Dataset[T_co]
    batch_size: int
    num_workers: int
    pin_memory: bool
    drop_last: bool
    timeout: float
    @overload
    def __init__(self, dataset: Dataset[T_co], batch_size: int=..., shuffle: bool=...,
                 sampler: Optional[Sampler[int]]=..., num_workers: int=..., collate_fn: _collate_fn_t=...,
                 pin_memory: bool=..., drop_last: bool=..., timeout: float=...,
                 worker_init_fn: _worker_init_fn_t=...) -> None: ...
    @overload
    def __init__(self, dataset: Dataset[T_co], batch_sampler: Optional[Sampler[Sequence[int]]]=...,
                 num_workers: int=..., collate_fn: _collate_fn_t=..., pin_memory: bool=..., timeout: float=...,
                 worker_init_fn: _worker_init_fn_t=...) -> None: ...
    def __len__(self) -> int: ...
    # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up
    # since '_BaseDataLoaderIter' references 'DataLoader'. In mypy 0.720 and newer a new semantic
    # analyzer is used that obviates the need for this but we leave the quoting in to support older
    # versions of mypy
    def __iter__(self) -> '_BaseDataLoaderIter': ...

class _BaseDataLoaderIter:
    def __init__(self, loader: DataLoader) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> _BaseDataLoaderIter: ...
    def __next__(self) -> Any: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import TypeVar, Generic, Iterable, Sequence, List, Tuple
from ... import Tensor
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
class Dataset(Generic[T_co]):
    def __getitem__(self, index: int) -> T_co: ...
    def __len__(self) -> int: ...
    def __add__(self, other: T_co) -> 'ConcatDataset[T_co]': ...

class IterableDataset(Dataset[T_co]):
    def __iter__(self) -> Iterable[T_co]: ...

class TensorDataset(Dataset[Tuple[Tensor, ...]]):
    tensors: List[Tensor]
    def __init__(self, *tensors: Tensor) -> None: ...

class ConcatDataset(Dataset[T_co]):
    datasets: List[Dataset[T_co]]
    cumulative_sizes: List[int]
    def __init__(self, datasets: Iterable[Dataset]) -> None: ...

class Subset(Dataset[T_co]):
    dataset: Dataset[T_co]
    indices: Sequence[int]
    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None: ...
def random_split(dataset: Dataset[T], lengths: Sequence[int]) -> List[Subset[T]]: ...
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import TypeVar, Optional, Iterator
from . import Sampler, Dataset
T_co = TypeVar('T_co', covariant=True)
class DistributedSampler(Sampler[T_co]):
    def __init__(self, dataset: Dataset, num_replicas: Optional[int]=..., rank: Optional[int]=...): ...
    def __iter__(self) -> Iterator[int]: ...
    def __len__(self) -> int: ...
    def set_epoch(self, epoch: int) -> None: ...