__init__.pyi.in
# ${generated_comment}
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload
from torch._six import inf
import builtins
# These identifiers are reexported from other modules. These modules
# are not mypy-clean yet, so in order to use this stub file usefully
# from mypy you will need to specify --follow-imports=silent.
# Not all is lost: these imports still enable IDEs like PyCharm to offer
# autocomplete.
#
# Note: Why does the syntax here look so strange? Import visibility
# rules in stubs are different from normal Python files! You must use
# 'from ... import ... as ...' syntax to cause an identifier to be
# exposed (or use a wildcard); regular syntax is not exposed.
from .random import set_rng_state as set_rng_state, get_rng_state as get_rng_state, \
    manual_seed as manual_seed, initial_seed as initial_seed
from ._tensor_str import set_printoptions as set_printoptions
from .functional import *
from .serialization import save as save, load as load
from .autograd import no_grad as no_grad, enable_grad as enable_grad, \
    set_grad_enabled as set_grad_enabled
from . import cuda as cuda
from . import optim as optim
class dtype: ...
class layout: ...
strided : layout = ...
# See https://github.com/python/mypy/issues/4146 for why these workarounds
# are necessary
_int = builtins.int
_float = builtins.float
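# A likely illustration of the problem (assumption, not stated in this file):
# Tensor defines methods named `int` and `float`, so inside its class body the
# plain names `int`/`float` can resolve to those methods rather than the
# builtins; the `_int`/`_float` aliases (and `_dtype`/`_device` below) keep
# annotations pointing at the intended types.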
class device:
    type: str
    index: _int
    @overload
    def __init__(self, device: Union[_int, str]) -> None: ...
    @overload
    def __init__(self, type: str, index: _int) -> None: ...
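# Illustrative usage of the two overloads above (examples only, not part of the
# generated stub):
#   torch.device("cuda:0")   # single-argument form
#   torch.device("cuda", 0)  # (type, index) form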
class Generator: ...
class Size(tuple): ...
class Storage: ...
# See https://github.com/python/mypy/issues/4146 for why these workarounds
# are necessary
_dtype = dtype
_device = device
_size = Union[Size, List[_int], Tuple[_int, ...]]
# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float]
# TODO: One downside of doing it this way is that direct use of
# torch.tensor.Tensor doesn't get type annotations. Nobody
# should really do that, so maybe this is not so bad.
class Tensor:
    dtype: _dtype = ...
    shape: Size = ...
    device: _device = ...
    requires_grad: bool = ...
    grad: Optional[Tensor] = ...
    ${tensor_method_hints}
    # Manually defined methods from torch/tensor.py
    def backward(self, gradient: Optional[Tensor]=None, retain_graph: Optional[bool]=None, create_graph: bool=False) -> None: ...
    def register_hook(self, hook: Callable) -> Any: ...
    def retain_grad(self) -> None: ...
    def is_pinned(self) -> bool: ...
    def is_shared(self) -> bool: ...
    def share_memory_(self) -> None: ...
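    # Illustrative note (example only): at runtime register_hook returns a
    # removable handle, e.g.
    #   h = t.register_hook(lambda grad: grad * 2)
    #   h.remove()
    # which is presumably why its return type is left as Any here.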
    # TODO: fill in the types for these, or otherwise figure out some
    # way to not have to write these out again...
    def norm(self, p="fro", dim=None, keepdim=False): ...
    def stft(self, n_fft, hop_length=None, win_length=None, window=None,
             center=True, pad_mode='reflect', normalized=False, onesided=True): ...
    def split(self, split_size, dim=0): ...
    def unique(self, sorted=True, return_inverse=False, dim=None): ...
    def lu(self, pivot=True, get_infos=False): ...
${function_hints}
${legacy_class_hints}
${dtype_class_hints}
# Pure Python functions defined in torch/__init__.py
def typename(obj) -> str: ...
def is_tensor(obj) -> bool: ...
def is_storage(obj) -> bool: ...
def set_default_tensor_type(type) -> None: ... # ick, what a bad legacy API
def set_default_dtype(d : _dtype) -> None: ...
def manager_path() -> str: ...
def compiled_with_cxx11_abi() -> bool: ...
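# Illustrative usage (example only, not part of the stub):
#   torch.set_default_dtype(torch.float64)  # new float tensors default to float64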