forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
python_legacy_variable.cpp
167 lines (151 loc) · 4.92 KB
/
python_legacy_variable.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
#include <torch/csrc/autograd/python_legacy_variable.h>
#include <ATen/ATen.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/autograd/python_function.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/jit/frontend/tracer.h>
#include <torch/csrc/tensor/python_tensor.h>
using namespace at;
namespace torch {
namespace autograd {
// tp_new implementation for torch._C._LegacyVariableBase.
//
// Implements the deprecated `Variable(data, requires_grad=..., volatile=...,
// _grad_fn=..., name=...)` constructor for backward compatibility. Legacy
// serialization code and `nn.Parameter()` with no arguments also reach this
// path. Returns a new Variable wrapper, or nullptr with a Python error set.
static PyObject* THPVariable_pynew(
    PyTypeObject* type,
    PyObject* args,
    PyObject* kwds) {
  HANDLE_TH_ERRORS
  PyObject* data = nullptr;
  PyObject* grad_fn = nullptr;
  char is_volatile = 0;
  char requires_grad = 0;
  const char* name = nullptr;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
  constexpr const char* accepted_args[] = {
      "data", "requires_grad", "volatile", "_grad_fn", "name", nullptr};
  if (!PyArg_ParseTupleAndKeywords(
          args,
          kwds,
          "|ObbOz",
          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
          const_cast<char**>(accepted_args),
          &data,
          &requires_grad,
          &is_volatile,
          &grad_fn,
          &name))
    return nullptr;
  // An explicit `_grad_fn=None` is treated the same as omitting the argument.
  if (grad_fn == Py_None)
    grad_fn = nullptr;
  if (is_volatile) {
    // `volatile` no longer has any effect; warn (warning filters may turn
    // this into an error, hence the python_error on nonzero return).
    auto r = PyErr_WarnEx(
        PyExc_UserWarning,
        "volatile was removed and now has no effect. Use `with torch.no_grad():` "
        "instead.",
        1);
    if (r != 0)
      throw python_error();
  }
  TORCH_CHECK_VALUE(
      !is_volatile || !requires_grad,
      "Variable can't be volatile and require_grad at the same time!");
  // Validate the type first so a bad `_grad_fn` gets a precise TypeError;
  // any non-null `_grad_fn` is rejected further below regardless.
  if (grad_fn && !THPFunction_Check(grad_fn)) {
    throw TypeError(
        "_grad_fn has to be a Function object or None, but got %s",
        Py_TYPE(grad_fn)->tp_name);
  }
  Variable var;
  if (!data || data == Py_None) {
    // For legacy serialization code, create an empty tensor. This is also used
    // by nn.Parameter() with no arguments.
    auto dispatch_key = torch::tensors::get_default_dispatch_key();
    auto scalar_type = torch::tensors::get_default_scalar_type();
    auto options = TensorOptions(scalar_type)
                       .device(dispatchKeyToDeviceType(dispatch_key))
                       .layout(dispatchKeyToLayout(dispatch_key));
    var = at::empty({0}, options);
  } else if (THPVariable_Check(data)) {
    // Detach so the new Variable does not share autograd history with `data`.
    var = THPVariable_Unpack(data).detach();
  } else {
    throw torch::TypeError(
        "Variable data has to be a tensor, but got %s", Py_TYPE(data)->tp_name);
  }
  // We set `tensor`'s `allow_tensor_metadata_change` to true here, because we
  // want to allow the following use case for backward compatibility:
  //
  // ```python
  // var = Variable(torch.randn(2, 3))
  // var.resize_(4, 5)
  // ```
  var.unsafeGetTensorImpl()->set_allow_tensor_metadata_change(true);
  TORCH_CHECK(
      !grad_fn,
      "_grad_fn argument to legacy Variable constructor is no longer supported. "
      "Instead, please invoke your _grad_fn to produce a variable with it as the "
      "_grad_fn.");
  var.set_requires_grad(requires_grad);
  if (name) {
    impl::set_name(var, name);
  }
  // Under the JIT tracer, propagate `data`'s trace value to the new variable
  // so the tracer treats them as the same traced value.
  if (jit::tracer::isTracing() && data && data != Py_None &&
      THPVariable_Check(data)) {
    if (auto* v = jit::tracer::getValueTrace(THPVariable_Unpack(data))) {
      jit::tracer::setValueTrace(var, v);
    }
  }
  return THPVariable_Wrap(std::move(var));
  END_HANDLE_TH_ERRORS
}
// Static type object for torch._C._LegacyVariableBase. Only tp_new is
// populated (THPVariable_pynew above); all other slots are inherited from
// the base type. Py_TPFLAGS_BASETYPE permits Python-level subclassing, which
// the legacy torch.autograd.Variable relies on.
PyTypeObject THPLegacyVariableType = {
PyVarObject_HEAD_INIT(
nullptr,
0) "torch._C._LegacyVariableBase", /* tp_name */
0, /* tp_basicsize */
0, /* tp_itemsize */
nullptr, /* tp_dealloc */
0, /* tp_vectorcall_offset */
nullptr, /* tp_getattr */
nullptr, /* tp_setattr */
nullptr, /* tp_reserved */
nullptr, /* tp_repr */
nullptr, /* tp_as_number */
nullptr, /* tp_as_sequence */
nullptr, /* tp_as_mapping */
nullptr, /* tp_hash */
nullptr, /* tp_call */
nullptr, /* tp_str */
nullptr, /* tp_getattro */
nullptr, /* tp_setattro */
nullptr, /* tp_as_buffer */
// NOLINTNEXTLINE(misc-redundant-expression)
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
nullptr, /* tp_doc */
nullptr, /* tp_traverse */
nullptr, /* tp_clear */
nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
nullptr, /* tp_iter */
nullptr, /* tp_iternext */
nullptr, /* tp_methods */
nullptr, /* tp_members */
nullptr, /* tp_getset */
nullptr, /* tp_base */
nullptr, /* tp_dict */
nullptr, /* tp_descr_get */
nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
nullptr, /* tp_init */
nullptr, /* tp_alloc */
THPVariable_pynew /* tp_new */
};
// Readies THPLegacyVariableType and registers it on `module` (torch._C) as
// "_LegacyVariableBase". Throws python_error if readying or registration
// fails, leaving the Python error indicator set.
void init_legacy_variable(PyObject* module) {
  if (PyType_Ready(&THPLegacyVariableType) < 0) {
    throw python_error();
  }
  auto* obj = reinterpret_cast<PyObject*>(&THPLegacyVariableType);
  Py_INCREF(obj);
  if (PyModule_AddObject(module, "_LegacyVariableBase", obj) < 0) {
    // PyModule_AddObject steals the reference only on success; drop the
    // extra reference we took above to avoid leaking it on failure.
    Py_DECREF(obj);
    throw python_error();
  }
}
} // namespace autograd
} // namespace torch