| File: | build/../torch/csrc/autograd/python_variable.cpp |
| Warning: | line 604, column 22: PyObject ownership leak with reference count of 1 |
| 1 | #include <torch/csrc/autograd/python_variable.h> | |||
| 2 | ||||
| 3 | #include <torch/csrc/THP.h> | |||
| 4 | #include <torch/csrc/DynamicTypes.h> | |||
| 5 | #include <torch/csrc/Exceptions.h> | |||
| 6 | #include <torch/csrc/Device.h> | |||
| 7 | #include <torch/csrc/Size.h> | |||
| 8 | #include <torch/csrc/Types.h> | |||
| 9 | #include <torch/csrc/autograd/autograd.h> | |||
| 10 | #include <torch/csrc/autograd/edge.h> | |||
| 11 | #include <torch/csrc/autograd/python_cpp_function.h> | |||
| 12 | #include <torch/csrc/autograd/python_hook.h> | |||
| 13 | #include <torch/csrc/autograd/python_variable_indexing.h> | |||
| 14 | #include <torch/csrc/autograd/variable.h> | |||
| 15 | #include <torch/csrc/autograd/functions/accumulate_grad.h> | |||
| 16 | #include <torch/csrc/autograd/function.h> | |||
| 17 | #include <torch/csrc/autograd/generated/VariableType.h> | |||
| 18 | #include <torch/csrc/autograd/utils/error_messages.h> | |||
| 19 | #include <torch/csrc/autograd/utils/wrap_outputs.h> | |||
| 20 | #include <torch/csrc/tensor/python_tensor.h> | |||
| 21 | #include <pybind11/pybind11.h> | |||
| 22 | #include <torch/csrc/utils/cuda_lazy_init.h> | |||
| 23 | #include <torch/csrc/utils/pybind.h> | |||
| 24 | #include <torch/csrc/utils/pycfunction_helpers.h> | |||
| 25 | #include <torch/csrc/utils/python_strings.h> | |||
| 26 | #include <torch/csrc/utils/python_arg_parser.h> | |||
| 27 | #include <torch/csrc/utils/tensor_new.h> | |||
| 28 | #include <torch/csrc/jit/frontend/tracer.h> | |||
| 29 | #include <ATen/NamedTensorUtils.h> | |||
| 30 | #include <c10/util/DeadlockDetection.h> | |||
| 31 | #include <c10/util/irange.h> | |||
| 32 | ||||
| 33 | #include <torch/library.h> | |||
| 34 | #include <torch/csrc/jit/python/pybind_utils.h> | |||
| 35 | ||||
| 36 | ||||
| 37 | #include <ATen/ATen.h> | |||
| 38 | #include <pybind11/pybind11.h> | |||
| 39 | ||||
| 40 | #include <structmember.h> | |||
| 41 | #include <cstdint> | |||
| 42 | #include <iostream> | |||
| 43 | #include <memory> | |||
| 44 | #include <utility> | |||
| 45 | #include <vector> | |||
| 46 | ||||
| 47 | using namespace at; | |||
| 48 | using namespace torch; | |||
| 49 | using namespace torch::autograd; | |||
| 50 | ||||
| 51 | namespace { | |||
| 52 | ||||
| 53 | std::string concrete_name_fn(const c10::impl::PyInterpreter* self) { | |||
| 54 | std::stringstream ss; | |||
| 55 | ss << self; | |||
| 56 | return ss.str(); | |||
| 57 | } | |||
| 58 | ||||
| 59 | void concrete_decref_fn(const c10::impl::PyInterpreter* self, PyObject* pyobj) { | |||
| 60 | // Leak the pyobj if not initialized. This can happen if we are running | |||
| 61 | // exit handlers that are destructing tensors with residual (owned) | |||
| 62 | // PyObjects stored in them. | |||
| 63 | if (!Py_IsInitialized()) | |||
| 64 | return; | |||
| 65 | ||||
| 66 | pybind11::gil_scoped_acquire gil; | |||
| 67 | if (Py_REFCNT(pyobj) > 1) { | |||
| 68 | // It's still alive! This can happen if a weak ref resurrected | |||
| 69 | // the PyObject without flipping ownership. At this point it is | |||
| 70 | // too late to rescue the object, so just stub out the PyObject | |||
| 71 | // so that it fails on subsequent uses. Don't raise an error here; | |||
| 72 | // you're probably in a destructor. | |||
| 73 | TORCH_WARN( | |||
| 74 | "Deallocating Tensor that still has live PyObject references. " | |||
| 75 | "This probably happened because you took out a weak reference to " | |||
| 76 | "Tensor and didn't call _fix_weakref() after dereferencing it. " | |||
| 77 | "Subsequent accesses to this tensor via the PyObject will now fail." | |||
| 78 | ); | |||
| 79 | ((THPVariable*)pyobj)->cdata = MaybeOwned<Variable>(); | |||
| 80 | } | |||
| 81 | Py_DECREF(pyobj); | |||
| 82 | }; | |||
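| | // Illustrative sketch (hedged; based only on the warning text above, not a | |||
| | // verified repro) of the Python-side sequence that can reach this path: | |||
| | //   wr = weakref.ref(t); del t   # C++ side keeps the resurrected PyObject | |||
| | //   t2 = wr()                    # dereference the weak ref... | |||
| | //   # ...and never call t2._fix_weakref(); a later C++ dealloc then warns. | |||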
| 83 | ||||
| 84 | c10::intrusive_ptr<TensorImpl> concrete_detach_fn(const c10::impl::PyInterpreter*, const c10::TensorImpl* self); | |||
| 85 | void concrete_dispatch_fn(const c10::impl::PyInterpreter*, const c10::OperatorHandle& op, torch::jit::Stack* stack); | |||
| 86 | ||||
| 87 | class PyInterpreterHolder { | |||
| 88 | public: | |||
| 89 | PyInterpreterHolder() | |||
| 90 | : impl_(new c10::impl::PyInterpreter( | |||
| 91 | &concrete_name_fn, | |||
| 92 | &concrete_decref_fn, | |||
| 93 | &concrete_detach_fn, | |||
| 94 | &concrete_dispatch_fn)) {} | |||
| 95 | // NB: intentionally leaks the memory | |||
| 96 | ~PyInterpreterHolder() { | |||
| 97 | impl_->disarm(); | |||
| 98 | } | |||
| 99 | c10::impl::PyInterpreter* get() const noexcept { | |||
| 100 | return impl_; | |||
| 101 | } | |||
| 102 | ||||
| 103 | private: | |||
| 104 | c10::impl::PyInterpreter* impl_; | |||
| 105 | }; | |||
| 106 | PyInterpreterHolder self_interpreter; | |||
| 107 | ||||
| 108 | } // anonymous namespace | |||
| 109 | ||||
| 110 | namespace py = pybind11; | |||
| 111 | ||||
| 112 | // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) | |||
| 113 | PyObject *THPVariableClass = nullptr; | |||
| 114 | ||||
| 115 | // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) | |||
| 116 | PyObject *ParameterClass = nullptr; | |||
| 117 | ||||
| 118 | // clang-tidy gets confused by static const | |||
| 119 | // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) | |||
| 120 | static const char* VOLATILE_WARNING = | |||
| 121 | "volatile was removed and now has no effect. Use " | |||
| 122 | "`with torch.no_grad():` instead."; | |||
| 123 | ||||
| 124 | static bool check_has_torch_dispatch(PyObject *obj) { | |||
| 125 | PyTypeObject *tp = Py_TYPE(obj); | |||
| 126 | return ( | |||
| 127 | !THPVariable_CheckTypeExact(tp) && | |||
| 128 | // TODO: test if Python key is disabled | |||
| 129 | PyObject_FastGetAttrString(obj, "__torch_dispatch__").ptr() != nullptr | |||
| 130 | ); | |||
| 131 | } | |||
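| | // Note: true only for subclasses (exact torch.Tensor instances are ruled out | |||
| | // by THPVariable_CheckTypeExact) that expose a __torch_dispatch__ attribute; | |||
| | // THPVariable_NewWithVar uses this to flag the TensorImpl for Python dispatch. | |||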
| 132 | ||||
| 133 | // Creates a new Python object for a Variable. The status parameter | |||
| 134 | // specifies what the interpreter tag status on the object is; for | |||
| 135 | // example, if you ran check_pyobj, the return optional of this object | |||
| 136 | // tells you if the tensor was already tagged or not so you can pass | |||
| 137 | // TAGGED_BY_US or MAYBE_UNINITIALIZED; in other cases, you know where | |||
| 138 | // var came from and can directly assert that it's DEFINITELY_UNINITIALIZED. | |||
| 139 | // It's ALWAYS safe (albeit slower) to call this with MAYBE_UNINITIALIZED. | |||
| 140 | static PyObject* THPVariable_NewWithVar( | |||
| 141 | PyTypeObject* type, | |||
| 142 | Variable _var, | |||
| 143 | c10::impl::PyInterpreterStatus status) { | |||
| 144 | PyObject* obj = type->tp_alloc(type, 0); | |||
| 145 | if (obj) { | |||
| 146 | auto v = (THPVariable*) obj; | |||
| 147 | // TODO: named constructor to avoid default initialization | |||
| 148 | new (&v->cdata) MaybeOwned<Variable>(); | |||
| 149 | v->cdata = MaybeOwned<Variable>::owned(std::move(_var)); | |||
| 150 | const auto& var = THPVariable_Unpack(v); | |||
| 151 | var.unsafeGetTensorImpl()->init_pyobj(self_interpreter.get(), obj, status); | |||
| 152 | if (check_has_torch_dispatch(obj)) { | |||
| 153 | var.unsafeGetTensorImpl()->set_python_dispatch(true); | |||
| 154 | } | |||
| 155 | } | |||
| 156 | return obj; | |||
| 157 | } | |||
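| | // Note: on success the returned PyObject owns the Variable (cdata is set to | |||
| | // MaybeOwned::owned) and the TensorImpl is tagged with this interpreter's | |||
| | // pyobj via init_pyobj; if tp_alloc fails, obj is nullptr and returned as-is. | |||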
| 158 | ||||
| 159 | // TODO: Make this take Variable by const reference | |||
| 160 | PyObject * THPVariable_Wrap(Variable var) | |||
| 161 | { | |||
| 162 | if (!var.defined()) { | |||
| 163 | Py_RETURN_NONE; | |||
| 164 | } | |||
| 165 | ||||
| 166 | c10::optional<PyObject*> mb_obj = | |||
| 167 | var.unsafeGetTensorImpl()->check_pyobj(self_interpreter.get()); | |||
| 168 | c10::impl::PyInterpreterStatus status; | |||
| 169 | if (mb_obj.has_value()) { | |||
| 170 | auto obj = *mb_obj; | |||
| 171 | if (obj) { | |||
| 172 | if (var.unsafeGetTensorImpl()->owns_pyobj()) { | |||
| 173 | // C++ owns the Python object; this implies there weren't any other | |||
| 174 | // owning references to the Python object. Since we're making the | |||
| 175 | // object "live" again on Python side, let's flip back the ownership | |||
| 176 | // (Python owns C++) as it would now be unsound to deallocate the C++ | |||
| 177 | // object if all C++ references go to zero | |||
| 178 | var.unsafeGetTensorImpl()->set_owns_pyobj(false); | |||
| 179 | reinterpret_cast<THPVariable*>(obj)->cdata = | |||
| 180 | MaybeOwned<Variable>::owned(std::move(var)); | |||
| 181 | // NB: incref is not necessary, because we are "stealing" the previous | |||
| 182 | // ownership from the Variable to return it here for the wrap | |||
| 183 | return obj; | |||
| 184 | } | |||
| 185 | Py_INCREF(obj); | |||
| 186 | return obj; | |||
| 187 | } | |||
| 188 | // TODO: a better invariant is that if we tagged, we MUST have a valid | |||
| 189 | // PyObject. That's PyObject preservation | |||
| 190 | // (https://github.com/pytorch/pytorch/pull/56017). Prior to this PR | |||
| 191 | // being a thing, the PyObject field will get cleared when all references | |||
| 192 | // to the Python object are removed. | |||
| 193 | status = c10::impl::PyInterpreterStatus::TAGGED_BY_US; | |||
| 194 | } else { | |||
| 195 | // Assumption: if a Tensor has been shared across threads, this induces | |||
| 196 | // a refcount bump. Therefore, if the use count 1, we are the sole thread | |||
| 197 | // with access to this tensor and no race is possible. | |||
| 198 | if (var.use_count() <= 1) { | |||
| 199 | status = c10::impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED; | |||
| 200 | } else { | |||
| 201 | status = c10::impl::PyInterpreterStatus::MAYBE_UNINITIALIZED; | |||
| 202 | } | |||
| 203 | } | |||
| 204 | return THPVariable_NewWithVar( | |||
| 205 | (PyTypeObject*)THPVariableClass, std::move(var), status); | |||
| 206 | } | |||
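| | // Note: every branch above returns an owned reference to the caller: the | |||
| | // reference stolen back from the C++-owned Tensor, an explicitly INCREF'd | |||
| | // existing PyObject, or a freshly allocated wrapper from THPVariable_NewWithVar. | |||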
| 207 | ||||
| 208 | static int THPVariable_clear(THPVariable* self) { | |||
| 209 | Py_CLEAR(self->backward_hooks); | |||
| 210 | const auto& tensor = THPVariable_Unpack(self); | |||
| 211 | if (tensor.defined()) { | |||
| 212 | // Two situations to consider: | |||
| 213 | // PyObject -owns-> Tensor | |||
| 214 | // unsafeIsBorrowed() is FALSE. We're obligated to look through | |||
| 215 | // Tensor to break references. Clearing cdata must induce the | |||
| 216 | // destruction of the C++ Tensor. If there were other references | |||
| 217 | // to C++ tensor, the Python object would have been resurrected | |||
| 218 | // by flipping the ownership. | |||
| 219 | // Tensor -owns-> PyObject | |||
| 220 | // unsafeIsBorrowed() is TRUE. We're deallocating the PyObject | |||
| 221 | // because Tensor asked us to (it's already destructing). | |||
| 222 | ||||
| 223 | if (!self->cdata.unsafeIsBorrowed()) { | |||
| 224 | // TODO: empirically, on OS X this assert appears to be untrue | |||
| 225 | // In test_py_tensors_multi_async_call - ProcessGroupRpcTestWithSpawn | |||
| 226 | // distributed/rpc/test_process_group_agent.py | |||
| 227 | // | |||
| 228 | // libc++abi.dylib: terminating with uncaught exception of type | |||
| 229 | // c10::Error: !tensor.unsafeGetTensorImpl()->owns_pyobj()INTERNAL ASSERT | |||
| 230 | // FAILED at "../torch/csrc/autograd/python_variable.cpp":171, please | |||
| 231 | // report a bug to PyTorch. Exception raised from THPVariable_clear at | |||
| 232 | // ../torch/csrc/autograd/python_variable.cpp:171 (most recent call | |||
| 233 | // first): frame #0: c10::Error::Error(c10::SourceLocation, | |||
| 234 | // std::__1::basic_string<char, std::__1::char_traits<char>, | |||
| 235 | // std::__1::allocator<char> >) + 98 (0x1158a0442 in libc10.dylib) frame | |||
| 236 | // #1: c10::detail::torchCheckFail(char const*, char const*, unsigned | |||
| 237 | // int, char const*) + 205 (0x11589ed3d in libc10.dylib) frame #2: | |||
| 238 | // c10::detail::torchInternalAssertFail(char const*, char const*, | |||
| 239 | // unsigned int, char const*, c10::detail::CompileTimeEmptyString) + 9 | |||
| 240 | // (0x1141e3f89 in libtorch_python.dylib) frame #3: | |||
| 241 | // THPVariable_clear(THPVariable*) + 412 (0x1148a547c in | |||
| 242 | // libtorch_python.dylib) frame #4: | |||
| 243 | // THPVariable_subclass_dealloc(_object*) + 453 (0x1148a5035 in | |||
| 244 | // libtorch_python.dylib) frame #5: (anonymous | |||
| 245 | // namespace)::concrete_decref_fn(c10::impl::PyInterpreter const*, | |||
| 246 | // _object*) + 53 (0x1148a5ea5 in libtorch_python.dylib) frame #6: | |||
| 247 | // c10::TensorImpl::release_resources() + 182 (0x11588c4a6 in | |||
| 248 | // libc10.dylib) frame #7: | |||
| 249 | // c10::MaybeOwned<at::Tensor>::operator=(c10::MaybeOwned<at::Tensor>&&) | |||
| 250 | // + 91 (0x11488c11b in libtorch_python.dylib) frame #8: | |||
| 251 | // THPVariable_subclass_dealloc(_object*) + 607 (0x1148a50cf in | |||
| 252 | // libtorch_python.dylib) <omitting python frames> frame #47: start + 1 | |||
| 253 | // (0x7fff6ffc7cc9 in libdyld.dylib) frame #48: 0x0 + 4 (0x4 in ???) | |||
| 254 | // TORCH_INTERNAL_ASSERT(!tensor.unsafeGetTensorImpl()->owns_pyobj()); | |||
| 255 | if (auto grad_acc = | |||
| 256 | torch::autograd::impl::try_get_grad_accumulator(tensor)) { | |||
| 257 | grad_acc->pre_hooks().clear(); | |||
| 258 | } | |||
| 259 | } | |||
| 260 | } | |||
| 261 | self->cdata = MaybeOwned<Variable>(); | |||
| 262 | return 0; | |||
| 263 | } | |||
| 264 | ||||
| 265 | // returns true if successfully rezzed; if so, cancel the | |||
| 266 | // rest of deallocation | |||
| 267 | static bool THPVariable_tryResurrect(THPVariable* self) { | |||
| 268 | const auto& tensor = THPVariable_Unpack(self); | |||
| 269 | ||||
| 270 | // Is this true or not??? Triggered by TestAutograd.test_variable_traverse | |||
| 271 | // TORCH_INTERNAL_ASSERT(tensor.defined()); | |||
| 272 | ||||
| 273 | // Check if there are other C++ owners | |||
| 274 | if (tensor.use_count() <= 1) { | |||
| 275 | return false; | |||
| 276 | } | |||
| 277 | ||||
| 278 | // There are other C++ owners of the tensor. Flip ownership | |||
| 279 | // so that C++ owns this Python object, and cancel deallocation. | |||
| 280 | TORCH_INTERNAL_ASSERT(!tensor.unsafeGetTensorImpl()->owns_pyobj()); | |||
| 281 | ||||
| 282 | tensor.unsafeGetTensorImpl()->set_owns_pyobj(true); | |||
| 283 | ||||
| 284 | // Resurrect the Python object. This is something CPython does | |||
| 285 | // internally occasionally, see | |||
| 286 | // https://github.com/python/cpython/blob/b98eba5bc2ffbe7a0ed49d540ebc4f756ae61985/Objects/object.c#L248-L259 | |||
| 287 | // so we just copy the pattern here. Note that we don't have to worry | |||
| 288 | // about saving and restoring the refcount (as the quoted code does) | |||
| 289 | // because we actually DO need to reset the refcount to one here, we | |||
| 290 | // can't assume that some other code has taken care of it. | |||
| 291 | // NB: this will overreport _Py_RefTotal but based on inspection of object.c | |||
| 292 | // there is no way to avoid this | |||
| 293 | #ifdef Py_TRACE_REFS | |||
| 294 | _Py_AddToAllObjects(reinterpret_cast<PyObject *>(self), 1); | |||
| 295 | #endif | |||
| 296 | Py_INCREF(self); | |||
| 297 | ||||
| 298 | // Flip THPVariable to be non-owning | |||
| 299 | // (near use-after-free miss here: fresh MaybeOwned is created breaking | |||
| 300 | // reference on Tensor in struct BEFORE we overwrite the old one) | |||
| 301 | self->cdata = MaybeOwned<Variable>::borrowed(tensor); | |||
| 302 | ||||
| 303 | // NB: At this point, tensor *could* be dead (e.g., some other C++ thread | |||
| 304 | // decrefed it.) At this point, it is probably waiting on the GIL to | |||
| 305 | // deallocate the Python object and will kill self, BUT NOT YET. | |||
| 306 | ||||
| 307 | return true; | |||
| 308 | } | |||
| 309 | ||||
| 310 | PyObject *THPVariable_pynew(PyTypeObject *type, PyObject *args, PyObject *kwargs); | |||
| 311 | ||||
| 312 | static PyObject* THPVariable_fix_weakref(PyObject* self, PyObject* noargs) { | |||
| 313 | const auto& var = THPVariable_Unpack(self); | |||
| 314 | THPVariable_Wrap(var); | |||
| 315 | Py_RETURN_NONE; | |||
| 316 | } | |||
| 317 | ||||
| 318 | // Instantiates a subclass of self with the same data. | |||
| 319 | static PyObject* THPVariable_as_subclass(PyObject* _self, PyObject* args, PyObject* kwargs) { | |||
| 320 | HANDLE_TH_ERRORS | |||
| 321 | const auto& self = THPVariable_Unpack(_self); | |||
| 322 | static PythonArgParser parser({ | |||
| 323 | "as_subclass(PyObject* cls)", | |||
| 324 | }); | |||
| 325 | ParsedArgs<1> parsed_args{}; | |||
| 326 | auto r = parser.parse(_self, args, kwargs, parsed_args); | |||
| 327 | PyObject* cls = r.pyobject(0); | |||
| 328 | if (!PyType_Check(cls)) { | |||
| 329 | throw torch::TypeError("cls must be a type (got %s)", Py_TYPE(cls)->tp_name); | |||
| 330 | } | |||
| 331 | return THPVariable_NewWithVar( | |||
| 332 | (PyTypeObject*)cls, | |||
| 333 | self.alias(), | |||
| 334 | c10::impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED); | |||
| 335 | END_HANDLE_TH_ERRORS | |||
| 336 | } | |||
| 337 | ||||
| 338 | static PyObject* THPVariable_make_subclass(PyObject* _ignored, PyObject* args, PyObject* kwargs) { | |||
| 339 | HANDLE_TH_ERRORS | |||
| 340 | static PythonArgParser parser({ | |||
| 341 | "_make_subclass(PyObject* cls, Tensor data, bool require_grad=False)", | |||
| 342 | }); | |||
| 343 | ParsedArgs<3> parsed_args{}; | |||
| 344 | auto r = parser.parse(args, kwargs, parsed_args); | |||
| 345 | PyObject* cls = r.pyobject(0); | |||
| 346 | if (!PyType_Check(cls)) { | |||
| 347 | throw torch::TypeError("cls must be a type (got %s)", Py_TYPE(cls)->tp_name); | |||
| 348 | } | |||
| 349 | auto data = | |||
| 350 | r.tensor(1).detach(); // creates a fresh Tensor (DEFINITELY_UNINITIALIZED) | |||
| 351 | // We set `data`'s `allow_tensor_metadata_change` to true here, because we want to | |||
| 352 | // allow the following use case for backward compatibility: | |||
| 353 | // | |||
| 354 | // ```python | |||
| 355 | // rnn = torch.nn.RNN(100, 100, 2) | |||
| 356 | // # The following calls `torch._cudnn_rnn_flatten_weight(rnn._flat_weights, ...)`, | |||
| 357 | // # which changes storage of `rnn`'s weights in-place | |||
| 358 | // rnn.flatten_parameters() | |||
| 359 | // ``` | |||
| 360 | data.unsafeGetTensorImpl()->set_allow_tensor_metadata_change(true); | |||
| 361 | data.set_requires_grad(r.toBool(2)); | |||
| 362 | return THPVariable_NewWithVar( | |||
| 363 | (PyTypeObject*)cls, | |||
| 364 | std::move(data), | |||
| 365 | c10::impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED); | |||
| 366 | END_HANDLE_TH_ERRORS | |||
| 367 | } | |||
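| | // Illustrative Python-side call (hedged; MySubclass is a hypothetical | |||
| | // torch.Tensor subclass, signature per the parser string above): | |||
| | //   inst = torch.Tensor._make_subclass(MySubclass, data, True) | |||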
| 368 | ||||
| 369 | typedef PyObject *(*getter)(PyObject *, void *); | |||
| 370 | typedef int (*setter)(PyObject *, PyObject *, void *); | |||
| 371 | ||||
| 372 | PyObject *THPVariable_get_python_dispatch(THPVariable *self, void *unused) | |||
| 373 | { | |||
| 374 | HANDLE_TH_ERRORS | |||
| 375 | const auto& var = THPVariable_Unpack(self); | |||
| 376 | return torch::autograd::utils::wrap(var.unsafeGetTensorImpl()->is_python_dispatch()); | |||
| 377 | END_HANDLE_TH_ERRORS | |||
| 378 | } | |||
| 379 | ||||
| 380 | PyObject *THPVariable_get_T(THPVariable *self, void *unused) | |||
| 381 | { | |||
| 382 | HANDLE_TH_ERRORS | |||
| 383 | if (check_has_torch_function((PyObject *)self)) { | |||
| 384 | return handle_torch_function_getter(self, "T"); | |||
| 385 | } | |||
| 386 | const auto& var = THPVariable_Unpack(self); | |||
| 387 | return THPVariable_Wrap(var.numpy_T()); | |||
| 388 | END_HANDLE_TH_ERRORS | |||
| 389 | } | |||
| 390 | ||||
| 391 | PyObject *THPVariable_get_cdata(THPVariable *self, void *unused) | |||
| 392 | { | |||
| 393 | HANDLE_TH_ERRORS | |||
| 394 | if (check_has_torch_function((PyObject *)self)) { | |||
| 395 | return handle_torch_function_getter(self, "_cdata"); | |||
| 396 | } | |||
| 397 | const auto& var = THPVariable_Unpack(self); | |||
| 398 | return PyLong_FromVoidPtr(var.unsafeGetTensorImpl()); | |||
| 399 | END_HANDLE_TH_ERRORS | |||
| 400 | } | |||
| 401 | ||||
| 402 | PyObject *THPVariable_get_version(THPVariable *self, void *unused) | |||
| 403 | { | |||
| 404 | HANDLE_TH_ERRORS | |||
| 405 | if (check_has_torch_function((PyObject *)self)) { | |||
| 406 | return handle_torch_function_getter(self, "_version"); | |||
| 407 | } | |||
| 408 | const auto& var = THPVariable_Unpack(self); | |||
| 409 | return PyInt_FromLong(var._version()); | |||
| 410 | END_HANDLE_TH_ERRORS | |||
| 411 | } | |||
| 412 | ||||
| 413 | PyObject *THPVariable_get_grad_fn(THPVariable *self, void *unused) | |||
| 414 | { | |||
| 415 | HANDLE_TH_ERRORS | |||
| 416 | if (check_has_torch_function((PyObject *)self)) { | |||
| 417 | return handle_torch_function_getter(self, "grad_fn"); | |||
| 418 | } | |||
| 419 | const auto& var = THPVariable_Unpack(self); | |||
| 420 | if (!var.grad_fn()) { | |||
| 421 | Py_RETURN_NONE; | |||
| 422 | } | |||
| 423 | return functionToPyObject(var.grad_fn()); | |||
| 424 | END_HANDLE_TH_ERRORS | |||
| 425 | } | |||
| 426 | ||||
| 427 | static int THPVariable_set_grad_fn(THPVariable *self, PyObject *obj, void *unused) | |||
| 428 | { | |||
| 429 | HANDLE_TH_ERRORS | |||
| 430 | if (check_has_torch_function((PyObject *)self)) { | |||
| 431 | return handle_torch_function_setter(self, "_grad_fn", obj); | |||
| 432 | } | |||
| 433 | THPUtils_assertRet(-1, obj, "Deletion of _grad_fn not allowed. Detach tensor instead!"); | |||
| 434 | THPUtils_assertRet(-1, obj == Py_None, "_grad_fn can be only set to None"); | |||
| 435 | THPVariable_Unpack(self).detach_(); | |||
| 436 | return 0; | |||
| 437 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 438 | } | |||
| 439 | ||||
| 440 | static PyObject *THPVariable_is_leaf(THPVariable *self, void *unused) | |||
| 441 | { | |||
| 442 | HANDLE_TH_ERRORS | |||
| 443 | if (check_has_torch_function((PyObject *)self)) { | |||
| 444 | return handle_torch_function_getter(self, "is_leaf"); | |||
| 445 | } | |||
| 446 | return PyBool_FromLong(!THPVariable_Unpack(self).grad_fn()); | |||
| 447 | END_HANDLE_TH_ERRORS | |||
| 448 | } | |||
| 449 | ||||
| 450 | static PyObject * THPVariable_get_data(THPVariable *self, void *unused) | |||
| 451 | { | |||
| 452 | HANDLE_TH_ERRORS | |||
| 453 | if (check_has_torch_function((PyObject *)self)) { | |||
| 454 | return handle_torch_function_getter(self, "data"); | |||
| 455 | } | |||
| 456 | const auto& var = THPVariable_Unpack(self).variable_data(); | |||
| 457 | return THPVariable_Wrap(var); | |||
| 458 | END_HANDLE_TH_ERRORS | |||
| 459 | } | |||
| 460 | ||||
| 461 | int THPVariable_set_data(THPVariable *self, PyObject *data, void *unused) | |||
| 462 | { | |||
| 463 | HANDLE_TH_ERRORS | |||
| 464 | if (check_has_torch_function((PyObject *)self)) { | |||
| 465 | return handle_torch_function_setter(self, "data", data); | |||
| 466 | } | |||
| 467 | THPUtils_assertRet(-1, data, "Deleting tensor data is not allowed. Delete tensor instead!"); | |||
| 468 | if (!THPVariable_Check(data)) { | |||
| 469 | throw torch::TypeError("Variable data has to be a tensor, but got %s", Py_TYPE(data)->tp_name); | |||
| 470 | } | |||
| 471 | ||||
| 472 | THPVariable_Unpack(self).set_data(THPVariable_Unpack(data)); | |||
| 473 | return 0; | |||
| 474 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 475 | } | |||
| 476 | ||||
| 477 | PyObject *THPVariable_get_grad(THPVariable *self, void *unused) | |||
| 478 | { | |||
| 479 | HANDLE_TH_ERRORS | |||
| 480 | if (check_has_torch_function((PyObject *)self)) { | |||
| 481 | return handle_torch_function_getter(self, "grad"); | |||
| 482 | } | |||
| 483 | return THPVariable_Wrap(THPVariable_Unpack(self).grad()); | |||
| 484 | END_HANDLE_TH_ERRORS | |||
| 485 | } | |||
| 486 | ||||
| 487 | int THPVariable_set_grad(THPVariable *self, PyObject *py_grad, void *unused) | |||
| 488 | { | |||
| 489 | HANDLE_TH_ERRORS | |||
| 490 | if (check_has_torch_function((PyObject *)self)) { | |||
| 491 | return handle_torch_function_setter(self, "grad", py_grad); | |||
| 492 | } | |||
| 493 | const auto& var = THPVariable_Unpack(self); | |||
| 494 | if (!py_grad || py_grad == Py_None) { | |||
| 495 | var.mutable_grad().reset(); | |||
| 496 | return 0; | |||
| 497 | } | |||
| 498 | ||||
| 499 | THPUtils_assertRet(-1, self != (THPVariable*)py_grad, | |||
| 500 | "can't assign Variable as its own grad"); | |||
| 501 | ||||
| 502 | const auto& grad = THPVariable_Unpack(py_grad); | |||
| 503 | bool gradIsSparse = (var.dtype() == grad.dtype() && | |||
| 504 | var.device().type() == grad.device().type() && | |||
| 505 | grad.layout() == kSparse); | |||
| 506 | THPUtils_assertRet(-1, grad.options().type_equal(var.options()) || gradIsSparse, | |||
| 507 | "assigned grad has data of a different type"); | |||
| 508 | if (var.is_cuda()) { | |||
| 509 | THPUtils_assertRet(-1, grad.get_device() == var.get_device(), | |||
| 510 | "assigned grad has data located on a different device"); | |||
| 511 | } | |||
| 512 | THPUtils_assertRet(-1, grad.sizes().equals(var.sizes()), | |||
| 513 | "assigned grad has data of a different size"); | |||
| 514 | ||||
| 515 | var.mutable_grad() = grad; | |||
| 516 | return 0; | |||
| 517 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 518 | } | |||
| 519 | ||||
| 520 | PyObject *THPVariable_get_volatile(THPVariable *self, void *unused) | |||
| 521 | { | |||
| 522 | HANDLE_TH_ERRORS | |||
| 523 | if (check_has_torch_function((PyObject *)self)) { | |||
| 524 | return handle_torch_function_getter(self, "volatile"); | |||
| 525 | } | |||
| 526 | const char* msg = "volatile was removed (Variable.volatile is always False)"; | |||
| 527 | auto r = PyErr_WarnEx(PyExc_UserWarning, msg, 1); | |||
| 528 | if (r != 0) throw python_error(); | |||
| 529 | Py_RETURN_FALSE; | |||
| 530 | END_HANDLE_TH_ERRORS | |||
| 531 | } | |||
| 532 | ||||
| 533 | int THPVariable_set_volatile(THPVariable *self, PyObject *obj, void *unused) | |||
| 534 | { | |||
| 535 | HANDLE_TH_ERRORS | |||
| 536 | if (check_has_torch_function((PyObject *)self)) { | |||
| 537 | return handle_torch_function_setter(self, "volatile", obj); | |||
| 538 | } | |||
| 539 | auto r = PyErr_WarnEx(PyExc_UserWarning, VOLATILE_WARNING, 1); | |||
| 540 | if (r != 0) throw python_error(); | |||
| 541 | return 0; | |||
| 542 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 543 | } | |||
| 544 | ||||
| 545 | PyObject *THPVariable_get_output_nr(THPVariable *self, void *unused) | |||
| 546 | { | |||
| 547 | HANDLE_TH_ERRORS | |||
| 548 | if (check_has_torch_function((PyObject *)self)) { | |||
| 549 | return handle_torch_function_getter(self, "output_nr"); | |||
| 550 | } | |||
| 551 | const auto output_nr = static_cast<long>(THPVariable_Unpack(self).output_nr()); | |||
| 552 | return PyInt_FromLong(output_nr); | |||
| 553 | END_HANDLE_TH_ERRORS | |||
| 554 | } | |||
| 555 | ||||
| 556 | PyObject *THPVariable_get_requires_grad(THPVariable *self, void *unused) | |||
| 557 | { | |||
| 558 | HANDLE_TH_ERRORS | |||
| 559 | if (check_has_torch_function((PyObject *)self)) { | |||
| 560 | return handle_torch_function_getter(self, "requires_grad"); | |||
| 561 | } | |||
| 562 | if(THPVariable_Unpack(self).requires_grad()) { | |||
| 563 | Py_RETURN_TRUE; | |||
| 564 | } else { | |||
| 565 | Py_RETURN_FALSE; | |||
| 566 | } | |||
| 567 | END_HANDLE_TH_ERRORS | |||
| 568 | } | |||
| 569 | ||||
| 570 | PyObject *THPVariable_retains_grad(THPVariable *self, void *unused) | |||
| 571 | { | |||
| 572 | HANDLE_TH_ERRORS | |||
| 573 | if (check_has_torch_function((PyObject *)self)) { | |||
| 574 | return handle_torch_function_getter(self, "retains_grad"); | |||
| 575 | } | |||
| 576 | if(THPVariable_Unpack(self).retains_grad()) { | |||
| 577 | Py_RETURN_TRUE; | |||
| 578 | } else { | |||
| 579 | Py_RETURN_FALSE; | |||
| 580 | } | |||
| 581 | END_HANDLE_TH_ERRORS | |||
| 582 | } | |||
| 583 | ||||
| 584 | PyObject *THPVariable_get_ndim(THPVariable *self, void *unused) | |||
| 585 | { | |||
| 586 | HANDLE_TH_ERRORS | |||
| 587 | if (check_has_torch_function((PyObject *)self)) { | |||
| 588 | return handle_torch_function_getter(self, "ndim"); | |||
| 589 | } | |||
| 590 | return PyInt_FromLong(THPVariable_Unpack(self).dim()); | |||
| 591 | END_HANDLE_TH_ERRORS | |||
| 592 | } | |||
| 593 | ||||
| 594 | PyObject *THPVariable_get_names(PyObject *self, void *unused) | |||
| 595 | { | |||
| 596 | HANDLE_TH_ERRORS | |||
| 597 | if (check_has_torch_function(self)) { | |||
| 598 | return handle_torch_function_getter((THPVariable*)self, "names"); | |||
| 599 | } | |||
| 600 | // The long-term plan is to return a list of (python) torch.Dimname. | |||
| 601 | // However, for now, return a list of strings. | |||
| 602 | const auto& tensor = THPVariable_Unpack(self); | |||
| 603 | size_t size = tensor.dim(); | |||
| 604 | THPObjectPtr tuple(PyTuple_New(size)); | |||
| 605 | if (!tuple) throw python_error(); | |||
| 606 | ||||
| 607 | const auto dimnames = tensor.names(); | |||
| 608 | for (const auto i : c10::irange(size)) { | |||
| 609 | // NOLINTNEXTLINE(cppcoreguidelines-init-variables) | |||
| 610 | PyObject* str; | |||
| 611 | if (dimnames[i].type() == at::NameType::WILDCARD) { | |||
| 612 | // PyTuple_SET_ITEM steals a reference to the object. When the tuple is | |||
| 613 | // deallocated, it'll decrement the refcount on Py_None, which is bad. | |||
| 614 | // To avoid this, we "create" a new reference to Py_None by increasing | |||
| 615 | // the refcount. | |||
| 616 | // Sources: | |||
| 617 | // - https://docs.python.org/3/c-api/tuple.html#c.PyTuple_SetItem | |||
| 618 | // - https://stackoverflow.com/questions/16400600/how-to-return-a-tuple-containing-a-none-value-from-the-c-api | |||
| 619 | Py_INCREF(Py_None); | |||
| 620 | str = Py_None; | |||
| 621 | } else { | |||
| 622 | str = THPUtils_packString(dimnames[i].symbol().toUnqualString()); | |||
| 623 | if (!str) throw python_error(); | |||
| 624 | } | |||
| 625 | PyTuple_SET_ITEM(tuple.get(), i, str); | |||
| 626 | } | |||
| 627 | return tuple.release(); | |||
| 628 | END_HANDLE_TH_ERRORS | |||
| 629 | } | |||
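The comment inside the loop above is about the CPython reference-stealing convention: PyTuple_New hands back a new owned reference (held here by THPObjectPtr so it is released on every error path), and PyTuple_SET_ITEM steals a reference to each stored element, so a borrowed object such as Py_None must be incref'd first. A minimal self-contained sketch of the same pattern, using a hypothetical helper name:

    // Sketch: build an n-element tuple of None while respecting the
    // reference-stealing contract of PyTuple_SET_ITEM.
    static PyObject* make_none_tuple(Py_ssize_t n) {
      PyObject* tuple = PyTuple_New(n);        // new owned reference, or nullptr on failure
      if (!tuple) return nullptr;
      for (Py_ssize_t i = 0; i < n; ++i) {
        Py_INCREF(Py_None);                    // create the reference the tuple will steal
        PyTuple_SET_ITEM(tuple, i, Py_None);   // steals our fresh reference to Py_None
      }
      return tuple;                            // ownership passes to the caller
    }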
| 630 | ||||
| 631 | int THPVariable_set_names(PyObject *self, PyObject *names, void *unused) { | |||
| 632 | HANDLE_TH_ERRORS | |||
| 633 | if (check_has_torch_function(self)) { | |||
| 634 | return handle_torch_function_setter((THPVariable*)self, "names", names); | |||
| 635 | } | |||
| 636 | const auto& var = THPVariable_Unpack(self); | |||
| 637 | if (names == Py_None) { | |||
| 638 | at::internal_set_names_inplace(var, at::nullopt); | |||
| 639 | } else { | |||
| 640 | THPUtils_assertRet(-1, | |||
| 641 | THPUtils_checkDimnameList(names), | |||
| 642 | "names must either be None or a tuple of dim names"); | |||
| 643 | at::internal_set_names_inplace(var, torch::parseDimnameList(names)); | |||
| 644 | } | |||
| 645 | return 0; | |||
| 646 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 647 | } | |||
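THPUtils_assertRet used above follows the usual CPython setter convention: when the check fails it records a Python exception via THPUtils_setError and makes the enclosing function return the supplied error value, -1 here. Written out by hand, the names check above amounts to roughly:

    // Sketch: unexpanded form of the THPUtils_assertRet(-1, ...) check in the names setter.
    if (!THPUtils_checkDimnameList(names)) {
      THPUtils_setError("names must either be None or a tuple of dim names");
      return -1;  // setters signal failure to CPython by returning -1
    }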
| 648 | ||||
| 649 | int THPVariable_set_requires_grad(THPVariable *self, PyObject *obj, void *unused) | |||
| 650 | { | |||
| 651 | HANDLE_TH_ERRORS | |||
| 652 | if (check_has_torch_function((PyObject *)self)) { | |||
| 653 | return handle_torch_function_setter(self, "requires_grad", obj); | |||
| 654 | } | |||
| 655 | THPUtils_assertRet(-1, obj && PyBool_Check(obj), "requires_grad must be a bool"); | |||
| 656 | const auto& var = THPVariable_Unpack(self); | |||
| 657 | auto requires_grad = (obj == Py_True); | |||
| 658 | if (!var.is_leaf()) { | |||
| 659 | THPUtils_setError(autograd::utils::requires_grad_leaf_error(obj == Py_True).c_str()); | |||
| 660 | return -1; | |||
| 661 | } | |||
| 662 | if (requires_grad && !isDifferentiableType(at::typeMetaToScalarType((var.dtype())))) { | |||
| 663 | THPUtils_setError("only Tensors of floating point and complex dtype can require gradients"); | |||
| 664 | return -1; | |||
| 665 | } | |||
| 666 | var.set_requires_grad(requires_grad); | |||
| 667 | return 0; | |||
| 668 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 669 | } | |||
| 670 | ||||
| 671 | PyObject *THPVariable_get_name(THPVariable* self, void *unused) | |||
| 672 | { | |||
| 673 | if (check_has_torch_function((PyObject *)self)) { | |||
| 674 | HANDLE_TH_ERRORS | |||
| 675 | return handle_torch_function_getter(self, "name"); | |||
| 676 | END_HANDLE_TH_ERRORS | |||
| 677 | } | |||
| 678 | const auto& tensor = THPVariable_Unpack(self); | |||
| 679 | if (tensor.name() == "") | |||
| 680 | Py_RETURN_NONE; | |||
| 681 | return THPUtils_packString(tensor.name().c_str()); | |||
| 682 | } | |||
| 683 | ||||
| 684 | PyObject *THPVariable_get_backwards_hooks(THPVariable *self, void *unused) | |||
| 685 | { | |||
| 686 | HANDLE_TH_ERRORS | |||
| 687 | if (check_has_torch_function((PyObject *)self)) { | |||
| 688 | return handle_torch_function_getter(self, "_backward_hooks"); | |||
| 689 | } | |||
| 690 | if (self->backward_hooks) { | |||
| 691 | Py_INCREF(self->backward_hooks); | |||
| 692 | return self->backward_hooks; | |||
| 693 | } | |||
| 694 | Py_RETURN_NONE; | |||
| 695 | END_HANDLE_TH_ERRORS | |||
| 696 | } | |||
| 697 | ||||
| 698 | int THPVariable_set_backwards_hooks(THPVariable *self, PyObject *obj, void *unused) | |||
| 699 | { | |||
| 700 | HANDLE_TH_ERRORS | |||
| 701 | if (check_has_torch_function((PyObject *)self)) { | |||
| 702 | return handle_torch_function_setter(self, "_backward_hooks", obj); | |||
| 703 | } | |||
| 704 | THPUtils_assertRet(-1, obj, "Deletion of _backwards_hooks not allowed!"); | |||
| 705 | if (obj == Py_None) { | |||
| 706 | obj = nullptr; | |||
| 707 | } | |||
| 708 | Py_XINCREF(obj); | |||
| 709 | Py_XDECREF(self->backward_hooks); | |||
| 710 | self->backward_hooks = obj; | |||
| 711 | const auto& tensor = THPVariable_Unpack(self); | |||
| 712 | torch::autograd::impl::clear_hooks(tensor); | |||
| 713 | if (obj) { | |||
| 714 | torch::autograd::impl::add_hook(tensor, std::make_shared<PyFunctionPreHook>(obj, 0)); | |||
| 715 | } | |||
| 716 | return 0; | |||
| 717 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 718 | } | |||
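The hook setter above swaps the stored PyObject with the standard incref-new-then-decref-old idiom, which stays correct when the old and new objects are the same and when either is null. The same idiom in isolation, with a hypothetical helper name:

    // Sketch: safely replace an owned, possibly-null PyObject* member.
    static void replace_owned(PyObject** slot, PyObject* new_obj) {
      Py_XINCREF(new_obj);   // take ownership of the new object first (no-op for nullptr)
      Py_XDECREF(*slot);     // then drop the old owned reference (no-op for nullptr)
      *slot = new_obj;
    }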
| 719 | ||||
| 720 | PyObject *THPVariable_get_base(THPVariable *self, void *unused) | |||
| 721 | { | |||
| 722 | HANDLE_TH_ERRORS | |||
| 723 | if (check_has_torch_function((PyObject *)self)) { | |||
| 724 | return handle_torch_function_getter(self, "_base"); | |||
| 725 | } | |||
| 726 | const auto& tensor = THPVariable_Unpack(self); | |||
| 727 | if (tensor.is_view()) { | |||
| 728 | return THPVariable_Wrap(tensor._base()); | |||
| 729 | } | |||
| 730 | Py_RETURN_NONE; | |||
| 731 | END_HANDLE_TH_ERRORS | |||
| 732 | } | |||
| 733 | ||||
| 734 | #ifndef USE_DEPLOY | |||
| 735 | // This code is only used for asserts, so it is OK to skip it entirely from | |||
| 736 | // deploy interpreters (in which case we will just skip the safety check). For | |||
| 737 | // a more precise check, it would be necessary to test that we are not holding | |||
| 738 | // the GIL for *all* active torch deploy interpreters. There is not really any | |||
| 739 | // reason to do this. | |||
| 740 | struct ConcretePythonGILHooks : public c10::impl::PythonGILHooks { | |||
| 741 | bool check_python_gil() const override { | |||
| 742 | return Py_IsInitialized() && PyGILState_Check(); | |||
| 743 | }; | |||
| 744 | }; | |||
| 745 | // During process destruction, python_gil_hooks will get destructed, making | |||
| 746 | // further virtual calls on the object invalid. By the ordering of declarations | |||
| 747 | // in this file, the registerer will get destructed first, removing the | |||
| 748 | // externally visible reference to the object. Assuming at this point in time, | |||
| 749 | // there aren't other threads racing to read out the hooks, subsequent calls | |||
| 750 | // into GIL hooks will hit a nullptr and gracefully no-op the asserts (as | |||
| 751 | // desired, since at process shutdown time the Python interpreter is definitely | |||
| 752 | // dead). | |||
| 753 | // | |||
| 754 | // An alternative way to reduce the risk of python_gil_hooks going prematurely | |||
| 755 | // dead would be to leak it at destruction time. I didn't do that because | |||
| 756 | // it's annoying to write the Registerer class for this case. | |||
| 757 | ConcretePythonGILHooks python_gil_hooks; | |||
| 758 | static c10::impl::PythonGILHooksRegisterer python_gil_hooks_registerer(&python_gil_hooks); | |||
| 759 | #endif | |||
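The hook registered above only exposes a GIL sanity check to c10, and the check itself is just the two CPython queries used in check_python_gil, which is why it degrades gracefully once the interpreter has shut down. A standalone sketch of the same assertion, with a hypothetical helper name:

    // Sketch: assert the calling thread holds the GIL before it touches PyObjects.
    static void assert_gil_held_for_pyobject_access() {
      if (!Py_IsInitialized()) return;  // interpreter already gone (e.g. process shutdown)
      TORCH_INTERNAL_ASSERT(
          PyGILState_Check(),           // returns 1 iff this thread currently holds the GIL
          "expected the GIL to be held while manipulating PyObjects");
    }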
| 760 | ||||
| 761 | PyObject *THPVariable_get_shape(THPVariable *self, void *unused) | |||
| 762 | { | |||
| 763 | HANDLE_TH_ERRORS | |||
| 764 | if (check_has_torch_function((PyObject *)self)) { | |||
| 765 | return handle_torch_function_getter(self, "shape"); | |||
| 766 | } | |||
| 767 | return THPSize_New(THPVariable_Unpack(self)); | |||
| 768 | END_HANDLE_TH_ERRORS | |||
| 769 | } | |||
| 770 | ||||
| 771 | PyObject *THPVariable_is_cuda(THPVariable *self, void *unused) | |||
| 772 | { | |||
| 773 | HANDLE_TH_ERRORS | |||
| 774 | if (check_has_torch_function((PyObject *)self)) { | |||
| 775 | return handle_torch_function_getter(self, "is_cuda"); | |||
| 776 | } | |||
| 777 | auto& self_ = THPVariable_Unpack(self); | |||
| 778 | return torch::autograd::utils::wrap(self_.is_cuda()); | |||
| 779 | END_HANDLE_TH_ERRORS | |||
| 780 | } | |||
| 781 | ||||
| 782 | PyObject* THPVariable_is_xpu(THPVariable* self, void* unused) { | |||
| 783 | HANDLE_TH_ERRORS | |||
| 784 | if (check_has_torch_function((PyObject*)self)) { | |||
| 785 | return handle_torch_function_getter(self, "is_xpu"); | |||
| 786 | } | |||
| 787 | auto& self_ = THPVariable_Unpack(self); | |||
| 788 | return torch::autograd::utils::wrap(self_.is_xpu()); | |||
| 789 | END_HANDLE_TH_ERRORS | |||
| 790 | } | |||
| 791 | ||||
| 792 | PyObject *THPVariable_is_sparse(THPVariable *self, void *unused) | |||
| 793 | { | |||
| 794 | HANDLE_TH_ERRORS | |||
| 795 | if (check_has_torch_function((PyObject *)self)) { | |||
| 796 | return handle_torch_function_getter(self, "is_sparse"); | |||
| 797 | } | |||
| 798 | auto& self_ = THPVariable_Unpack(self); | |||
| 799 | return torch::autograd::utils::wrap(self_.is_sparse()); | |||
| 800 | END_HANDLE_TH_ERRORS | |||
| 801 | } | |||
| 802 | ||||
| 803 | PyObject *THPVariable_is_sparse_csr(THPVariable *self, void *unused) | |||
| 804 | { | |||
| 805 | HANDLE_TH_ERRORS | |||
| 806 | if (check_has_torch_function((PyObject *)self)) { | |||
| 807 | return handle_torch_function_getter(self, "is_sparse_csr"); | |||
| 808 | } | |||
| 809 | auto& self_ = THPVariable_Unpack(self); | |||
| 810 | return torch::autograd::utils::wrap(self_.is_sparse_csr()); | |||
| 811 | END_HANDLE_TH_ERRORS | |||
| 812 | } | |||
| 813 | ||||
| 814 | PyObject *THPVariable_is_mkldnn(THPVariable *self, void *unused) | |||
| 815 | { | |||
| 816 | HANDLE_TH_ERRORS | |||
| 817 | if (check_has_torch_function((PyObject *)self)) { | |||
| 818 | return handle_torch_function_getter(self, "is_mkldnn"); | |||
| 819 | } | |||
| 820 | auto& self_ = THPVariable_Unpack(self); | |||
| 821 | return torch::autograd::utils::wrap(self_.is_mkldnn()); | |||
| 822 | END_HANDLE_TH_ERRORS | |||
| 823 | } | |||
| 824 | ||||
| 825 | PyObject *THPVariable_is_mlc(THPVariable *self, void *unused) | |||
| 826 | { | |||
| 827 | HANDLE_TH_ERRORS | |||
| 828 | if (check_has_torch_function((PyObject *)self)) { | |||
| 829 | return handle_torch_function_getter(self, "is_mlc"); | |||
| 830 | } | |||
| 831 | auto& self_ = THPVariable_Unpack(self); | |||
| 832 | return torch::autograd::utils::wrap(self_.is_mlc()); | |||
| 833 | END_HANDLE_TH_ERRORS | |||
| 834 | } | |||
| 835 | ||||
| 836 | PyObject *THPVariable_is_vulkan(THPVariable *self, void *unused) | |||
| 837 | { | |||
| 838 | HANDLE_TH_ERRORS | |||
| 839 | if (check_has_torch_function((PyObject *)self)) { | |||
| 840 | return handle_torch_function_getter(self, "is_vulkan"); | |||
| 841 | } | |||
| 842 | auto& self_ = THPVariable_Unpack(self); | |||
| 843 | return torch::autograd::utils::wrap(self_.is_vulkan()); | |||
| 844 | END_HANDLE_TH_ERRORS | |||
| 845 | } | |||
| 846 | ||||
| 847 | PyObject *THPVariable_is_quantized(THPVariable *self, void *unused) | |||
| 848 | { | |||
| 849 | HANDLE_TH_ERRORS | |||
| 850 | if (check_has_torch_function((PyObject *)self)) { | |||
| 851 | return handle_torch_function_getter(self, "is_quantized"); | |||
| 852 | } | |||
| 853 | auto& self_ = THPVariable_Unpack(self); | |||
| 854 | return torch::autograd::utils::wrap(self_.is_quantized()); | |||
| 855 | END_HANDLE_TH_ERRORS | |||
| 856 | } | |||
| 857 | ||||
| 858 | PyObject *THPVariable_is_meta(THPVariable *self, void *unused) | |||
| 859 | { | |||
| 860 | HANDLE_TH_ERRORS | |||
| 861 | if (check_has_torch_function((PyObject *)self)) { | |||
| 862 | return handle_torch_function_getter(self, "is_meta"); | |||
| 863 | } | |||
| 864 | auto& self_ = THPVariable_Unpack(self); | |||
| 865 | return torch::autograd::utils::wrap(self_.is_meta()); | |||
| 866 | END_HANDLE_TH_ERRORS | |||
| 867 | } | |||
| 868 | ||||
| 869 | PyObject *THPVariable_is_complex(THPVariable *self, void *unused) | |||
| 870 | { | |||
| 871 | HANDLE_TH_ERRORS | |||
| 872 | if (check_has_torch_function((PyObject *)self)) { | |||
| 873 | return handle_torch_function_getter(self, "is_complex"); | |||
| 874 | } | |||
| 875 | auto& self_ = THPVariable_Unpack(self); | |||
| 876 | return torch::autograd::utils::wrap(self_.is_complex()); | |||
| 877 | END_HANDLE_TH_ERRORS | |||
| 878 | } | |||
| 879 | ||||
| 880 | static PyObject *THPVariable_dtype(THPVariable *self, void *unused) | |||
| 881 | { | |||
| 882 | HANDLE_TH_ERRORS | |||
| 883 | if (check_has_torch_function((PyObject *)self)) { | |||
| 884 | return handle_torch_function_getter(self, "dtype"); | |||
| 885 | } | |||
| 886 | auto& self_ = THPVariable_Unpack(self); | |||
| 887 | return torch::autograd::utils::wrap(torch::getTHPDtype(self_.scalar_type())); | |||
| 888 | END_HANDLE_TH_ERRORS | |||
| 889 | } | |||
| 890 | ||||
| 891 | static PyObject * THPVariable_layout(THPVariable* self, void *unused) { | |||
| 892 | HANDLE_TH_ERRORS | |||
| 893 | if (check_has_torch_function((PyObject *)self)) { | |||
| 894 | return handle_torch_function_getter(self, "layout"); | |||
| 895 | } | |||
| 896 | auto& self_ = THPVariable_Unpack(self); | |||
| 897 | return torch::autograd::utils::wrap(torch::getTHPLayout(self_.layout())); | |||
| 898 | END_HANDLE_TH_ERRORS | |||
| 899 | } | |||
| 900 | ||||
| 901 | static PyObject * THPVariable_device(THPVariable* self, void *unused) { | |||
| 902 | HANDLE_TH_ERRORS | |||
| 903 | if (check_has_torch_function((PyObject *)self)) { | |||
| 904 | return handle_torch_function_getter(self, "device"); | |||
| 905 | } | |||
| 906 | return THPDevice_New(THPVariable_Unpack(self).device()); | |||
| 907 | END_HANDLE_TH_ERRORS | |||
| 908 | } | |||
| 909 | ||||
| 910 | PyObject *THPVariable_get_real(THPVariable* self, void *unused) | |||
| 911 | { | |||
| 912 | HANDLE_TH_ERRORS | |||
| 913 | if (check_has_torch_function((PyObject *)self)) { | |||
| 914 | return handle_torch_function_getter(self, "real"); | |||
| 915 | } | |||
| 916 | auto& self_ = THPVariable_Unpack(self); | |||
| 917 | auto real = at::real(self_); | |||
| 918 | return THPVariable_Wrap(real); | |||
| 919 | END_HANDLE_TH_ERRORS | |||
| 920 | } | |||
| 921 | ||||
| 922 | PyObject *THPVariable_get_imag(THPVariable* self, void *unused) | |||
| 923 | { | |||
| 924 | HANDLE_TH_ERRORS | |||
| 925 | if (check_has_torch_function((PyObject *)self)) { | |||
| 926 | return handle_torch_function_getter(self, "imag"); | |||
| 927 | } | |||
| 928 | auto& self_ = THPVariable_Unpack(self); | |||
| 929 | auto imag = at::imag(self_); | |||
| 930 | return THPVariable_Wrap(imag); | |||
| 931 | END_HANDLE_TH_ERRORS | |||
| 932 | } | |||
| 933 | ||||
| 934 | int THPVariable_set_real(THPVariable *self, THPVariable *real, void *unused) | |||
| 935 | { | |||
| 936 | HANDLE_TH_ERRORS | |||
| 937 | auto& self_ = THPVariable_Unpack(self); | |||
| 938 | auto self_real = at::real(self_); | |||
| 939 | self_real.copy_(THPVariable_Unpack(real)); | |||
| 940 | return 0; | |||
| 941 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 942 | } | |||
| 943 | ||||
| 944 | int THPVariable_set_imag(THPVariable* self, THPVariable *imag, void *unused) | |||
| 945 | { | |||
| 946 | HANDLE_TH_ERRORS | |||
| 947 | auto& self_ = THPVariable_Unpack(self); | |||
| 948 | auto self_imag = at::imag(self_); | |||
| 949 | self_imag.copy_(THPVariable_Unpack(imag)); | |||
| 950 | return 0; | |||
| 951 | END_HANDLE_TH_ERRORS_RET(-1) | |||
| 952 | } | |||
| 953 | ||||
| 954 | // properties are registered here because we are currently only able to bind them | |||
| 955 | // manually. TODO: make declarable in native_functions | |||
| 956 | // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables) | |||
| 957 | static struct PyGetSetDef THPVariable_properties[] = { | |||
| 958 | {"_python_dispatch", (getter)THPVariable_get_python_dispatch, nullptr, nullptr, nullptr}, | |||
| 959 | {"T", (getter)THPVariable_get_T, nullptr, nullptr, nullptr}, | |||
| 960 | {"_cdata", (getter)THPVariable_get_cdata, nullptr, nullptr, nullptr}, | |||
| 961 | {"_version", (getter)THPVariable_get_version, nullptr, nullptr, nullptr}, | |||
| 962 | {"grad_fn", (getter)THPVariable_get_grad_fn, nullptr, nullptr, nullptr}, | |||
| 963 | {"_grad_fn", (getter)THPVariable_get_grad_fn, (setter)THPVariable_set_grad_fn, nullptr, nullptr}, | |||
| 964 | {"is_leaf", (getter)THPVariable_is_leaf, nullptr, nullptr, nullptr}, | |||
| 965 | {"retains_grad", (getter)THPVariable_retains_grad, nullptr, nullptr, nullptr}, | |||
| 966 | {"data", (getter)THPVariable_get_data, (setter)THPVariable_set_data, nullptr, nullptr}, | |||
| 967 | {"_grad", (getter)THPVariable_get_grad, (setter)THPVariable_set_grad, nullptr, nullptr}, // Allows the python class to override .grad | |||
| 968 | {"grad", (getter)THPVariable_get_grad, (setter)THPVariable_set_grad, nullptr, nullptr}, | |||
| 969 | {"_base", (getter)THPVariable_get_base, nullptr, nullptr, nullptr}, | |||
| 970 | {"volatile", (getter)THPVariable_get_volatile, (setter)THPVariable_set_volatile, nullptr, nullptr}, | |||
| 971 | {"output_nr", (getter)THPVariable_get_output_nr, nullptr, nullptr, nullptr}, | |||
| 972 | {"requires_grad", (getter)THPVariable_get_requires_grad, (setter)THPVariable_set_requires_grad, nullptr, nullptr}, | |||
| 973 | {"_backward_hooks", (getter)THPVariable_get_backwards_hooks, (setter)THPVariable_set_backwards_hooks, nullptr, nullptr}, | |||
| 974 | {"name", (getter)THPVariable_get_name, nullptr, nullptr, nullptr}, | |||
| 975 | {"shape", (getter)THPVariable_get_shape, nullptr, nullptr, nullptr}, | |||
| 976 | {"is_cuda", (getter)THPVariable_is_cuda, nullptr, nullptr, nullptr}, | |||
| 977 | {"is_xpu", (getter)THPVariable_is_xpu, nullptr, nullptr, nullptr}, | |||
| 978 | {"is_sparse", (getter)THPVariable_is_sparse, nullptr, nullptr, nullptr}, | |||
| 979 | {"is_sparse_csr", (getter)THPVariable_is_sparse_csr, nullptr, nullptr, nullptr}, | |||
| 980 | {"is_mkldnn", (getter)THPVariable_is_mkldnn, nullptr, nullptr, nullptr}, | |||
| 981 | {"is_mlc", (getter)THPVariable_is_mlc, nullptr, nullptr, nullptr}, | |||
| 982 | {"is_vulkan", (getter)THPVariable_is_vulkan, nullptr, nullptr, nullptr}, | |||
| 983 | {"is_complex", (getter)THPVariable_is_complex, nullptr, nullptr, nullptr}, | |||
| 984 | {"is_quantized", (getter)THPVariable_is_quantized, nullptr, nullptr, nullptr}, | |||
| 985 | {"is_meta", (getter)THPVariable_is_meta, nullptr, nullptr, nullptr}, | |||
| 986 | {"dtype", (getter)THPVariable_dtype, nullptr, nullptr, nullptr}, | |||
| 987 | {"layout", (getter)THPVariable_layout, nullptr, nullptr, nullptr}, | |||
| 988 | {"device", (getter)THPVariable_device, nullptr, nullptr, nullptr}, | |||
| 989 | {"ndim", (getter)THPVariable_get_ndim, nullptr, nullptr, nullptr}, | |||
| 990 | {"names", (getter)THPVariable_get_names, (setter)THPVariable_set_names, nullptr, nullptr}, | |||
| 991 | {"real", (getter)THPVariable_get_real, (setter)THPVariable_set_real, nullptr, nullptr}, | |||
| 992 | {"imag", (getter)THPVariable_get_imag, (setter)THPVariable_set_imag, nullptr, nullptr}, | |||
| 993 | {nullptr} | |||
| 994 | }; | |||
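// ---------------------------------------------------------------------------
// [Editor's illustrative sketch; not part of python_variable.cpp]
// Each row in the table above pairs an attribute name with getter/setter
// functions of the kind defined earlier in this file. A hypothetical read-only
// property would follow the same pattern; "is_example" and
// THPVariable_get_is_example are invented names used only for this sketch.
static PyObject* THPVariable_get_is_example(THPVariable* self, void* unused)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function((PyObject*)self)) {
    return handle_torch_function_getter(self, "is_example");
  }
  // Report whether the underlying at::Tensor is defined.
  return PyBool_FromLong(THPVariable_Unpack(self).defined());
  END_HANDLE_TH_ERRORS
}
// ...and THPVariable_properties would gain a matching row before the sentinel:
//   {"is_example", (getter)THPVariable_get_is_example, nullptr, nullptr, nullptr},
// ---------------------------------------------------------------------------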
| 995 | ||||
| 996 | // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) | |||
| 997 | static PyMappingMethods THPVariable_as_mapping = { | |||
| 998 | THPVariable_length, | |||
| 999 | THPVariable_getitem, | |||
| 1000 | THPVariable_setitem, | |||
| 1001 | }; | |||
| 1002 | ||||
| 1003 | // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables) | |||
| 1004 | static PyMethodDef extra_methods[] = { | |||
| 1005 | {"as_subclass", castPyCFunctionWithKeywords(THPVariable_as_subclass), | |||
| 1006 | METH_VARARGS | METH_KEYWORDS, nullptr}, | |||
| 1007 | {"_make_subclass", castPyCFunctionWithKeywords(THPVariable_make_subclass), | |||
| 1008 | METH_STATIC | METH_VARARGS | METH_KEYWORDS, nullptr}, | |||
| 1009 | {"_fix_weakref", THPVariable_fix_weakref, | |||
| 1010 | METH_NOARGS, nullptr}, | |||
| 1011 | {nullptr} | |||
| 1012 | }; | |||
| 1013 | ||||
| 1014 | /* From https://github.com/python/cpython/blob/v3.7.0/Modules/xxsubtype.c | |||
| 1015 | If compiled as a shared library instead, some compilers don't allow addresses | |||
| 1016 | of Python objects defined in other libraries to be used in static | |||
| 1017 | initializers here. The DEFERRED_ADDRESS macro is used to tag the slots where | |||
| 1018 | such addresses appear; the module init function must fill in the tagged slots | |||
| 1019 | at runtime. The argument is for documentation -- the macro ignores it. | |||
| 1020 | */ | |||
| 1021 | #define DEFERRED_ADDRESS(ADDR) nullptr | |||
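// [Editor's note; illustration only] The deferred slots tagged with
// DEFERRED_ADDRESS are filled in at runtime by THPVariable_initModule further
// down in this file, before the type is readied:
//
//   THPVariableMetaType.tp_base = &PyType_Type;   // fill the deferred tp_base slot
//   if (PyType_Ready(&THPVariableMetaType) < 0)
//     return false;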
| 1022 | ||||
| 1023 | struct THPVariableMeta { | |||
| 1024 | PyHeapTypeObject base; | |||
| 1025 | }; | |||
| 1026 | ||||
| 1027 | int THPVariableMetaType_init(PyObject *cls, PyObject *args, PyObject *kwargs); | |||
| 1028 | ||||
| 1029 | // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) | |||
| 1030 | PyTypeObject THPVariableMetaType = { | |||
| 1031 | PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0) | |||
| 1032 | "torch._C._TensorMeta", /* tp_name */ | |||
| 1033 | sizeof(THPVariableMeta), /* tp_basicsize */ | |||
| 1034 | 0, /* tp_itemsize */ | |||
| 1035 | nullptr, /* tp_dealloc */ | |||
| 1036 | // NOLINTNEXTLINE(modernize-use-nullptr) | |||
| 1037 | 0, /* tp_vectorcall_offset */ | |||
| 1038 | nullptr, /* tp_getattr */ | |||
| 1039 | nullptr, /* tp_setattr */ | |||
| 1040 | nullptr, /* tp_reserved */ | |||
| 1041 | nullptr, /* tp_repr */ | |||
| 1042 | nullptr, /* tp_as_number */ | |||
| 1043 | nullptr, /* tp_as_sequence */ | |||
| 1044 | nullptr, /* tp_as_mapping */ | |||
| 1045 | nullptr, /* tp_hash */ | |||
| 1046 | nullptr, /* tp_call */ | |||
| 1047 | nullptr, /* tp_str */ | |||
| 1048 | nullptr, /* tp_getattro */ | |||
| 1049 | nullptr, /* tp_setattro */ | |||
| 1050 | nullptr, /* tp_as_buffer */ | |||
| 1051 | Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ | |||
| 1052 | nullptr, /* tp_doc */ | |||
| 1053 | nullptr, /* tp_traverse */ | |||
| 1054 | nullptr, /* tp_clear */ | |||
| 1055 | nullptr, /* tp_richcompare */ | |||
| 1056 | 0, /* tp_weaklistoffset */ | |||
| 1057 | nullptr, /* tp_iter */ | |||
| 1058 | nullptr, /* tp_iternext */ | |||
| 1059 | nullptr, /* tp_methods */ | |||
| 1060 | nullptr, /* tp_members */ | |||
| 1061 | nullptr, /* tp_getset */ | |||
| 1062 | DEFERRED_ADDRESS(&PyType_Type), /* tp_base */ | |||
| 1063 | nullptr, /* tp_dict */ | |||
| 1064 | nullptr, /* tp_descr_get */ | |||
| 1065 | nullptr, /* tp_descr_set */ | |||
| 1066 | 0, /* tp_dictoffset */ | |||
| 1067 | THPVariableMetaType_init, /* tp_init */ | |||
| 1068 | nullptr, /* tp_alloc */ | |||
| 1069 | nullptr, /* tp_new */ | |||
| 1070 | }; | |||
| 1071 | ||||
| 1072 | // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) | |||
| 1073 | PyTypeObject THPVariableType = { | |||
| 1074 | PyVarObject_HEAD_INIT( | |||
| 1075 | &THPVariableMetaType, | |||
| 1076 | 0) "torch._C._TensorBase", /* tp_name */ | |||
| 1077 | sizeof(THPVariable), /* tp_basicsize */ | |||
| 1078 | 0, /* tp_itemsize */ | |||
| 1079 | // This is unspecified, because it is illegal to create a THPVariableType | |||
| 1080 | // directly. Subclasses will have their tp_dealloc set appropriately | |||
| 1081 | // by the metaclass | |||
| 1082 | nullptr, /* tp_dealloc */ | |||
| 1083 | // NOLINTNEXTLINE(modernize-use-nullptr) | |||
| 1084 | 0, /* tp_vectorcall_offset */ | |||
| 1085 | nullptr, /* tp_getattr */ | |||
| 1086 | nullptr, /* tp_setattr */ | |||
| 1087 | nullptr, /* tp_reserved */ | |||
| 1088 | nullptr, /* tp_repr */ | |||
| 1089 | nullptr, /* tp_as_number */ | |||
| 1090 | nullptr, /* tp_as_sequence */ | |||
| 1091 | &THPVariable_as_mapping, /* tp_as_mapping */ | |||
| 1092 | nullptr, /* tp_hash */ | |||
| 1093 | nullptr, /* tp_call */ | |||
| 1094 | nullptr, /* tp_str */ | |||
| 1095 | nullptr, /* tp_getattro */ | |||
| 1096 | nullptr, /* tp_setattro */ | |||
| 1097 | nullptr, /* tp_as_buffer */ | |||
| 1098 | Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | | |||
| 1099 | Py_TPFLAGS_HAVE_GC, /* tp_flags */ | |||
| 1100 | nullptr, /* tp_doc */ | |||
| 1101 | // Also set by metaclass | |||
| 1102 | nullptr, /* tp_traverse */ | |||
| 1103 | (inquiry)THPVariable_clear, /* tp_clear */ | |||
| 1104 | nullptr, /* tp_richcompare */ | |||
| 1105 | 0, /* tp_weaklistoffset */ | |||
| 1106 | nullptr, /* tp_iter */ | |||
| 1107 | nullptr, /* tp_iternext */ | |||
| 1108 | nullptr, /* tp_methods */ | |||
| 1109 | nullptr, /* tp_members */ | |||
| 1110 | THPVariable_properties, /* tp_getset */ | |||
| 1111 | nullptr, /* tp_base */ | |||
| 1112 | nullptr, /* tp_dict */ | |||
| 1113 | nullptr, /* tp_descr_get */ | |||
| 1114 | nullptr, /* tp_descr_set */ | |||
| 1115 | 0, /* tp_dictoffset */ | |||
| 1116 | nullptr, /* tp_init */ | |||
| 1117 | nullptr, /* tp_alloc */ | |||
| 1118 | // Although new is provided here, it is illegal to call this with cls == | |||
| 1119 | // THPVariableType. Instead, subclass it first and then construct it | |||
| 1120 | THPVariable_pynew, /* tp_new */ | |||
| 1121 | }; | |||
| 1122 | ||||
| 1123 | PyObject *THPVariable_pynew(PyTypeObject *type, PyObject *args, PyObject *kwargs) | |||
| 1124 | { | |||
| 1125 | HANDLE_TH_ERRORS | |||
| 1126 | TORCH_CHECK(type != &THPVariableType, "Cannot directly construct _TensorBase; subclass it and then construct that"); | |||
| 1127 | jit::tracer::warn("torch.Tensor", jit::tracer::WARN_CONSTRUCTOR); | |||
| 1128 | auto tensor = torch::utils::legacy_tensor_ctor(torch::tensors::get_default_dispatch_key(), torch::tensors::get_default_scalar_type(), args, kwargs); | |||
| 1129 | // WARNING: tensor is NOT guaranteed to be a fresh tensor; e.g., if it was | |||
| 1130 | // given a raw pointer that will refcount bump | |||
| 1131 | return THPVariable_NewWithVar( | |||
| 1132 | type, | |||
| 1133 | std::move(tensor), | |||
| 1134 | c10::impl::PyInterpreterStatus::MAYBE_UNINITIALIZED); | |||
| 1135 | END_HANDLE_TH_ERRORS | |||
| 1136 | } | |||
| 1137 | ||||
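// [Editor's note] clear_slots mirrors the slot-clearing step of CPython's
// subtype_dealloc: it walks the PyMemberDef entries of a heap type (i.e. its
// __slots__) and drops any writable T_OBJECT_EX reference stored on the
// instance.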
| 1138 | static void clear_slots(PyTypeObject* type, PyObject* self) { | |||
| 1139 | Py_ssize_t i, n; | |||
| 1140 | PyMemberDef* mp; | |||
| 1141 | ||||
| 1142 | n = Py_SIZE(type); | |||
| 1143 | mp = PyHeapType_GET_MEMBERS((PyHeapTypeObject*)type); | |||
| 1144 | for (i = 0; i < n; i++, mp++) { | |||
| 1145 | if (mp->type == T_OBJECT_EX && !(mp->flags & READONLY)) { | |||
| 1146 | char* addr = (char*)self + mp->offset; | |||
| 1147 | PyObject* obj = *(PyObject**)addr; | |||
| 1148 | if (obj != NULL) { | |||
| 1149 | *(PyObject**)addr = NULL; | |||
| 1150 | Py_DECREF(obj); | |||
| 1151 | } | |||
| 1152 | } | |||
| 1153 | } | |||
| 1154 | } | |||
| 1155 | ||||
| 1156 | // NB: this is not the tp_dealloc on THPVariable; instead, it's the dealloc | |||
| 1157 | // on subclasses. It's never valid to construct a THPVariable so it's not | |||
| 1158 | // necessary to implement the dealloc for that case | |||
| 1159 | void THPVariable_subclass_dealloc(PyObject* self) { | |||
| 1160 | if (THPVariable_tryResurrect((THPVariable*)self)) | |||
| 1161 | return; | |||
| 1162 | ||||
| 1163 | // This is like a crappy version of subtype_dealloc. | |||
| 1164 | // Unfortunately, we cannot directly delegate to | |||
| 1165 | // subtype_dealloc as it will start walking the parent | |||
| 1166 | // chain *starting with* the type of self, which will cause | |||
| 1167 | // us to go back to our custom dealloc. | |||
| 1168 | // | |||
| 1169 | // We have to replicate the subtype_dealloc logic to ensure | |||
| 1170 | // that finalizers are handled correctly | |||
| 1171 | PyTypeObject* type = Py_TYPE(self); | |||
| 1172 | TORCH_INTERNAL_ASSERT(type->tp_flags & Py_TPFLAGS_HEAPTYPE); | |||
| 1173 | TORCH_INTERNAL_ASSERT(PyType_IS_GC(type), "GC types not implemented"); | |||
| 1174 | ||||
| 1175 | PyObject_GC_UnTrack(self); | |||
| 1176 | // TODO: consider using trash can | |||
| 1177 | ||||
| 1178 | bool has_finalizer = type->tp_finalize || type->tp_del; | |||
| 1179 | ||||
| 1180 | if (type->tp_finalize) { | |||
| 1181 | PyObject_GC_Track(self); | |||
| 1182 | if (PyObject_CallFinalizerFromDealloc(self) < 0) { | |||
| 1183 | /* Resurrected */ | |||
| 1184 | return; | |||
| 1185 | } | |||
| 1186 | PyObject_GC_UnTrack(self); | |||
| 1187 | } | |||
| 1188 | ||||
| 1189 | // base test is unnecessary as THPVariable does not set this | |||
| 1190 | if (type->tp_weaklistoffset) { | |||
| 1191 | PyObject_ClearWeakRefs(self); | |||
| 1192 | } | |||
| 1193 | ||||
| 1194 | if (type->tp_del) { | |||
| 1195 | PyObject_GC_Track(self); | |||
| 1196 | type->tp_del(self); | |||
| 1197 | if (self->ob_refcnt > 0) { | |||
| 1198 | /* Resurrected */ | |||
| 1199 | return; | |||
| 1200 | } | |||
| 1201 | PyObject_GC_UnTrack(self); | |||
| 1202 | } | |||
| 1203 | ||||
| 1204 | if (has_finalizer) { | |||
| 1205 | /* New weakrefs could be created during the finalizer call. | |||
| 1206 | If this occurs, clear them out without calling their | |||
| 1207 | finalizers since they might rely on part of the object | |||
| 1208 | being finalized that has already been destroyed. */ | |||
| 1209 | if (type->tp_weaklistoffset) { | |||
| 1210 | /* Modeled after GET_WEAKREFS_LISTPTR() */ | |||
| 1211 | PyWeakReference** list = | |||
| 1212 | (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(self)((PyObject **) (((char *) (self)) + (((PyObject*)(self))-> ob_type)->tp_weaklistoffset)); | |||
| 1213 | while (*list) | |||
| 1214 | _PyWeakref_ClearRef(*list); | |||
| 1215 | } | |||
| 1216 | } | |||
| 1217 | ||||
| 1218 | // Clear all slots until we get to base class THPVariableType | |||
| 1219 | { | |||
| 1220 | PyTypeObject* base = type; | |||
| 1221 | while (base != &THPVariableType) { | |||
| 1222 | if (Py_SIZE(base)) { | |||
| 1223 | clear_slots(base, self); | |||
| 1224 | } | |||
| 1225 | base = base->tp_base; | |||
| 1226 | TORCH_INTERNAL_ASSERT(base); | |||
| 1227 | } | |||
| 1228 | } | |||
| 1229 | ||||
| 1230 | // All Python defined classes have __dict__ | |||
| 1231 | if (C10_LIKELY(type->tp_dictoffset)) { | |||
| 1232 | PyObject** dictptr = _PyObject_GetDictPtr(self); | |||
| 1233 | if (dictptr != NULL) { | |||
| 1234 | PyObject* dict = *dictptr; | |||
| 1235 | if (dict != NULL) { | |||
| 1236 | Py_DECREF(dict); | |||
| 1237 | *dictptr = NULL; | |||
| 1238 | } | |||
| 1239 | } | |||
| 1240 | } | |||
| 1241 | ||||
| 1242 | // subtype_dealloc allows for this but we don't | |||
| 1243 | TORCH_INTERNAL_ASSERT(Py_TYPE(self) == type); | |||
| 1244 | ||||
| 1245 | // Finally clear out the base THPVariable | |||
| 1246 | THPVariable_clear((THPVariable*)self); | |||
| 1247 | ((THPVariable*)self)->cdata.~MaybeOwned<Variable>(); | |||
| 1248 | Py_TYPE(self)->tp_free(self); | |||
| 1249 | ||||
| 1250 | // Python defined subclasses should always be on the heap | |||
| 1251 | TORCH_INTERNAL_ASSERT(type->tp_flags & Py_TPFLAGS_HEAPTYPE); | |||
| 1252 | Py_DECREF(type); | |||
| 1253 | } | |||
| 1254 | ||||
| 1255 | /// NOTE [ PyObject Traversal ] | |||
| 1256 | /// | |||
| 1257 | /// PyObjects that wrap c++ objects can lead to non-trivial traverse logic, | |||
| 1258 | /// and it can be tricky to know what to traverse and when. This note clarifies | |||
| 1259 | /// the danger and gives a simple algorithm for choosing how to write | |||
| 1260 | /// the tp_traverse and tp_clear functions. | |||
| 1261 | /// If you're not already familiar with how the CPython GC works, you should read this | |||
| 1262 | /// in-depth description: https://devguide.python.org/garbage_collector/ | |||
| 1263 | /// | |||
| 1264 | /// The complexity for us comes from the fact that some c++ shared_ptr objects | |||
| 1265 | /// own references to python objects and are also owned both by other python objects | |||
| 1266 | /// and c++ objects. This means that to allow the GC to collect all cycles, we need to | |||
| 1267 | /// properly implement the traverse/clear methods that take into account these C++ | |||
| 1268 | /// ownership links. | |||
| 1269 | /// | |||
| 1270 | /// The main danger here comes from the fact that, while all python-related code is | |||
| 1271 | /// thread safe wrt the GC execution (thanks to the GIL), other threads might be using | |||
| 1272 | /// our C++ objects arbitrarily which can lead to shared_ptr ref count going up or down | |||
| 1273 | /// in between the different traverse/clear invocations. | |||
| 1274 | /// The one constraint we add here that is not explicitly mentioned in the GC description | |||
| 1275 | /// above is that for a given GC run (meaning while the GIL is held), the traverse/clear | |||
| 1276 | /// pair should never report different ownership relations: if traverse visited a given | |||
| 1277 | /// PyObject, then the clear within that same GC run must still be the sole owner and | |||
| 1278 | /// clear that PyObject. | |||
| 1279 | /// | |||
| 1280 | /// A more mechanical algorithm to know what to traverse/clear is as follows: | |||
| 1281 | /// - Any field on this PyObject that contains a strong reference to another PyObject | |||
| 1282 | /// must be visited and cleared. An example of that is the "backward_hooks" field of | |||
| 1283 | /// the THPVariable. | |||
| 1284 | /// - Any field that contains a C++ object that is uniquely owned by this PyObject (either | |||
| 1285 | /// a unique_ptr or a shared_ptr with use_count==1) should have all the PyObject it owns | |||
| 1286 | /// visited and cleared. An example would be here the tensor hooks. | |||
| 1287 | /// - If that uniquely owned C++ object also uniquely owns other C++ objects, these should be | |||
| 1288 | /// visited and cleared as well if they contain any PyObject. | |||
| 1289 | /// | |||
| 1290 | /// Caveat: to avoid slow runtime, we limit the depth of this exploration of C++ objects in | |||
| 1291 | /// practice and we do not, for example, go through the whole autograd graph, even if it is | |||
| 1292 | /// uniquely owned. This is a known place where users can create noncollectable cycles as described | |||
| 1293 | /// in: https://github.com/pytorch/pytorch/issues/7343 | |||
| 1294 | /// | |||
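/// [Editor's illustrative sketch; not part of python_variable.cpp]
/// A minimal traverse/clear pair following the algorithm above, for a
/// hypothetical wrapper "FooObject" holding one strong PyObject reference and
/// a shared_ptr to a C++ "FooImpl" that may itself own a PyObject. All names
/// here (FooObject, FooImpl, owned_pyobj, hooks_dict) are invented.
///
///   struct FooImpl { PyObject* owned_pyobj; };
///   struct FooObject {
///     PyObject_HEAD
///     PyObject* hooks_dict;            // rule 1: direct strong PyObject reference
///     std::shared_ptr<FooImpl> impl;   // rule 2: C++ object we may uniquely own
///   };
///
///   static int Foo_traverse(PyObject* self, visitproc visit, void* arg) {
///     auto* f = reinterpret_cast<FooObject*>(self);
///     Py_VISIT(f->hooks_dict);
///     if (f->impl && f->impl.use_count() == 1) {   // sole owner => safe to report
///       Py_VISIT(f->impl->owned_pyobj);
///     }
///     return 0;
///   }
///
///   static int Foo_clear(PyObject* self) {
///     auto* f = reinterpret_cast<FooObject*>(self);
///     Py_CLEAR(f->hooks_dict);
///     if (f->impl && f->impl.use_count() == 1) {   // must match what traverse reported
///       Py_CLEAR(f->impl->owned_pyobj);
///     }
///     return 0;
///   }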
| 1295 | ||||
| 1296 | static int traverse_slots( | |||
| 1297 | PyTypeObject* type, | |||
| 1298 | PyObject* self, | |||
| 1299 | visitproc visit, | |||
| 1300 | void* arg) { | |||
| 1301 | Py_ssize_t i, n; | |||
| 1302 | PyMemberDef* mp; | |||
| 1303 | ||||
| 1304 | n = Py_SIZE(type); | |||
| 1305 | mp = PyHeapType_GET_MEMBERS((PyHeapTypeObject*)type); | |||
| 1306 | for (i = 0; i < n; i++, mp++) { | |||
| 1307 | if (mp->type == T_OBJECT_EX) { | |||
| 1308 | char* addr = (char*)self + mp->offset; | |||
| 1309 | PyObject* obj = *(PyObject**)addr; | |||
| 1310 | if (obj != NULL) { | |||
| 1311 | int err = visit(obj, arg); | |||
| 1312 | if (err) | |||
| 1313 | return err; | |||
| 1314 | } | |||
| 1315 | } | |||
| 1316 | } | |||
| 1317 | return 0; | |||
| 1318 | } | |||
| 1319 | ||||
| 1320 | static int THPVariable_subclass_traverse( | |||
| 1321 | PyObject* self, | |||
| 1322 | visitproc visit, | |||
| 1323 | void* arg) { | |||
| 1324 | // If the tensor is eligible to be resurrected, don't traverse it; instead | |||
| 1325 | // treat all of its references as a root (as they WOULD be a root since we | |||
| 1326 | // can treat the inbound C++ references as root owners). | |||
| 1327 | // | |||
| 1328 | // This works because unlike conventional GCs, Python's GC operates in two | |||
| 1329 | // phases: first it uses traverse to discover roots, and then it uses traverse | |||
| 1330 | // to do reachability. Bypassing traverse during root discovery forces Python | |||
| 1331 | // to treat self as a root for everything it refers to. For a full | |||
| 1332 | // explanation of the algorithm see | |||
| 1333 | // https://devguide.python.org/garbage_collector/ | |||
| 1334 | // | |||
| 1335 | // NB: if we don't hold an owning reference to the underlying Tensor, it is | |||
| 1336 | // possible that the underlying Tensor has already gone dead. In that case, | |||
| 1337 | // it's not safe to access it. But it's also safe to traverse, because if | |||
| 1338 | // the underlying Tensor *is* live, then root discovery will determine that | |||
| 1339 | // self is live, and nothing will get GC'ed anyway (resurrection cannot happen | |||
| 1340 | // if the C++ object owns the PyObject) | |||
| 1341 | THPVariable* var = reinterpret_cast<THPVariable*>(self); | |||
| 1342 | if (!var->cdata.unsafeIsBorrowed()) { | |||
| 1343 | const auto& tensor = THPVariable_Unpack(self); | |||
| 1344 | if (tensor.defined() && tensor.use_count() > 1) | |||
| 1345 | return 0; | |||
| 1346 | } | |||
| 1347 | ||||
| 1348 | // Crappy version of subtype_traverse; same deal as | |||
| 1349 | // THPVariable_subclass_dealloc | |||
| 1350 | ||||
| 1351 | PyTypeObject* type = Py_TYPE(self); | |||
| 1352 | // Traverse slots until we get to base class THPVariableType | |||
| 1353 | { | |||
| 1354 | PyTypeObject* base = type; | |||
| 1355 | while (base != &THPVariableType) { | |||
| 1356 | if (Py_SIZE(base)) { | |||
| 1357 | int err = traverse_slots(base, self, visit, arg); | |||
| 1358 | if (err) | |||
| 1359 | return err; | |||
| 1360 | } | |||
| 1361 | base = base->tp_base; | |||
| 1362 | TORCH_INTERNAL_ASSERT(base); | |||
| 1363 | } | |||
| 1364 | } | |||
| 1365 | ||||
| 1366 | // All Python defined classes have __dict__ | |||
| 1367 | if (C10_LIKELY(type->tp_dictoffset)) { | |||
| 1368 | PyObject** dictptr = _PyObject_GetDictPtr(self); | |||
| 1369 | if (dictptr && *dictptr) | |||
| 1370 | Py_VISIT(*dictptr); | |||
| 1371 | } | |||
| 1372 | ||||
| 1373 | TORCH_INTERNAL_ASSERT(type->tp_flags & Py_TPFLAGS_HEAPTYPE); | |||
| 1374 | Py_VISIT(type); | |||
| 1375 | ||||
| 1376 | // Finally traverse THPVariable special stuff | |||
| 1377 | Py_VISIT(var->backward_hooks); | |||
| 1378 | if (!var->cdata.unsafeIsBorrowed()) { | |||
| 1379 | const auto& tensor = THPVariable_Unpack(var); | |||
| 1380 | if (tensor.defined()) { | |||
| 1381 | // WARNING: The grad_fn traversal logic is very subtle, if you change this, | |||
| 1382 | // be very careful not to re-introduce this bug: | |||
| 1383 | // https://gist.github.com/zou3519/7ac92b84dd7d206dcc6eae55fee8372c | |||
| 1384 | ||||
| 1385 | // We ensure that we follow NOTE [ PyObject Traversal ] here by checking that this | |||
| 1386 | // python object is the sole owner of the underlying Tensor and that this Tensor | |||
| 1387 | // is the sole owner of its grad_fn. | |||
| 1388 | // In this case, the only way to get a new reference to the grad_fn is by using | |||
| 1389 | // this python object, which requires the GIL to be accessed. | |||
| 1390 | // Note that this is only valid as long as users don't share non-owning references | |||
| 1391 | // across different threads (which is crazy and should never be done). | |||
| 1392 | ||||
| 1393 | if (tensor.use_count() == 1) { | |||
| 1394 | auto autograd_meta = torch::autograd::impl::get_autograd_meta(tensor); | |||
| 1395 | if (autograd_meta) { | |||
| 1396 | // Do NOT call grad_fn() here as that might trigger a recompute | |||
| 1397 | const auto& grad_fn = autograd_meta->grad_fn_; | |||
| 1398 | if (grad_fn && grad_fn.use_count() == 1) { | |||
| 1399 | // Every Node can have a pyobj (stored in "pyobj_") | |||
| 1400 | Py_VISIT(grad_fn->pyobj()); | |||
| 1401 | // PyNodes are special as they also have an "obj" field | |||
| 1402 | if (auto py_node_fn = dynamic_cast<PyNode*>(grad_fn.get())) { | |||
| 1403 | Py_VISIT(py_node_fn->obj); | |||
| 1404 | } | |||
| 1405 | } | |||
| 1406 | } | |||
| 1407 | } | |||
| 1408 | ||||
| 1409 | for (const auto& hook : torch::autograd::impl::hooks(tensor)) { | |||
| 1410 | if (auto pyhook = dynamic_cast<PyFunctionPreHook*>(hook.get())) { | |||
| 1411 | Py_VISIT(pyhook->dict); | |||
| 1412 | } | |||
| 1413 | } | |||
| 1414 | } | |||
| 1415 | } | |||
| 1416 | ||||
| 1417 | return 0; | |||
| 1418 | } | |||
| 1419 | ||||
| 1420 | int THPVariableMetaType_init(PyObject *cls, PyObject *args, PyObject *kwargs) { | |||
| 1421 | if (PyType_Type.tp_init(cls, args, kwargs) < 0) { | |||
| 1422 | return -1; | |||
| 1423 | } | |||
| 1424 | ((PyTypeObject*)cls)->tp_dealloc = (destructor)THPVariable_subclass_dealloc; | |||
| 1425 | ((PyTypeObject*)cls)->tp_traverse = | |||
| 1426 | (traverseproc)THPVariable_subclass_traverse; | |||
| 1427 | return 0; | |||
| 1428 | } | |||
| 1429 | ||||
| 1430 | namespace torch { namespace autograd { | |||
| 1431 | ||||
| 1432 | // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables) | |||
| 1433 | extern PyMethodDef variable_methods[]; | |||
| 1434 | extern void initTorchFunctions(PyObject *module); | |||
| 1435 | ||||
| 1436 | void initTensorImplConversion(PyObject* module) { | |||
| 1437 | auto m = py::handle(module).cast<py::module>(); | |||
| 1438 | m.def("_wrap_tensor_impl", [](void* ptr) { | |||
| 1439 | auto p = c10::intrusive_ptr<c10::TensorImpl, at::UndefinedTensorImpl>:: | |||
| 1440 | unsafe_reclaim_from_nonowning(static_cast<c10::TensorImpl*>(ptr)); | |||
| 1441 | TORCH_CHECK(p.defined(), "Can't wrap undefined tensor"); | |||
| 1442 | auto tensor = at::Tensor::wrap_tensor_impl(std::move(p)); | |||
| 1443 | // NOLINTNEXTLINE(performance-move-const-arg) | |||
| 1444 | return py::cast(std::move(tensor)); | |||
| 1445 | }); | |||
| 1446 | // set on the module level to avoid mixing pybind and plain CPython extensions | |||
| 1447 | m.def("_tensor_impl_raw_handle", [](torch::autograd::Variable* t) -> void* { | |||
| 1448 | // We return a raw non-owning pointer here, we rely on surrounding | |||
| 1449 | // code to keep the original tensor alive | |||
| 1450 | return t->getIntrusivePtr().get(); | |||
| 1451 | }); | |||
| 1452 | } | |||
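// [Editor's illustrative sketch; not part of python_variable.cpp]
// The two bindings above imply an ownership contract: the raw handle returned
// by _tensor_impl_raw_handle is non-owning, so the caller must keep the
// original tensor alive until the handle has been re-wrapped. A hypothetical
// C++ round trip (the helper name is invented):
//
//   at::Tensor roundtrip_through_raw_handle(const at::Tensor& t) {
//     void* handle = t.getIntrusivePtr().get();  // non-owning, as in the binding above
//     auto p = c10::intrusive_ptr<c10::TensorImpl, at::UndefinedTensorImpl>::
//         unsafe_reclaim_from_nonowning(static_cast<c10::TensorImpl*>(handle));
//     return at::Tensor::wrap_tensor_impl(std::move(p));
//   }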
| 1453 | }} | |||
| 1454 | ||||
| 1455 | bool THPVariable_initModule(PyObject *module) | |||
| 1456 | { | |||
| 1457 | THPVariableMetaType.tp_base = &PyType_Type; | |||
| 1458 | if (PyType_Ready(&THPVariableMetaType) < 0) | |||
| 1459 | return false; | |||
| 1460 | Py_INCREF(&THPVariableMetaType); | |||
| 1461 | PyModule_AddObject(module, "_TensorMeta", (PyObject *)&THPVariableMetaType); | |||
| 1462 | ||||
| 1463 | static std::vector<PyMethodDef> methods; | |||
| 1464 | THPUtils_addPyMethodDefs(methods, torch::autograd::variable_methods); | |||
| 1465 | THPUtils_addPyMethodDefs(methods, extra_methods); | |||
| 1466 | THPVariableType.tp_methods = methods.data(); | |||
| 1467 | if (PyType_Ready(&THPVariableType) < 0) | |||
| 1468 | return false; | |||
| 1469 | Py_INCREF(&THPVariableType); | |||
| 1470 | PyModule_AddObject(module, "_TensorBase", (PyObject *)&THPVariableType); | |||
| 1471 | torch::autograd::initTorchFunctions(module); | |||
| 1472 | torch::autograd::initTensorImplConversion(module); | |||
| 1473 | return true; | |||
| 1474 | } | |||
| 1475 | ||||
| 1476 | namespace { | |||
| 1477 | ||||
| 1478 | bool isPythonTensor(const Tensor& tensor) { | |||
| 1479 | return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Python); | |||
| 1480 | } | |||
| 1481 | ||||
| 1482 | void concrete_dispatch_fn(const c10::impl::PyInterpreter*, const c10::OperatorHandle& op, torch::jit::Stack* stack) { | |||
| 1483 | const auto& schema = op.schema(); | |||
| 1484 | const auto num_returns = schema.returns().size(); | |||
| 1485 | ||||
| 1486 | const auto num_arguments = schema.arguments().size(); | |||
| 1487 | auto arguments = torch::jit::pop(*stack, num_arguments); | |||
| 1488 | ||||
| 1489 | // Parse the name into namespace and name (no overload_name) | |||
| 1490 | // TODO: put this into the library | |||
| 1491 | const auto& qualified_name = op.operator_name().name; | |||
| 1492 | auto pos = qualified_name.find("::"); | |||
| 1493 | TORCH_INTERNAL_ASSERT(pos != std::string::npos, qualified_name); | |||
| 1494 | // Make me some null terminated strings | |||
| 1495 | std::string ns_str = qualified_name.substr(0, pos); | |||
| 1496 | const char* ns = ns_str.c_str(); | |||
| 1497 | const char* func_name = qualified_name.c_str() + pos + strlen("::"); | |||
| 1498 | ||||
| 1499 | // The plan: convert all the arguments back into PyObjects, | |||
| 1500 | // extracting out the tensor handles, then call | |||
| 1501 | // handle_torch_function_no_python_arg_parser | |||
| 1502 | // NB: at the point arguments are pushed to the stack, ALL defaults | |||
| 1503 | // are already present | |||
| 1504 | ||||
| 1505 | py::gil_scoped_acquire g; | |||
| 1506 | ||||
| 1507 | std::vector<py::handle> overloaded_args; | |||
| 1508 | auto args = py::reinterpret_steal<py::object>(PyTuple_New(num_arguments)); | |||
| 1509 | // TODO: actually populate kwargs sometimes? At the moment, every argument | |||
| 1510 | // just gets passed positionally | |||
| 1511 | py::dict kwargs; | |||
| 1512 | // For now, overloads get coalesced. Might be easier for users if they get | |||
| 1513 | // overload resolution but is more complicated (need to expose separate | |||
| 1514 | // functions per overload) | |||
| 1515 | py::handle torch_api_function = py::module::import("torch").attr("ops").attr(ns).attr(func_name); | |||
| 1516 | std::string module_name_str = "torch.ops." + ns_str; | |||
| 1517 | ||||
| 1518 | for (int64_t idx = 0; idx < arguments.size(); idx++) { | |||
| 1519 | auto& ivalue = arguments[idx]; | |||
| 1520 | // Search for Tensors (as they may have the torch functions we need) | |||
| 1521 | if (ivalue.isTensor()) { | |||
| 1522 | const auto& tensor = ivalue.toTensor(); | |||
| 1523 | if (isPythonTensor(tensor)) { | |||
| 1524 | overloaded_args.emplace_back(py::cast(tensor)); | |||
| 1525 | } | |||
| 1526 | } else if (ivalue.isList()) { | |||
| 1527 | const auto& list = ivalue.toListRef(); | |||
| 1528 | for (int64_t jdx = 0; jdx < list.size(); jdx++) { | |||
| 1529 | const auto& nv = list[jdx]; | |||
| 1530 | if (nv.isTensor()) { | |||
| 1531 | const auto& tensor = nv.toTensor(); | |||
| 1532 | if (isPythonTensor(tensor)) { | |||
| 1533 | overloaded_args.emplace_back(py::cast(tensor)); | |||
| 1534 | } | |||
| 1535 | } | |||
| 1536 | } | |||
| 1537 | } | |||
| 1538 | PyTuple_SET_ITEM(args.ptr(), idx, torch::jit::toPyObject(std::move(ivalue)).release().ptr()); | |||
| 1539 | } | |||
| 1540 | ||||
| 1541 | auto out = py::reinterpret_steal<py::object>(handle_torch_function_no_python_arg_parser( | |||
| 1542 | overloaded_args, | |||
| 1543 | args.ptr(), | |||
| 1544 | kwargs.ptr(), | |||
| 1545 | func_name, | |||
| 1546 | torch_api_function.ptr(), | |||
| 1547 | module_name_str.c_str(), | |||
| 1548 | "__torch_dispatch__" | |||
| 1549 | )); | |||
| 1550 | ||||
| 1551 | if (op.schema().returns().size() == 1) { | |||
| 1552 | torch::jit::push(stack, torch::jit::toIValue(out.ptr(), op.schema().returns()[0].type())); | |||
| 1553 | } else { | |||
| 1554 | auto outs = py::cast<py::sequence>(out); | |||
| 1555 | for (unsigned idx = 0; idx < outs.size(); idx++) { | |||
| 1556 | torch::jit::push(stack, torch::jit::toIValue(outs[idx].ptr(), op.schema().returns()[idx].type())); | |||
| 1557 | } | |||
| 1558 | } | |||
| 1559 | } | |||
| 1560 | ||||
| 1561 | c10::intrusive_ptr<TensorImpl> concrete_detach_fn(const c10::impl::PyInterpreter*, const c10::TensorImpl* self) { | |||
| 1562 | pybind11::gil_scoped_acquire gil; | |||
| 1563 | ||||
| 1564 | // Setup the arguments expected for the detach call | |||
| 1565 | std::vector<py::handle> overloaded_args; | |||
| 1566 | // TODO: there should be a shorter way to spell this | |||
| 1567 | // TODO: fix the constness of target | |||
| 1568 | Tensor self_t = Tensor(c10::intrusive_ptr<c10::TensorImpl, c10::UndefinedTensorImpl>::unsafe_reclaim_from_nonowning(const_cast<c10::TensorImpl*>(self))); | |||
| 1569 | auto self_p = py::reinterpret_steal<py::object>(THPVariable_Wrap(self_t)); | |||
| 1570 | overloaded_args.emplace_back(self_p); | |||
| 1571 | auto args = py::reinterpret_steal<py::object>(PyTuple_New(1)); | |||
| 1572 | PyTuple_SET_ITEM(args.ptr(), 0, self_p.release().ptr()); | |||
| 1573 | ||||
| 1574 | py::dict kwargs; | |||
| 1575 | ||||
| 1576 | auto out = py::reinterpret_steal<py::object>(handle_torch_function_no_python_arg_parser( | |||
| 1577 | overloaded_args, | |||
| 1578 | args.ptr(), | |||
| 1579 | kwargs.ptr(), | |||
| 1580 | "detach", | |||
| 1581 | py::module::import("torch").attr("ops").attr("aten").attr("detach").ptr(), | |||
| 1582 | "torch.ops.aten", | |||
| 1583 | "__torch_dispatch__" | |||
| 1584 | )); | |||
| 1585 | ||||
| 1586 | TORCH_CHECK(THPVariable_Check(out.ptr()), "detach returned invalid type ", py::detail::get_fully_qualified_tp_name(Py_TYPE(out.ptr())), ", expected Tensor"); | |||
| 1587 | const Tensor& res_t = THPVariable_Unpack(out.ptr()); | |||
| 1588 | return res_t.getIntrusivePtr(); | |||
| 1589 | } | |||
| 1590 | ||||
| 1591 | } // anonymous namespace |
| 1 | #ifndef PyTuple_New |
| 2 | struct _object; |
| 3 | typedef struct _object PyObject; |
| 4 | PyObject* clang_analyzer_PyObject_New_Reference(); |
| 5 | PyObject* PyTuple_New(Py_ssize_t len) { |
| 6 | return clang_analyzer_PyObject_New_Reference(); |
| 7 | } |
| 8 | #else |
| 9 | #warning "API PyTuple_New is defined as a macro." |
| 10 | #endif |