| File: | build/../torch/csrc/utils/tensor_numpy.cpp |
| Warning: | line 99, column 30: PyObject ownership leak with reference count of 1 |
| 1 | #include <torch/csrc/THP.h> | |||
| 2 | #include <torch/csrc/utils/tensor_numpy.h> | |||
| 3 | #define WITH_NUMPY_IMPORT_ARRAY | |||
| 4 | #include <torch/csrc/utils/numpy_stub.h> | |||
| 5 | #include <c10/util/irange.h> | |||
| 6 | ||||
| 7 | #ifndef USE_NUMPY | |||
| 8 | namespace torch { namespace utils { | |||
| 9 | PyObject* tensor_to_numpy(const at::Tensor& tensor) { | |||
| 10 | throw std::runtime_error("PyTorch was compiled without NumPy support"); | |||
| 11 | } | |||
| 12 | at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable/*=true*/) { | |||
| 13 | throw std::runtime_error("PyTorch was compiled without NumPy support"); | |||
| 14 | } | |||
| 15 | ||||
| 16 | bool is_numpy_available() { | |||
| 17 | throw std::runtime_error("PyTorch was compiled without NumPy support"); | |||
| 18 | } | |||
| 19 | ||||
| 20 | bool is_numpy_int(PyObject* obj) { | |||
| 21 | throw std::runtime_error("PyTorch was compiled without NumPy support"); | |||
| 22 | } | |||
| 23 | bool is_numpy_scalar(PyObject* obj) { | |||
| 24 | throw std::runtime_error("PyTorch was compiled without NumPy support"); | |||
| 25 | } | |||
| 26 | at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { | |||
| 27 | throw std::runtime_error("PyTorch was compiled without NumPy support"); | |||
| 28 | } | |||
| 29 | }} | |||
| 30 | #else | |||
| 31 | ||||
| 32 | #include <torch/csrc/DynamicTypes.h> | |||
| 33 | #include <torch/csrc/Exceptions.h> | |||
| 34 | #include <torch/csrc/autograd/python_variable.h> | |||
| 35 | #include <torch/csrc/utils/object_ptr.h> | |||
| 36 | ||||
| 37 | #include <ATen/ATen.h> | |||
| 38 | #include <ATen/TensorUtils.h> | |||
| 39 | #include <memory> | |||
| 40 | #include <sstream> | |||
| 41 | #include <stdexcept> | |||
| 42 | ||||
| 43 | using namespace at; | |||
| 44 | using namespace torch::autograd; | |||
| 45 | ||||
| 46 | namespace torch { namespace utils { | |||
| 47 | ||||
| 48 | bool is_numpy_available() { | |||
| 49 | static bool available = []() { | |||
| 50 | if (_import_array() >= 0) { | |||
| 51 | return true; | |||
| 52 | } | |||
| 53 | // Try to get exception message, print warning and return false | |||
| 54 | std::string message = "Failed to initialize NumPy"; | |||
| 55 | // NOLINTNEXTLINE(cppcoreguidelines-init-variables) | |||
| 56 | PyObject *type, *value, *traceback; | |||
| 57 | PyErr_Fetch(&type, &value, &traceback); | |||
| 58 | if (auto str = value ? PyObject_Str(value) : nullptr) { | |||
| 59 | if (auto enc_str = PyUnicode_AsEncodedString(str, "utf-8", "strict")) { | |||
| 60 | if (auto byte_str = PyBytes_AS_STRING(enc_str)) { | |||
| 61 | message += ": " + std::string(byte_str); | |||
| 62 | } | |||
| 63 | Py_XDECREF(enc_str); | |||
| 64 | } | |||
| 65 | Py_XDECREF(str); | |||
| 66 | } | |||
| 67 | PyErr_Clear(); | |||
| 68 | TORCH_WARN(message); | |||
| 69 | return false; | |||
| 70 | }(); | |||
| 71 | return available; | |||
| 72 | } | |||
| 73 | static std::vector<npy_intp> to_numpy_shape(IntArrayRef x) { | |||
| 74 | // shape and stride conversion from int64_t to npy_intp | |||
| 75 | auto nelem = x.size(); | |||
| 76 | auto result = std::vector<npy_intp>(nelem); | |||
| 77 | for(const auto i : c10::irange(nelem)) { | |||
| 78 | result[i] = static_cast<npy_intp>(x[i]); | |||
| 79 | } | |||
| 80 | return result; | |||
| 81 | } | |||
| 82 | ||||
| 83 | static std::vector<int64_t> to_aten_shape(int ndim, npy_intp* values) { | |||
| 84 | // shape and stride conversion from npy_intp to int64_t | |||
| 85 | auto result = std::vector<int64_t>(ndim); | |||
| 86 | for(const auto i : c10::irange(ndim)) { | |||
| 87 | result[i] = static_cast<int64_t>(values[i]); | |||
| 88 | } | |||
| 89 | return result; | |||
| 90 | } | |||
| 91 | ||||
| 92 | static std::vector<int64_t> seq_to_aten_shape(PyObject *py_seq) { | |||
| 93 | int ndim = PySequence_Length(py_seq); | |||
| 94 | if (ndim == -1) { | |||
| 95 | throw TypeError("shape and strides must be sequences"); | |||
| 96 | } | |||
| 97 | auto result = std::vector<int64_t>(ndim); | |||
| 98 | for(const auto i : c10::irange(ndim)) { | |||
| 99 | auto item = THPObjectPtr(PySequence_GetItem(py_seq, i)); | |||
| 100 | if (!item) throw python_error(); | |||
| 101 | ||||
| 102 | result[i] = PyLong_AsLongLong(item); | |||
| 103 | if (result[i] == -1 && PyErr_Occurred()) throw python_error(); | |||
| 104 | } | |||
| 105 | return result; | |||
| 106 | } | |||
| 107 | ||||
| 108 | PyObject* tensor_to_numpy(const at::Tensor& tensor) { | |||
| 109 | if (!is_numpy_available()) { | |||
| 110 | throw std::runtime_error("Numpy is not available"); | |||
| 111 | } | |||
| 112 | if (tensor.device().type() != DeviceType::CPU) { | |||
| 113 | throw TypeError( | |||
| 114 | "can't convert %s device type tensor to numpy. Use Tensor.cpu() to " | |||
| 115 | "copy the tensor to host memory first.", tensor.device().str().c_str()); | |||
| 116 | } | |||
| 117 | if (tensor.layout() != Layout::Strided) { | |||
| 118 | throw TypeError( | |||
| 119 | "can't convert %s layout tensor to numpy." | |||
| 120 | "convert the tensor to a strided layout first.", c10::str(tensor.layout()).c_str()); | |||
| 121 | } | |||
| 122 | if (at::GradMode::is_enabled() && tensor.requires_grad()) { | |||
| 123 | throw std::runtime_error( | |||
| 124 | "Can't call numpy() on Tensor that requires grad. " | |||
| 125 | "Use tensor.detach().numpy() instead."); | |||
| 126 | } | |||
| 127 | auto dtype = aten_to_numpy_dtype(tensor.scalar_type()); | |||
| 128 | auto sizes = to_numpy_shape(tensor.sizes()); | |||
| 129 | auto strides = to_numpy_shape(tensor.strides()); | |||
| 130 | // NumPy strides use bytes. Torch strides use element counts. | |||
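| | // (Worked example: a contiguous 2x3 float32 tensor has element strides | |||
| | // {3, 1}; multiplying by the 4-byte element size below yields the byte | |||
| | // strides {12, 4} that NumPy expects.) | |||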
| 131 | auto element_size_in_bytes = tensor.element_size(); | |||
| 132 | for (auto& stride : strides) { | |||
| 133 | stride *= element_size_in_bytes; | |||
| 134 | } | |||
| 135 | ||||
| 136 | auto array = THPObjectPtr(PyArray_New( | |||
| 137 | &PyArray_Type, | |||
| 138 | tensor.dim(), | |||
| 139 | sizes.data(), | |||
| 140 | dtype, | |||
| 141 | strides.data(), | |||
| 142 | tensor.data_ptr(), | |||
| 143 | 0, | |||
| 144 | NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE, | |||
| 145 | nullptr)); | |||
| 146 | if (!array) return nullptr; | |||
| 147 | ||||
| 148 | // TODO: This attempts to keep the underlying memory alive by setting the base | |||
| 149 | // object of the ndarray to the tensor and disabling resizes on the storage. | |||
| 150 | // This is not sufficient. For example, the tensor's storage may be changed | |||
| 151 | // via Tensor.set_, which can free the underlying memory. | |||
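| | // (For example, `t.set_(other)` rebinds t's storage, so the buffer the | |||
| | // ndarray still points at can be freed even though the base object keeps | |||
| | // the tensor object itself alive.) | |||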
| 152 | PyObject* py_tensor = THPVariable_Wrap(tensor); | |||
| 153 | if (!py_tensor) throw python_error(); | |||
| 154 | if (PyArray_SetBaseObject((PyArrayObject*)array.get(), py_tensor) == -1) { | |||
| 155 | return nullptr; | |||
| 156 | } | |||
| 157 | // Use the private storage API | |||
| 158 | tensor.storage().unsafeGetStorageImpl()->set_resizable(false); | |||
| 159 | ||||
| 160 | return array.release(); | |||
| 161 | } | |||
| 162 | ||||
| 163 | at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable/*=true*/) { | |||
| 164 | if (!is_numpy_available()) { | |||
| 165 | throw std::runtime_error("Numpy is not available"); | |||
| 166 | } | |||
| 167 | if (!PyArray_Check(obj)) { | |||
| 168 | throw TypeError("expected np.ndarray (got %s)", Py_TYPE(obj)->tp_name); | |||
| 169 | } | |||
| 170 | auto array = (PyArrayObject*)obj; | |||
| 171 | ||||
| 172 | // warn_if_not_writeable is false when the caller is about to copy the | |||
| 173 | // NumPy data anyway; the non-writeable warning is suppressed in that case. | |||
| 174 | if (!PyArray_ISWRITEABLE(array) && warn_if_not_writeable) { | |||
| 175 | TORCH_WARN_ONCE( | |||
| 176 | "The given NumPy array is not writeable, and PyTorch does " | |||
| 177 | "not support non-writeable tensors. This means you can write to the " | |||
| 178 | "underlying (supposedly non-writeable) NumPy array using the tensor. " | |||
| 179 | "You may want to copy the array to protect its data or make it writeable " | |||
| 180 | "before converting it to a tensor. This type of warning will be " | |||
| 181 | "suppressed for the rest of this program."); | |||
| 182 | ||||
| 183 | } | |||
| 184 | ||||
| 185 | int ndim = PyArray_NDIM(array); | |||
| 186 | auto sizes = to_aten_shape(ndim, PyArray_DIMS(array)); | |||
| 187 | auto strides = to_aten_shape(ndim, PyArray_STRIDES(array)); | |||
| 188 | // NumPy strides use bytes. Torch strides use element counts. | |||
| 189 | auto element_size_in_bytes = PyArray_ITEMSIZE(array); | |||
| 190 | for (auto& stride : strides) { | |||
| 191 | if (stride%element_size_in_bytes != 0) { | |||
| 192 | throw ValueError( | |||
| 193 | "given numpy array strides not a multiple of the element byte size. " | |||
| 194 | "Copy the numpy array to reallocate the memory."); | |||
| 195 | } | |||
| 196 | stride /= element_size_in_bytes; | |||
| 197 | } | |||
| 198 | ||||
| 199 | size_t storage_size = 1; | |||
| 200 | for(const auto i : c10::irange(ndim)) { | |||
| 201 | if (strides[i] < 0) { | |||
| 202 | throw ValueError( | |||
| 203 | "At least one stride in the given numpy array is negative, " | |||
| 204 | "and tensors with negative strides are not currently supported. " | |||
| 205 | "(You can probably work around this by making a copy of your array " | |||
| 206 | " with array.copy().) "); | |||
| 207 | } | |||
| 208 | // XXX: this won't work for negative strides | |||
| 209 | storage_size += (sizes[i] - 1) * strides[i]; | |||
| 210 | } | |||
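| | // storage_size == 1 + sum_i (sizes[i] - 1) * strides[i]: the linear | |||
| | // offset of the last addressable element plus one, in element counts. | |||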
| 211 | ||||
| 212 | void* data_ptr = PyArray_DATA(array); | |||
| 213 | if (!PyArray_EquivByteorders(PyArray_DESCR(array)->byteorder, NPY_NATIVE)) { | |||
| 214 | throw ValueError( | |||
| 215 | "given numpy array has byte order different from the native byte order. " | |||
| 216 | "Conversion between byte orders is currently not supported."); | |||
| 217 | } | |||
| 218 | Py_INCREF(obj); | |||
| 219 | return at::from_blob( | |||
| 220 | data_ptr, | |||
| 221 | sizes, | |||
| 222 | strides, | |||
| 223 | [obj](void* data) { | |||
| 224 | pybind11::gil_scoped_acquire gil; | |||
| 225 | Py_DECREF(obj); | |||
| 226 | }, | |||
| 227 | at::device(kCPU).dtype(numpy_dtype_to_aten(PyArray_TYPE(array))) | |||
| 228 | ); | |||
| 229 | } | |||
| 230 | ||||
| 231 | int aten_to_numpy_dtype(const ScalarType scalar_type) { | |||
| 232 | switch (scalar_type) { | |||
| 233 | case kDouble: return NPY_DOUBLE; | |||
| 234 | case kFloat: return NPY_FLOAT; | |||
| 235 | case kHalf: return NPY_HALF; | |||
| 236 | case kComplexDouble: return NPY_COMPLEX128; | |||
| 237 | case kComplexFloat: return NPY_COMPLEX64; | |||
| 238 | case kLong: return NPY_INT64; | |||
| 239 | case kInt: return NPY_INT32; | |||
| 240 | case kShort: return NPY_INT16; | |||
| 241 | case kChar: return NPY_INT8; | |||
| 242 | case kByte: return NPY_UINT8; | |||
| 243 | case kBool: return NPY_BOOL; | |||
| 244 | default: | |||
| 245 | throw TypeError("Got unsupported ScalarType %s", toString(scalar_type)); | |||
| 246 | } | |||
| 247 | } | |||
| 248 | ||||
| 249 | ScalarType numpy_dtype_to_aten(int dtype) { | |||
| 250 | switch (dtype) { | |||
| 251 | case NPY_DOUBLE: return kDouble; | |||
| 252 | case NPY_FLOAT: return kFloat; | |||
| 253 | case NPY_HALF: return kHalf; | |||
| 254 | case NPY_COMPLEX64: return kComplexFloat; | |||
| 255 | case NPY_COMPLEX128: return kComplexDouble; | |||
| 256 | case NPY_INT16: return kShort; | |||
| 257 | case NPY_INT8: return kChar; | |||
| 258 | case NPY_UINT8: return kByte; | |||
| 259 | case NPY_BOOL: return kBool; | |||
| 260 | default: | |||
| 261 | // Workaround: MSVC does not support two switch cases that have the same value | |||
| 262 | if (dtype == NPY_INT || dtype == NPY_INT32) { | |||
| 263 | // To cover all cases we must use NPY_INT because | |||
| 264 | // NPY_INT32 is an alias which maybe equal to: | |||
| 265 | // - NPY_INT, when sizeof(int) = 4 and sizeof(long) = 8 | |||
| 266 | // - NPY_LONG, when sizeof(int) = 4 and sizeof(long) = 4 | |||
| 267 | return kInt; | |||
| 268 | } else if (dtype == NPY_LONGLONG || dtype == NPY_INT64) { | |||
| 269 | // NPY_INT64 is an alias which maybe equal to: | |||
| 270 | // - NPY_LONG, when sizeof(long) = 8 and sizeof(long long) = 8 | |||
| 271 | // - NPY_LONGLONG, when sizeof(long) = 4 and sizeof(long long) = 8 | |||
| 272 | return kLong; | |||
| 273 | } else { | |||
| 274 | break; // unsupported dtype: fall through to the generic error below (this branch exists only for the MSVC workaround) | |||
| 275 | } | |||
| 276 | } | |||
| 277 | auto pytype = THPObjectPtr(PyArray_TypeObjectFromType(dtype)); | |||
| 278 | if (!pytype) throw python_error(); | |||
| 279 | throw TypeError( | |||
| 280 | "can't convert np.ndarray of type %s. The only supported types are: " | |||
| 281 | "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint8, and bool.", | |||
| 282 | ((PyTypeObject*)pytype.get())->tp_name); | |||
| 283 | } | |||
| 284 | ||||
| 285 | bool is_numpy_int(PyObject* obj) { | |||
| 286 | return is_numpy_available() && PyArray_IsScalar((obj), Integer); | |||
| 287 | } | |||
| 288 | ||||
| 289 | bool is_numpy_scalar(PyObject* obj) { | |||
| 290 | return is_numpy_available() && (is_numpy_int(obj) || PyArray_IsScalar(obj, Bool) || | |||
| 291 | PyArray_IsScalar(obj, Floating) || PyArray_IsScalar(obj, ComplexFloating)); | |||
| 292 | } | |||
| 293 | ||||
| 294 | at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { | |||
| 295 | if (!is_numpy_available()) { | |||
| 296 | throw std::runtime_error("Numpy is not available"); | |||
| 297 | } | |||
| 298 | auto cuda_dict = THPObjectPtr(PyObject_GetAttrString(obj, "__cuda_array_interface__")); | |||
| 299 | TORCH_INTERNAL_ASSERT(cuda_dict); | |||
| 300 | ||||
| 301 | if (!PyDict_Check(cuda_dict)) { | |||
| 302 | throw TypeError("`__cuda_array_interface__` must be a dict"); | |||
| 303 | } | |||
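| | // Illustrative interface dict (hypothetical values) for a 2x3 C-contiguous | |||
| | // little-endian float32 array: | |||
| | // {'shape': (2, 3), 'typestr': '<f4', 'data': (ptr, False), 'strides': None} | |||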
| 304 | ||||
| 305 | // Extract the `obj.__cuda_array_interface__['shape']` attribute | |||
| 306 | std::vector<int64_t> sizes; | |||
| 307 | { | |||
| 308 | PyObject *py_shape = PyDict_GetItemString(cuda_dict, "shape"); | |||
| 309 | if (py_shape == nullptr) { | |||
| 310 | throw TypeError("attribute `shape` must exist"); | |||
| 311 | } | |||
| 312 | sizes = seq_to_aten_shape(py_shape); | |||
| 313 | } | |||
| 314 | ||||
| 315 | // Extract the `obj.__cuda_array_interface__['typestr']` attribute | |||
| 316 | ScalarType dtype; | |||
| 317 | // NOLINTNEXTLINE(cppcoreguidelines-init-variables) | |||
| 318 | int dtype_size_in_bytes; | |||
| 319 | { | |||
| 320 | PyObject *py_typestr = PyDict_GetItemString(cuda_dict, "typestr"); | |||
| 321 | if (py_typestr == nullptr) { | |||
| 322 | throw TypeError("attribute `typestr` must exist"); | |||
| 323 | } | |||
| 324 | // NOLINTNEXTLINE(cppcoreguidelines-init-variables) | |||
| 325 | PyArray_Descr *descr; | |||
| 326 | if (!PyArray_DescrConverter(py_typestr, &descr)) { | |||
| 327 | throw ValueError("cannot parse `typestr`"); | |||
| 328 | } | |||
| 329 | dtype = numpy_dtype_to_aten(descr->type_num); | |||
| 330 | dtype_size_in_bytes = descr->elsize; | |||
| 331 | TORCH_INTERNAL_ASSERT(dtype_size_in_bytes > 0); | |||
| 332 | } | |||
| 333 | ||||
| 334 | // Extract the `obj.__cuda_array_interface__['data']` attribute | |||
| 335 | // NOLINTNEXTLINE(cppcoreguidelines-init-variables) | |||
| 336 | void *data_ptr; | |||
| 337 | { | |||
| 338 | PyObject *py_data = PyDict_GetItemString(cuda_dict, "data"); | |||
| 339 | if (py_data == nullptr) { | |||
| 340 | throw TypeError("attribute `shape` data exist"); | |||
| 341 | } | |||
| 342 | if (!PyTuple_Check(py_data) || PyTuple_GET_SIZE(py_data) != 2) { | |||
| 343 | throw TypeError("`data` must be a 2-tuple of (int, bool)"); | |||
| 344 | } | |||
| 345 | data_ptr = PyLong_AsVoidPtr(PyTuple_GET_ITEM(py_data, 0)); | |||
| 346 | if (data_ptr == nullptr && PyErr_Occurred()) { | |||
| 347 | throw python_error(); | |||
| 348 | } | |||
| 349 | int read_only = PyObject_IsTrue(PyTuple_GET_ITEM(py_data, 1)); | |||
| 350 | if (read_only == -1) { | |||
| 351 | throw python_error(); | |||
| 352 | } | |||
| 353 | if (read_only) { | |||
| 354 | throw TypeError("the read only flag is not supported, should always be False"); | |||
| 355 | } | |||
| 356 | } | |||
| 357 | ||||
| 358 | // Extract the `obj.__cuda_array_interface__['strides']` attribute | |||
| 359 | std::vector<int64_t> strides; | |||
| 360 | { | |||
| 361 | PyObject *py_strides = PyDict_GetItemString(cuda_dict, "strides"); | |||
| 362 | if (py_strides != nullptr && py_strides != Py_None) { | |||
| 363 | if (PySequence_Length(py_strides) == -1 || PySequence_Length(py_strides) != sizes.size()) { | |||
| 364 | throw TypeError("strides must be a sequence of the same length as shape"); | |||
| 365 | } | |||
| 366 | strides = seq_to_aten_shape(py_strides); | |||
| 367 | ||||
| 368 | // __cuda_array_interface__ strides use bytes. Torch strides use element counts. | |||
| 369 | for (auto& stride : strides) { | |||
| 370 | if (stride%dtype_size_in_bytes != 0) { | |||
| 371 | throw ValueError( | |||
| 372 | "given array strides not a multiple of the element byte size. " | |||
| 373 | "Make a copy of the array to reallocate the memory."); | |||
| 374 | } | |||
| 375 | stride /= dtype_size_in_bytes; | |||
| 376 | } | |||
| 377 | } else { | |||
| 378 | strides = at::detail::defaultStrides(sizes); | |||
| 379 | } | |||
| 380 | } | |||
| 381 | ||||
| 382 | Py_INCREF(obj); | |||
| 383 | return at::from_blob( | |||
| 384 | data_ptr, | |||
| 385 | sizes, | |||
| 386 | strides, | |||
| 387 | [obj](void* data) { | |||
| 388 | pybind11::gil_scoped_acquire gil; | |||
| 389 | Py_DECREF(obj); | |||
| 390 | }, | |||
| 391 | at::device(kCUDA).dtype(dtype) | |||
| 392 | ); | |||
| 393 | } | |||
| 394 | }} // namespace torch::utils | |||
| 395 | ||||
| 396 | #endif // USE_NUMPY |
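The short listing that follows is the checker's model stub for `PySequence_GetItem`: when the API is not defined as a macro, the analyzer links in a body that returns a fresh reference via `clang_analyzer_PyObject_New_Reference()`. That stub is the ownership model behind the line-99 warning above.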
| 1 | #ifndef PySequence_GetItem |
| 2 | struct _object; |
| 3 | typedef struct _object PyObject; |
| 4 | PyObject* clang_analyzer_PyObject_New_Reference(); |
| 5 | PyObject* PySequence_GetItem(PyObject *o, Py_ssize_t i) { |
| 6 | return clang_analyzer_PyObject_New_Reference(); |
| 7 | } |
| 8 | #else |
| 9 | #warning "API PySequence_GetItem is defined as a macro." |
| 10 | #endif |
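Under that model, the `PySequence_GetItem` call at line 99 produces a new reference. In `seq_to_aten_shape` that reference is immediately handed to `THPObjectPtr`, which, as a RAII reference holder, should decrement the count when `item` goes out of scope. Below is a minimal sketch of that pattern, assuming `THPObjectPtr` behaves like the hypothetical `OwnedPyObject` (the name and interface are illustrative, not the actual PyTorch class):

```cpp
#include <Python.h>
#include <stdexcept>

// Hypothetical stand-in for THPObjectPtr (illustrative, not PyTorch's class):
// owns exactly one PyObject reference and drops it in its destructor.
struct OwnedPyObject {
  explicit OwnedPyObject(PyObject* p = nullptr) : ptr_(p) {}
  ~OwnedPyObject() { Py_XDECREF(ptr_); }       // release the owned reference
  OwnedPyObject(const OwnedPyObject&) = delete;
  OwnedPyObject& operator=(const OwnedPyObject&) = delete;
  operator PyObject*() const { return ptr_; }  // use as a raw PyObject*
  PyObject* release() { PyObject* p = ptr_; ptr_ = nullptr; return p; }
 private:
  PyObject* ptr_;
};

// Usage mirroring lines 99-103 of the report: the new reference returned by
// PySequence_GetItem is owned by `item` and decref'd on every exit path.
long long item_as_long_long(PyObject* py_seq, Py_ssize_t i) {
  OwnedPyObject item(PySequence_GetItem(py_seq, i));  // new reference
  if (!item) throw std::runtime_error("PySequence_GetItem failed");
  return PyLong_AsLongLong(item);
}  // ~OwnedPyObject runs here and balances the reference count
```

If `THPObjectPtr` follows this pattern, the reference flagged at line 99 is released when `item` is destroyed, and the report would be a false positive of the stub model rather than a real leak.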