Bug Summary

File: build/../torch/csrc/utils/tensor_numpy.cpp
Warning: line 99, column 30
PyObject ownership leak with reference count of 1
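
For context: PySequence_GetItem returns a new reference, so every path out of the calling code must release it. Below is a minimal sketch of the leak pattern this checker reports and of the usual fix; the helper names are hypothetical and not from the PyTorch sources.

#include <Python.h>

// Leaky variant: the early return on the error path drops the owned
// reference returned by PySequence_GetItem without releasing it.
long first_item_leaky(PyObject* seq) {
    PyObject* item = PySequence_GetItem(seq, 0);  // new reference (+1)
    if (!item) return -1;
    long v = PyLong_AsLong(item);
    if (v == -1 && PyErr_Occurred()) {
        return -1;  // BUG: `item` leaks with reference count 1
    }
    Py_DECREF(item);
    return v;
}

// Fixed variant: the reference is released on every path.
long first_item_fixed(PyObject* seq) {
    PyObject* item = PySequence_GetItem(seq, 0);  // new reference (+1)
    if (!item) return -1;
    long v = PyLong_AsLong(item);
    Py_DECREF(item);  // released before any early return below
    if (v == -1 && PyErr_Occurred()) return -1;
    return v;
}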

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name tensor_numpy.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-output=html -analyzer-checker=python -analyzer-disable-checker=deadcode -analyzer-config prune-paths=true,suppress-c++-stdlib=true,suppress-inlined-defensive-checks=false,suppress-null-return-paths=false,crosscheck-with-z3=true,model-path=/opt/pyrefcon/lib/pyrefcon/models/models -analyzer-config experimental-enable-naive-ctu-analysis=true,ctu-dir=/tmp/pyrefcon/pytorch/csa-scan,ctu-index-name=/tmp/pyrefcon/pytorch/csa-scan/externalDefMap.txt,ctu-invocation-list=/tmp/pyrefcon/pytorch/csa-scan/invocations.yaml,display-ctu-progress=false -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -relaxed-aliasing -fno-rounding-math -ffp-exception-behavior=ignore -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/tmp/pyrefcon/pytorch/build -resource-dir /opt/pyrefcon/lib/clang/13.0.0 -isystem third_party/gloo -isystem ../cmake/../third_party/gloo -isystem ../cmake/../third_party/googletest/googlemock/include -isystem ../cmake/../third_party/googletest/googletest/include -isystem ../third_party/protobuf/src -isystem ../third_party/gemmlowp -isystem ../third_party/neon2sse -isystem ../third_party/XNNPACK/include -isystem ../third_party -isystem ../cmake/../third_party/eigen -isystem /opt/pyrefcon/lib/pyrefcon/models/python3.8 -isystem /usr/lib/python3/dist-packages/numpy/core/include -isystem ../cmake/../third_party/pybind11/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem ../third_party/ideep/mkl-dnn/include -isystem ../third_party/ideep/include -D BUILDING_TESTS -D FMT_HEADER_ONLY=1 -D HAVE_MALLOC_USABLE_SIZE=1 -D HAVE_MMAP=1 -D HAVE_SHM_OPEN=1 -D HAVE_SHM_UNLINK=1 -D MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -D ONNXIFI_ENABLE_EXT=1 -D ONNX_ML=1 -D ONNX_NAMESPACE=onnx_torch -D THP_BUILD_MAIN_LIB -D USE_C10D -D USE_C10D_GLOO -D USE_C10D_MPI -D USE_DISTRIBUTED -D USE_EXTERNAL_MZCRC -D USE_NUMPY -D USE_RPC -D USE_TENSORPIPE -D USE_VALGRIND -D _FILE_OFFSET_BITS=64 -D torch_python_EXPORTS -I aten/src -I ../aten/src -I . -I ../ -I ../cmake/../third_party/benchmark/include -I caffe2/contrib/aten -I ../third_party/onnx -I third_party/onnx -I ../third_party/foxi -I third_party/foxi -I ../torch/.. -I ../torch/../aten/src -I ../torch/../aten/src/TH -I caffe2/aten/src -I third_party -I ../torch/../third_party/valgrind-headers -I ../torch/../third_party/gloo -I ../torch/../third_party/onnx -I ../torch/csrc -I ../torch/csrc/api/include -I ../torch/lib -I ../torch/lib/libshm -I ../torch/csrc/distributed -I ../torch/csrc/api -I ../c10/.. 
-I third_party/ideep/mkl-dnn/include -I ../third_party/ideep/mkl-dnn/src/../include -I ../torch/lib/libshm/../../../torch/lib -I ../third_party/fmt/include -D USE_PTHREADPOOL -D NDEBUG -D USE_KINETO -D LIBKINETO_NOCUPTI -D USE_FBGEMM -D USE_QNNPACK -D USE_PYTORCH_QNNPACK -D USE_XNNPACK -D SYMBOLICATE_MOBILE_DEBUG_HANDLE -D HAVE_AVX_CPU_DEFINITION -D HAVE_AVX2_CPU_DEFINITION -D NDEBUG -D NDEBUG -D CAFFE2_USE_GLOO -D HAVE_GCC_GET_CPUID -D USE_AVX -D USE_AVX2 -D TH_HAVE_THREAD -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /opt/pyrefcon/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -Wno-unused-but-set-variable -Wno-maybe-uninitialized -Werror=format -Werror=cast-function-type -Wno-stringop-overflow -Wno-write-strings -Wno-strict-aliasing -w -std=gnu++14 -fdeprecated-macro -fdebug-compilation-dir=/tmp/pyrefcon/pytorch/build -ferror-limit 19 -fvisibility-inlines-hidden -fopenmp -fopenmp-cuda-parallel-target-regions -pthread -fgnuc-version=4.2.1 -fcxx-exceptions -fexceptions -faligned-allocation -fcolor-diagnostics -vectorize-loops -vectorize-slp -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/pyrefcon/pytorch/csa-scan/reports -x c++ ../torch/csrc/utils/tensor_numpy.cpp

../torch/csrc/utils/tensor_numpy.cpp

1#include <torch/csrc/THP.h>
2#include <torch/csrc/utils/tensor_numpy.h>
3#define WITH_NUMPY_IMPORT_ARRAY
4#include <torch/csrc/utils/numpy_stub.h>
5#include <c10/util/irange.h>
6
7#ifndef USE_NUMPY
8namespace torch { namespace utils {
9PyObject* tensor_to_numpy(const at::Tensor& tensor) {
10 throw std::runtime_error("PyTorch was compiled without NumPy support");
11}
12at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable/*=true*/) {
13 throw std::runtime_error("PyTorch was compiled without NumPy support");
14}
15
16bool is_numpy_available() {
17 throw std::runtime_error("PyTorch was compiled without NumPy support");
18}
19
20bool is_numpy_int(PyObject* obj) {
21 throw std::runtime_error("PyTorch was compiled without NumPy support");
22}
23bool is_numpy_scalar(PyObject* obj) {
24 throw std::runtime_error("PyTorch was compiled without NumPy support");
25}
26at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
27 throw std::runtime_error("PyTorch was compiled without NumPy support");
28}
29}}
30#else
31
32#include <torch/csrc/DynamicTypes.h>
33#include <torch/csrc/Exceptions.h>
34#include <torch/csrc/autograd/python_variable.h>
35#include <torch/csrc/utils/object_ptr.h>
36
37#include <ATen/ATen.h>
38#include <ATen/TensorUtils.h>
39#include <memory>
40#include <sstream>
41#include <stdexcept>
42
43using namespace at;
44using namespace torch::autograd;
45
46namespace torch { namespace utils {
47
48bool is_numpy_available() {
49 static bool available = []() {
50 if (_import_array() >= 0) {
51 return true;
52 }
53 // Try to get exception message, print warning and return false
54 std::string message = "Failed to initialize NumPy";
55 // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
56 PyObject *type, *value, *traceback;
57 PyErr_Fetch(&type, &value, &traceback);
58 if (auto str = value ? PyObject_Str(value) : nullptr) {
59 if (auto enc_str = PyUnicode_AsEncodedString(str, "utf-8", "strict")) {
60 if (auto byte_str = PyBytes_AS_STRING(enc_str)) {
61 message += ": " + std::string(byte_str);
62 }
63 Py_XDECREF(enc_str);
64 }
65 Py_XDECREF(str);
66 }
67 PyErr_Clear();
68 TORCH_WARN(message);
69 return false;
70 }();
71 return available;
72}
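
A note on the error-fetching idiom above: PyErr_Fetch transfers ownership of the exception triple to the caller, so all three objects must eventually be released. A minimal sketch of that contract (simplified, not the PyTorch code):

#include <Python.h>
#include <string>

// PyErr_Fetch hands the caller owned (possibly NULL) references to type,
// value, and traceback, and clears the error indicator as a side effect.
std::string fetch_error_message() {
    PyObject *type = nullptr, *value = nullptr, *traceback = nullptr;
    PyErr_Fetch(&type, &value, &traceback);
    std::string message = "unknown error";
    if (PyObject* str = value ? PyObject_Str(value) : nullptr) {
        if (const char* utf8 = PyUnicode_AsUTF8(str)) {
            message = utf8;  // buffer is owned by `str`; copy before decref
        }
        Py_DECREF(str);
    }
    Py_XDECREF(type);       // all three must be released
    Py_XDECREF(value);
    Py_XDECREF(traceback);
    return message;
}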
73static std::vector<npy_intp> to_numpy_shape(IntArrayRef x) {
74 // shape and stride conversion from int64_t to npy_intp
75 auto nelem = x.size();
76 auto result = std::vector<npy_intp>(nelem);
77 for(const auto i : c10::irange(nelem)) {
78 result[i] = static_cast<npy_intp>(x[i]);
79 }
80 return result;
81}
82
83static std::vector<int64_t> to_aten_shape(int ndim, npy_intp* values) {
84 // shape and stride conversion from npy_intp to int64_t
85 auto result = std::vector<int64_t>(ndim);
86 for(const auto i : c10::irange(ndim)) {
87 result[i] = static_cast<int64_t>(values[i]);
88 }
89 return result;
90}
91
92static std::vector<int64_t> seq_to_aten_shape(PyObject *py_seq) {
93 int ndim = PySequence_Length(py_seq);
94 if (ndim == -1) {
9. Assuming the condition is false
10. Taking false branch
95 throw TypeError("shape and strides must be sequences");
96 }
97 auto result = std::vector<int64_t>(ndim);
98 for(const auto i : c10::irange(ndim)) {
99 auto item = THPObjectPtr(PySequence_GetItem(py_seq, i));
11. Calling 'PySequence_GetItem'
13. Returning from 'PySequence_GetItem'
19. PyObject ownership leak with reference count of 1
100 if (!item) throw python_error();
14. Assuming the condition is false
15. Taking false branch
101
102 result[i] = PyLong_AsLongLong(item);
103 if (result[i] == -1 && PyErr_Occurred()) throw python_error();
16. Assuming the condition is true
17. Assuming the condition is true
18. Taking true branch
104 }
105 return result;
106}
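
The flagged statement at line 99 wraps the new reference from PySequence_GetItem in THPObjectPtr, whose destructor is meant to release it even when python_error is thrown; the checker evidently tracks the raw return value and reports it as leaked on the exception path through line 103. For comparison, a sketch of the same loop with explicit reference management (simplified error handling, not the PyTorch code):

#include <Python.h>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Explicit Py_DECREF instead of the THPObjectPtr RAII wrapper, releasing
// the item before any exception can escape the loop body.
static std::vector<int64_t> seq_to_shape_sketch(PyObject* py_seq) {
    Py_ssize_t ndim = PySequence_Length(py_seq);
    if (ndim == -1) throw std::runtime_error("shape and strides must be sequences");
    std::vector<int64_t> result(ndim);
    for (Py_ssize_t i = 0; i < ndim; ++i) {
        PyObject* item = PySequence_GetItem(py_seq, i);  // new reference
        if (!item) throw std::runtime_error("python error");
        int64_t v = PyLong_AsLongLong(item);
        Py_DECREF(item);  // released on every path, including the throw below
        if (v == -1 && PyErr_Occurred()) throw std::runtime_error("python error");
        result[i] = v;
    }
    return result;
}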
107
108PyObject* tensor_to_numpy(const at::Tensor& tensor) {
109 if (!is_numpy_available()) {
110 throw std::runtime_error("Numpy is not available");
111 }
112 if (tensor.device().type() != DeviceType::CPU) {
113 throw TypeError(
114 "can't convert %s device type tensor to numpy. Use Tensor.cpu() to "
115 "copy the tensor to host memory first.", tensor.device().str().c_str());
116 }
117 if (tensor.layout() != Layout::Strided) {
118 throw TypeError(
119 "can't convert %s layout tensor to numpy."
120 "convert the tensor to a strided layout first.", c10::str(tensor.layout()).c_str());
121 }
122 if (at::GradMode::is_enabled() && tensor.requires_grad()) {
123 throw std::runtime_error(
124 "Can't call numpy() on Tensor that requires grad. "
125 "Use tensor.detach().numpy() instead.");
126 }
127 auto dtype = aten_to_numpy_dtype(tensor.scalar_type());
128 auto sizes = to_numpy_shape(tensor.sizes());
129 auto strides = to_numpy_shape(tensor.strides());
130 // NumPy strides use bytes. Torch strides use element counts.
131 auto element_size_in_bytes = tensor.element_size();
132 for (auto& stride : strides) {
133 stride *= element_size_in_bytes;
134 }
135
136 auto array = THPObjectPtr(PyArray_New(
137 &PyArray_Type,
138 tensor.dim(),
139 sizes.data(),
140 dtype,
141 strides.data(),
142 tensor.data_ptr(),
143 0,
144 NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE,
145 nullptr));
146 if (!array) return nullptr;
147
148 // TODO: This attempts to keep the underlying memory alive by setting the base
149 // object of the ndarray to the tensor and disabling resizes on the storage.
150 // This is not sufficient. For example, the tensor's storage may be changed
151 // via Tensor.set_, which can free the underlying memory.
152 PyObject* py_tensor = THPVariable_Wrap(tensor);
153 if (!py_tensor) throw python_error();
154 if (PyArray_SetBaseObject((PyArrayObject*)array.get(), py_tensor) == -1) {
155 return nullptr;
156 }
157 // Use the private storage API
158 tensor.storage().unsafeGetStorageImpl()->set_resizable(false);
159
160 return array.release();
161}
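
On the ownership of py_tensor above: per NumPy's documentation, PyArray_SetBaseObject steals a reference to the base object (and releases it even on failure), which is why the code does not decref py_tensor itself. A sketch of that contract in isolation; attach_base is a hypothetical helper, and the stealing behavior is the documented NumPy contract rather than anything shown in this report:

#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>

// Hand the array a co-owning reference to `base` while the caller keeps
// its own. PyArray_SetBaseObject steals the passed reference, so after
// the call (success or failure) the caller must not release it again.
static int attach_base(PyArrayObject* arr, PyObject* base) {
    Py_INCREF(base);                              // reference to be stolen
    if (PyArray_SetBaseObject(arr, base) == -1) {
        return -1;                                // stolen reference already dropped
    }
    return 0;
}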
162
163at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable/*=true*/) {
164 if (!is_numpy_available()) {
165 throw std::runtime_error("Numpy is not available");
166 }
167 if (!PyArray_Check(obj)) {
168 throw TypeError("expected np.ndarray (got %s)", Py_TYPE(obj)->tp_name);
169 }
170 auto array = (PyArrayObject*)obj;
171
172 // warn_if_not_writeable is false when the caller is about to copy the
173 // numpy array, so the warning is suppressed when a copy is being created.
174 if (!PyArray_ISWRITEABLE(array) && warn_if_not_writeable) {
175 TORCH_WARN_ONCE(
176 "The given NumPy array is not writeable, and PyTorch does "
177 "not support non-writeable tensors. This means you can write to the "
178 "underlying (supposedly non-writeable) NumPy array using the tensor. "
179 "You may want to copy the array to protect its data or make it writeable "
180 "before converting it to a tensor. This type of warning will be "
181 "suppressed for the rest of this program.");
182
183 }
184
185 int ndim = PyArray_NDIM(array);
186 auto sizes = to_aten_shape(ndim, PyArray_DIMS(array));
187 auto strides = to_aten_shape(ndim, PyArray_STRIDES(array));
188 // NumPy strides use bytes. Torch strides use element counts.
189 auto element_size_in_bytes = PyArray_ITEMSIZE(array);
190 for (auto& stride : strides) {
191 if (stride%element_size_in_bytes != 0) {
192 throw ValueError(
193 "given numpy array strides not a multiple of the element byte size. "
194 "Copy the numpy array to reallocate the memory.");
195 }
196 stride /= element_size_in_bytes;
197 }
198
199 size_t storage_size = 1;
200 for(const auto i : c10::irange(ndim)) {
201 if (strides[i] < 0) {
202 throw ValueError(
203 "At least one stride in the given numpy array is negative, "
204 "and tensors with negative strides are not currently supported. "
205 "(You can probably work around this by making a copy of your array "
206 " with array.copy().) ");
207 }
208 // XXX: this won't work for negative strides
209 storage_size += (sizes[i] - 1) * strides[i];
210 }
211
212 void* data_ptr = PyArray_DATA(array);
213 if (!PyArray_EquivByteorders(PyArray_DESCR(array)->byteorder, NPY_NATIVE)) {
214 throw ValueError(
215 "given numpy array has byte order different from the native byte order. "
216 "Conversion between byte orders is currently not supported.");
217 }
218 Py_INCREF(obj);
219 return at::from_blob(
220 data_ptr,
221 sizes,
222 strides,
223 [obj](void* data) {
224 pybind11::gil_scoped_acquire gil;
225 Py_DECREF(obj);
226 },
227 at::device(kCPU).dtype(numpy_dtype_to_aten(PyArray_TYPE(array)))
228 );
229}
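
The keep-alive idiom above generalizes: take a reference to the producing Python object and release it, under the GIL, in the at::from_blob deleter. A minimal standalone sketch; the function name and the fixed dtype are illustrative, not from the sources:

#include <Python.h>
#include <ATen/ATen.h>
#include <pybind11/pybind11.h>

// Wrap an externally owned buffer as a CPU tensor, keeping the Python
// object that owns the memory alive for the tensor's lifetime.
at::Tensor wrap_external_buffer(void* data, at::IntArrayRef sizes,
                                at::IntArrayRef strides, PyObject* owner) {
    Py_INCREF(owner);  // the tensor now co-owns the Python object
    return at::from_blob(
        data, sizes, strides,
        [owner](void*) {
            // The deleter may run on an arbitrary thread; Py_DECREF
            // requires the GIL to be held.
            pybind11::gil_scoped_acquire gil;
            Py_DECREF(owner);
        },
        at::device(at::kCPU).dtype(at::kFloat));  // dtype assumed for the sketch
}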
230
231int aten_to_numpy_dtype(const ScalarType scalar_type) {
232 switch (scalar_type) {
233 case kDouble: return NPY_DOUBLE;
234 case kFloat: return NPY_FLOAT;
235 case kHalf: return NPY_HALF;
236 case kComplexDouble: return NPY_COMPLEX128;
237 case kComplexFloat: return NPY_COMPLEX64;
238 case kLong: return NPY_INT64;
239 case kInt: return NPY_INT32;
240 case kShort: return NPY_INT16;
241 case kChar: return NPY_INT8;
242 case kByte: return NPY_UINT8;
243 case kBool: return NPY_BOOL;
244 default:
245 throw TypeError("Got unsupported ScalarType %s", toString(scalar_type));
246 }
247}
248
249ScalarType numpy_dtype_to_aten(int dtype) {
250 switch (dtype) {
251 case NPY_DOUBLE: return kDouble;
252 case NPY_FLOAT: return kFloat;
253 case NPY_HALF: return kHalf;
254 case NPY_COMPLEX64: return kComplexFloat;
255 case NPY_COMPLEX128: return kComplexDouble;
256 case NPY_INT16: return kShort;
257 case NPY_INT8: return kChar;
258 case NPY_UINT8: return kByte;
259 case NPY_BOOL: return kBool;
260 default:
261 // Workaround: MSVC does not support two switch cases that have the same value
262 if (dtype == NPY_INT || dtype == NPY_INT32) {
263 // To cover all cases we must use NPY_INT because
264 // NPY_INT32 is an alias which maybe equal to:
265 // - NPY_INT, when sizeof(int) = 4 and sizeof(long) = 8
266 // - NPY_LONG, when sizeof(int) = 4 and sizeof(long) = 4
267 return kInt;
268 } else if (dtype == NPY_LONGLONG || dtype == NPY_INT64) {
269 // NPY_INT64 is an alias which maybe equal to:
270 // - NPY_LONG, when sizeof(long) = 8 and sizeof(long long) = 8
271 // - NPY_LONGLONG, when sizeof(long) = 4 and sizeof(long long) = 8
272 return kLong;
273 } else {
274 break; // break as if this is one of the cases above because this is only a workaround
275 }
276 }
277 auto pytype = THPObjectPtr(PyArray_TypeObjectFromType(dtype));
278 if (!pytype) throw python_error();
279 throw TypeError(
280 "can't convert np.ndarray of type %s. The only supported types are: "
281 "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint8, and bool.",
282 ((PyTypeObject*)pytype.get())->tp_name);
283}
284
285bool is_numpy_int(PyObject* obj) {
286 return is_numpy_available() && PyArray_IsScalar((obj), Integer);
287}
288
289bool is_numpy_scalar(PyObject* obj) {
290 return is_numpy_available() && (is_numpy_int(obj) || PyArray_IsScalar(obj, Bool) ||
291 PyArray_IsScalar(obj, Floating) || PyArray_IsScalar(obj, ComplexFloating));
292}
293
294at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
295 if (!is_numpy_available()) {
1. Taking false branch
296 throw std::runtime_error("Numpy is not available");
297 }
298 auto cuda_dict = THPObjectPtr(PyObject_GetAttrString(obj, "__cuda_array_interface__"));
299 TORCH_INTERNAL_ASSERT(cuda_dict);
2. Assuming the condition is false
3. Taking false branch
300
301 if (!PyDict_Check(cuda_dict)) {
4. Assuming the condition is true
5. Taking false branch
302 throw TypeError("`__cuda_array_interface__` must be a dict");
303 }
304
305 // Extract the `obj.__cuda_array_interface__['shape']` attribute
306 std::vector<int64_t> sizes;
307 {
308 PyObject *py_shape = PyDict_GetItemString(cuda_dict, "shape");
309 if (py_shape == nullptr) {
6. Assuming the condition is false
7. Taking false branch
310 throw TypeError("attribute `shape` must exist");
311 }
312 sizes = seq_to_aten_shape(py_shape);
8. Calling 'seq_to_aten_shape'
313 }
314
315 // Extract the `obj.__cuda_array_interface__['typestr']` attribute
316 ScalarType dtype;
317 // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
318 int dtype_size_in_bytes;
319 {
320 PyObject *py_typestr = PyDict_GetItemString(cuda_dict, "typestr");
321 if (py_typestr == nullptr) {
322 throw TypeError("attribute `typestr` must exist");
323 }
324 // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
325 PyArray_Descr *descr;
326 if (!PyArray_DescrConverter(py_typestr, &descr)) {
327 throw ValueError("cannot parse `typestr`");
328 }
329 dtype = numpy_dtype_to_aten(descr->type_num);
330 dtype_size_in_bytes = descr->elsize;
331 TORCH_INTERNAL_ASSERT(dtype_size_in_bytes > 0);
332 }
333
334 // Extract the `obj.__cuda_array_interface__['data']` attribute
335 // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
336 void *data_ptr;
337 {
338 PyObject *py_data = PyDict_GetItemString(cuda_dict, "data");
339 if (py_data == nullptr) {
340 throw TypeError("attribute `shape` data exist");
341 }
342 if (!PyTuple_Check(py_data) || PyTuple_GET_SIZE(py_data) != 2) {
343 throw TypeError("`data` must be a 2-tuple of (int, bool)");
344 }
345 data_ptr = PyLong_AsVoidPtr(PyTuple_GET_ITEM(py_data, 0));
346 if (data_ptr == nullptr && PyErr_Occurred()) {
347 throw python_error();
348 }
349 int read_only = PyObject_IsTrue(PyTuple_GET_ITEM(py_data, 1));
350 if (read_only == -1) {
351 throw python_error();
352 }
353 if (read_only) {
354 throw TypeError("the read only flag is not supported, should always be False");
355 }
356 }
357
358 // Extract the `obj.__cuda_array_interface__['strides']` attribute
359 std::vector<int64_t> strides;
360 {
361 PyObject *py_strides = PyDict_GetItemString(cuda_dict, "strides");
362 if (py_strides != nullptr && py_strides != Py_None) {
363 if (PySequence_Length(py_strides) == -1 || PySequence_Length(py_strides) != sizes.size()) {
364 throw TypeError("strides must be a sequence of the same length as shape");
365 }
366 strides = seq_to_aten_shape(py_strides);
367
368 // __cuda_array_interface__ strides use bytes. Torch strides use element counts.
369 for (auto& stride : strides) {
370 if (stride%dtype_size_in_bytes != 0) {
371 throw ValueError(
372 "given array strides not a multiple of the element byte size. "
373 "Make a copy of the array to reallocate the memory.");
374 }
375 stride /= dtype_size_in_bytes;
376 }
377 } else {
378 strides = at::detail::defaultStrides(sizes);
379 }
380 }
381
382 Py_INCREF(obj);
383 return at::from_blob(
384 data_ptr,
385 sizes,
386 strides,
387 [obj](void* data) {
388 pybind11::gil_scoped_acquire gil;
389 Py_DECREF(obj);
390 },
391 at::device(kCUDA).dtype(dtype)
392 );
393}
394}} // namespace torch::utils
395
396#endif // USE_NUMPY

/opt/pyrefcon/lib/pyrefcon/models/models/PySequence_GetItem.model

1#ifndef PySequence_GetItem
2struct _object;
3typedef struct _object PyObject;
4PyObject* clang_analyzer_PyObject_New_Reference();
5PyObject* PySequence_GetItem(PyObject *o, Py_ssize_t i) {
6 return clang_analyzer_PyObject_New_Reference();
12. Setting reference count to 1
7}
8#else
9#warning "API PySequence_GetItem is defined as a macro."
10#endif
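
The model above is how PyRefcon teaches the analyzer an API's reference-count effect: the stub returns a fresh object whose reference count the checker sets to 1 and then tracks along each path. A hypothetical model for another reference-returning API would follow the same shape; this one is illustrative only, not shipped with PyRefcon:

#ifndef PyLong_FromLong
struct _object;
typedef struct _object PyObject;
PyObject* clang_analyzer_PyObject_New_Reference();
/* Hypothetical model: tells the analyzer that PyLong_FromLong returns a
   new reference (reference count 1), which the checker then tracks. */
PyObject* PyLong_FromLong(long v) {
  return clang_analyzer_PyObject_New_Reference();
}
#else
#warning "API PyLong_FromLong is defined as a macro."
#endif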