File: | build/../torch/csrc/utils/tensor_new.cpp |
Warning: | line 185, column 27 PyObject ownership leak with reference count of 1 |
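The flagged expression is the PySequence_Fast call at source line 185 (inside recursive_store in the listing below). PySequence_Fast returns a new reference, so the caller owns the resulting PyObject* and must release it; in the listing that pointer is immediately adopted by the RAII holder THPObjectPtr, which releases the reference when it goes out of scope. A minimal sketch of the two ownership patterns, assuming only the CPython C API (OwnedPyObject is an illustrative stand-in for a holder such as THPObjectPtr, not the actual PyTorch type):

    #include <Python.h>

    // Illustrative RAII holder: adopts a new reference and releases it exactly once.
    struct OwnedPyObject {
      explicit OwnedPyObject(PyObject* p) : ptr_(p) {}
      ~OwnedPyObject() { Py_XDECREF(ptr_); }
      OwnedPyObject(const OwnedPyObject&) = delete;
      OwnedPyObject& operator=(const OwnedPyObject&) = delete;
      PyObject* get() const { return ptr_; }
     private:
      PyObject* ptr_;
    };

    // Leaky pattern: the new reference returned by PySequence_Fast is never released.
    Py_ssize_t leaky_length(PyObject* obj) {
      PyObject* seq = PySequence_Fast(obj, "not a sequence");  // refcount +1, owned by caller
      if (!seq) return -1;
      return PySequence_Fast_GET_SIZE(seq);                    // missing Py_DECREF(seq): leak
    }

    // Owning pattern, as in the listing: the holder adopts the reference and
    // Py_XDECREF runs when the holder is destroyed, including on early exits.
    Py_ssize_t owned_length(PyObject* obj) {
      OwnedPyObject seq(PySequence_Fast(obj, "not a sequence"));
      if (!seq.get()) return -1;
      return PySequence_Fast_GET_SIZE(seq.get());
    }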
1 | #include <torch/csrc/python_headers.h> | ||||
2 | #include <torch/csrc/utils/tensor_new.h> | ||||
3 | |||||
4 | #include <pybind11/pybind11.h> | ||||
5 | #include <torch/csrc/DynamicTypes.h> | ||||
6 | #include <torch/csrc/Exceptions.h> | ||||
7 | #include <torch/csrc/Size.h> | ||||
8 | #include <torch/csrc/autograd/variable.h> | ||||
9 | #include <torch/csrc/utils/cuda_lazy_init.h> | ||||
10 | #include <torch/csrc/utils/numpy_stub.h> | ||||
11 | #include <torch/csrc/utils/python_arg_parser.h> | ||||
12 | #include <torch/csrc/utils/python_numbers.h> | ||||
13 | #include <torch/csrc/utils/python_scalars.h> | ||||
14 | #include <torch/csrc/utils/python_strings.h> | ||||
15 | #include <torch/csrc/utils/tensor_numpy.h> | ||||
16 | #include <torch/csrc/autograd/generated/variable_factories.h> | ||||
17 | |||||
18 | #include <ATen/ATen.h> | ||||
19 | #include <ATen/InitialTensorOptions.h> | ||||
20 | #include <ATen/NamedTensorUtils.h> | ||||
21 | #include <ATen/TracerMode.h> | ||||
22 | #include <c10/core/Backend.h> | ||||
23 | #include <c10/core/Layout.h> | ||||
24 | #include <c10/util/Exception.h> | ||||
25 | #include <c10/util/irange.h> | ||||
26 | #include <c10/util/Optional.h> | ||||
27 | |||||
28 | #include <stdexcept> | ||||
29 | #include <vector> | ||||
30 | |||||
31 | using at::Backend; | ||||
32 | using at::Device; | ||||
33 | using at::IntArrayRef; | ||||
34 | using at::kCPU; | ||||
35 | using at::kCUDA; | ||||
36 | using at::kLong; | ||||
37 | using at::kInt; | ||||
38 | using at::Scalar; | ||||
39 | using at::ScalarType; | ||||
40 | using at::Storage; | ||||
41 | using at::Tensor; | ||||
42 | using at::TensorOptions; | ||||
43 | using at::Type; | ||||
44 | using c10::optional; | ||||
45 | |||||
46 | namespace torch { namespace utils { | ||||
47 | namespace { | ||||
48 | const int MAX_DIMS = 128; | ||||
49 | |||||
50 | TensorOptions build_options(c10::TensorOptions options, at::ScalarType scalar_type, const c10::optional<Device>& device=c10::nullopt) { | ||||
51 | options = options.dtype(scalar_type); | ||||
52 | if (device.has_value()) { | ||||
53 | return options.device(device); | ||||
54 | } | ||||
55 | return options; | ||||
56 | } | ||||
57 | |||||
58 | void maybe_initialize_cuda(const Device device) { | ||||
59 | if (device.is_cuda()) { | ||||
60 | torch::utils::cuda_lazy_init(); | ||||
61 | } | ||||
62 | } | ||||
63 | |||||
64 | // NB: It appears there is some consistency invariant between options and device, where | ||||
65 | // if device is non-empty, its type must be consistent with the device type in | ||||
66 | // options. | ||||
67 | // TODO: Refactor this so we just pass everything in via options | ||||
68 | |||||
69 | Tensor dispatch_ones(c10::TensorOptions options, at::ScalarType scalar_type, const optional<Device>& device, IntArrayRef sizes) { | ||||
70 | maybe_initialize_cuda(options.device()); | ||||
71 | pybind11::gil_scoped_release no_gil; | ||||
72 | return torch::ones(sizes, build_options(options, scalar_type, device)); | ||||
73 | } | ||||
74 | |||||
75 | Tensor new_with_sizes(c10::TensorOptions options, at::ScalarType scalar_type, const optional<Device>& device, IntArrayRef sizes) { | ||||
76 | maybe_initialize_cuda(options.device()); | ||||
77 | pybind11::gil_scoped_release no_gil; | ||||
78 | return torch::empty(sizes, build_options(options, scalar_type, device)); | ||||
79 | } | ||||
80 | |||||
81 | Tensor new_with_storage(c10::TensorOptions options, at::ScalarType scalar_type, Storage storage) { | ||||
82 | auto tensor = at::empty({}, build_options(options, scalar_type)); | ||||
83 | tensor.set_(std::move(storage)); | ||||
84 | return tensor; | ||||
85 | } | ||||
86 | |||||
87 | Tensor new_with_tensor(c10::TensorOptions options, at::ScalarType scalar_type, const Tensor& other) { | ||||
88 | options = options.dtype(scalar_type); | ||||
89 | TORCH_CHECK_TYPE(other.options().type_equal(options), "expected ", | ||||
90 | options, " (got ", other.options(), ")"); | ||||
91 | return other.alias(); | ||||
92 | } | ||||
93 | |||||
94 | std::vector<int64_t> compute_sizes(PyObject* seq) { | ||||
95 | std::vector<int64_t> sizes; | ||||
96 | THPObjectPtr handle; | ||||
97 | while (PySequence_Check(seq)) { | ||||
98 | auto length = PySequence_Length(seq); | ||||
99 | if (length < 0) throw python_error(); | ||||
100 | sizes.push_back(length); | ||||
101 | if (sizes.size() > MAX_DIMS) { | ||||
102 | throw ValueError("too many dimensions '%s'", Py_TYPE(seq)->tp_name); | ||||
103 | } | ||||
104 | if (length == 0) break; | ||||
105 | handle = THPObjectPtr(PySequence_GetItem(seq, 0)); | ||||
106 | if (!handle) { | ||||
107 | throw ValueError("could not determine the shape of object type '%s'", Py_TYPE(seq)->tp_name); | ||||
108 | } | ||||
109 | seq = handle.get(); | ||||
110 | } | ||||
111 | |||||
112 | return sizes; | ||||
113 | } | ||||
114 | |||||
115 | ScalarType infer_scalar_type(PyObject *obj) { | ||||
116 | #ifdef USE_NUMPY | ||||
117 | if (is_numpy_available()) { | ||||
118 | if (PyArray_Check(obj)) { | ||||
119 | return numpy_dtype_to_aten(PyArray_TYPE((PyArrayObject*)obj)); | ||||
120 | } | ||||
121 | if (PyArray_CheckScalar(obj)) { | ||||
122 | THPObjectPtr arr(PyArray_FromScalar(obj, nullptr)); | ||||
123 | return numpy_dtype_to_aten(PyArray_TYPE((PyArrayObject*) arr.get())); | ||||
124 | } | ||||
125 | } | ||||
126 | #endif | ||||
127 | if (PyFloat_Check(obj)) { | ||||
128 | // this is always guaranteed to be a floating-point type, and makes it more | ||||
129 | // convenient to write e.g. torch.tensor(0.) than torch.tensor(0., dtype=torch.Tensor.dtype). | ||||
130 | return torch::tensors::get_default_scalar_type(); | ||||
131 | } | ||||
132 | if (THPUtils_checkLong(obj)) { | ||||
133 | return ScalarType::Long; | ||||
134 | } | ||||
135 | if (PyBool_Check(obj)) { | ||||
136 | return ScalarType::Bool; | ||||
137 | } | ||||
138 | if (PyComplex_Check(obj)) { | ||||
139 | switch (torch::tensors::get_default_scalar_type()) { | ||||
140 | case ScalarType::Float: return ScalarType::ComplexFloat; | ||||
141 | case ScalarType::Double: return ScalarType::ComplexDouble; | ||||
142 | default: TORCH_CHECK(false, "invalid default scalar type for complex"); | ||||
143 | } | ||||
144 | } | ||||
145 | if (THPVariable_Check(obj)) { | ||||
146 | const auto& var = THPVariable_Unpack(obj); | ||||
147 | return var.scalar_type(); | ||||
148 | } | ||||
149 | if (THPUtils_checkString(obj)) { | ||||
150 | throw TypeError("new(): invalid data type '%s'", Py_TYPE(obj)->tp_name); | ||||
151 | } | ||||
152 | if (PySequence_Check(obj)) { | ||||
153 | c10::optional<ScalarType> scalarType; | ||||
154 | auto length = PySequence_Length(obj); | ||||
155 | if (length < 0) throw python_error(); | ||||
156 | // match NumPy semantics, except use default tensor type instead of double. | ||||
157 | if (length == 0) return torch::tensors::get_default_scalar_type(); | ||||
158 | for (const auto i : c10::irange(length)) { | ||||
159 | THPObjectPtr handle(PySequence_GetItem(obj, i)); | ||||
160 | if (!handle) throw python_error(); | ||||
161 | auto cur_item = handle.get(); | ||||
162 | if (cur_item == obj) throw TypeError("new(): self-referential lists are incompatible"); | ||||
163 | ScalarType item_scalarType = infer_scalar_type(cur_item); | ||||
164 | scalarType = (scalarType) ? | ||||
165 | at::promoteTypes(*scalarType, item_scalarType) : item_scalarType; | ||||
166 | if (scalarType == ScalarType::ComplexDouble) { | ||||
167 | // this won't change (unless we hit undefined, but that will fail later). | ||||
168 | return *scalarType; | ||||
169 | } | ||||
170 | } | ||||
171 | return *scalarType; | ||||
172 | } | ||||
173 | AT_ERROR("Could not infer dtype of ", Py_TYPE(obj)->tp_name); | ||||
174 | } | ||||
175 | |||||
176 | void recursive_store(char* data, IntArrayRef sizes, IntArrayRef strides, int64_t dim, | ||||
177 | ScalarType scalarType, int elementSize, PyObject* obj) { | ||||
178 | int64_t ndim = sizes.size(); | ||||
179 | if (dim == ndim) { | ||||
180 | torch::utils::store_scalar(data, scalarType, obj); | ||||
181 | return; | ||||
182 | } | ||||
183 | |||||
184 | auto n = sizes[dim]; | ||||
185 | auto seq = THPObjectPtr(PySequence_Fast(obj, "not a sequence")); | ||||
186 | if (!seq) throw python_error(); | ||||
187 | // NOLINTNEXTLINE(bugprone-branch-clone) | ||||
188 | auto seq_size = PySequence_Fast_GET_SIZE(seq.get()); | ||||
189 | if (seq_size != n) { | ||||
190 | throw ValueError("expected sequence of length %lld at dim %lld (got %lld)", | ||||
191 | (long long)n, (long long)dim, (long long)seq_size); | ||||
192 | } | ||||
193 | |||||
194 | PyObject** items = PySequence_Fast_ITEMS(seq.get()); | ||||
195 | for(const auto i : c10::irange(n)) { | ||||
196 | recursive_store(data, sizes, strides, dim + 1, scalarType, elementSize, items[i]); | ||||
197 | data += strides[dim] * elementSize; | ||||
198 | } | ||||
199 | } | ||||
200 | |||||
201 | Tensor internal_new_from_data( | ||||
202 | c10::TensorOptions options, | ||||
203 | at::ScalarType scalar_type, | ||||
204 | c10::optional<Device> device_opt, | ||||
205 | PyObject* data, | ||||
206 | bool copy_variables, | ||||
207 | bool copy_numpy, | ||||
208 | bool type_inference, | ||||
209 | bool pin_memory = false) { | ||||
210 | |||||
211 | if (THPUtils_checkString(data)) { | ||||
212 | throw TypeError("new(): invalid data type '%s'", Py_TYPE(data)->tp_name); | ||||
213 | } | ||||
214 | |||||
215 | if (THPVariable_Check(data)) { | ||||
216 | TORCH_CHECK(!pin_memory, "Can't pin tensor constructed from a variable"); | ||||
217 | // TODO: use MaybeOwned | ||||
218 | auto var = THPVariable_Unpack(data); | ||||
219 | if (copy_variables) { | ||||
220 | var = var.detach(); | ||||
221 | } | ||||
222 | // infer the scalar type and device type; it's not expected to infer the layout since these constructors | ||||
223 | // are defined per-layout-type (e.g. tensor vs sparse_coo_tensor). | ||||
224 | const auto& inferred_scalar_type = type_inference ? var.scalar_type() : scalar_type; | ||||
225 | auto device = device_opt.has_value() ? *device_opt : var.device(); | ||||
226 | pybind11::gil_scoped_release no_gil; | ||||
227 | maybe_initialize_cuda(device); | ||||
228 | return var.to(device, inferred_scalar_type, /*non_blocking=*/false, /*copy=*/copy_variables); | ||||
229 | } | ||||
230 | |||||
231 | #ifdef USE_NUMPY | ||||
232 | if (PyObject_HasAttrString(data, "__cuda_array_interface__")) { | ||||
233 | TORCH_CHECK(!pin_memory, "Can't pin tensor constructed from __cuda_array_interface__"); | ||||
234 | auto tensor = tensor_from_cuda_array_interface(data); | ||||
235 | const auto& inferred_scalar_type = type_inference ? tensor.scalar_type() : scalar_type; | ||||
236 | auto device = device_opt.has_value() ? *device_opt : options.device(); | ||||
237 | pybind11::gil_scoped_release no_gil; | ||||
238 | maybe_initialize_cuda(device); | ||||
239 | return tensor.to(device, inferred_scalar_type, /*non_blocking=*/false, /*copy=*/copy_numpy); | ||||
240 | } | ||||
241 | |||||
242 | if (is_numpy_available() && PyArray_Check(data)) { | ||||
243 | TORCH_CHECK(!pin_memory, "Can't pin tensor constructed from numpy"); | ||||
244 | auto tensor = tensor_from_numpy(data, /*warn_if_not_writeable=*/!copy_numpy); | ||||
245 | const auto& inferred_scalar_type = type_inference ? tensor.scalar_type() : scalar_type; | ||||
246 | auto device = device_opt.has_value() ? *device_opt : options.device(); | ||||
247 | pybind11::gil_scoped_release no_gil; | ||||
248 | maybe_initialize_cuda(device); | ||||
249 | return tensor.to(device, inferred_scalar_type, /*non_blocking=*/false, /*copy=*/copy_numpy); | ||||
250 | } | ||||
251 | #endif | ||||
252 | |||||
253 | auto sizes = compute_sizes(data); | ||||
254 | ScalarType inferred_scalar_type = type_inference ? infer_scalar_type(data) : scalar_type; | ||||
255 | // This exists to prevent us from tracing the call to empty(). The actual | ||||
256 | // autograd code doesn't really matter, because requires_grad is always false | ||||
257 | // here. | ||||
258 | Tensor tensor; | ||||
259 | { | ||||
260 | at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove | ||||
261 | at::tracer::impl::NoTracerDispatchMode tracer_guard; | ||||
262 | tensor = at::empty(sizes, at::initialTensorOptions().dtype(inferred_scalar_type).pinned_memory(pin_memory)); | ||||
263 | recursive_store( | ||||
264 | (char*)tensor.data_ptr(), tensor.sizes(), tensor.strides(), 0, | ||||
265 | inferred_scalar_type, tensor.dtype().itemsize(), data); | ||||
266 | } | ||||
267 | auto device = device_opt.has_value() ? *device_opt : options.device(); | ||||
268 | pybind11::gil_scoped_release no_gil; | ||||
269 | maybe_initialize_cuda(device); | ||||
270 | // However, it is VERY important that we trace the to() call here (even | ||||
271 | // though the reason this is important is a hack). Without *some* factory | ||||
272 | // function call that is traced at construction time, we will consider | ||||
273 | // a tensor constant as originating from "outside" the trace, and if you | ||||
274 | // try to return it directly we will fail with the error saying | ||||
275 | // "no observable data dependence". In an ideal world, we wouldn't trace | ||||
276 | // a to() call but I need to think harder about what exactly we should trace | ||||
277 | // in this case. | ||||
278 | return tensor.to(device, inferred_scalar_type, /*non_blocking=*/false, /*copy=*/false); | ||||
279 | } | ||||
280 | |||||
281 | Tensor new_from_data_copy( | ||||
282 | c10::TensorOptions options, | ||||
283 | at::ScalarType scalar_type, | ||||
284 | c10::optional<Device> device, | ||||
285 | PyObject* data) { | ||||
286 | return internal_new_from_data(options, scalar_type, device, data, | ||||
287 | /*copy_variables=*/true, /*copy_numpy=*/true, | ||||
288 | /*type_inference=*/false); | ||||
289 | } | ||||
290 | |||||
291 | Tensor legacy_new_from_sequence( | ||||
292 | c10::TensorOptions options, | ||||
293 | at::ScalarType scalar_type, | ||||
294 | c10::optional<Device> device, | ||||
295 | PyObject* data) { | ||||
296 | if (!PySequence_Check(data)) { | ||||
297 | throw TypeError("new(): data must be a sequence (got %s)", Py_TYPE(data)->tp_name); | ||||
298 | } | ||||
299 | return internal_new_from_data(options, scalar_type, device, data, | ||||
300 | /*copy_variables=*/false, /*copy_numpy=*/false, | ||||
301 | /*type_inference=*/false); | ||||
302 | } | ||||
303 | |||||
304 | // "base" here refers to the Tensor type on which the function was invoked, e.g.: | ||||
305 | // in x.new(y), 'x' is the base. | ||||
306 | // TODO: Rewrite this using dispatchKeyToTensorOptions | ||||
307 | void check_base_legacy_new(c10::DispatchKey dispatch_key, at::Layout expected_layout) { | ||||
308 | if (expected_layout == c10::kStrided) { | ||||
309 | TORCH_CHECK( | ||||
310 | dispatch_key == c10::DispatchKey::CPU || | ||||
311 | dispatch_key == c10::DispatchKey::CUDA || | ||||
312 | dispatch_key == c10::DispatchKey::HIP || | ||||
313 | dispatch_key == c10::DispatchKey::XLA || | ||||
314 | dispatch_key == c10::DispatchKey::XPU, | ||||
315 | "new(): expected DispatchKey: ", | ||||
316 | c10::DispatchKey::CPU, | ||||
317 | " or ", | ||||
318 | c10::DispatchKey::CUDA, | ||||
319 | " or ", | ||||
320 | c10::DispatchKey::HIP, | ||||
321 | " or ", | ||||
322 | c10::DispatchKey::XLA, | ||||
323 | " or ", | ||||
324 | c10::DispatchKey::XPU, | ||||
325 | " but got: ", | ||||
326 | dispatch_key); | ||||
327 | } else if(expected_layout == c10::kSparse) { | ||||
328 | // NOTE: no sparse XLA | ||||
329 | TORCH_CHECK( | ||||
330 | dispatch_key == c10::DispatchKey::SparseCPU || | ||||
331 | dispatch_key == c10::DispatchKey::SparseCUDA || | ||||
332 | dispatch_key == c10::DispatchKey::SparseHIP || | ||||
333 | dispatch_key == c10::DispatchKey::SparseXPU, | ||||
334 | "new(): expected DispatchKey: ", | ||||
335 | c10::DispatchKey::SparseCPU, | ||||
336 | " or ", | ||||
337 | c10::DispatchKey::SparseCUDA, | ||||
338 | " or ", | ||||
339 | c10::DispatchKey::SparseHIP, | ||||
340 | " or ", | ||||
341 | c10::DispatchKey::SparseXPU, | ||||
342 | " but got: ", | ||||
343 | dispatch_key); | ||||
344 | } else { | ||||
345 | TORCH_INTERNAL_ASSERT(false, "unexpected layout"); | ||||
346 | } | ||||
347 | } | ||||
348 | |||||
349 | // TODO: Make this accept options instead of dispatch key | ||||
350 | void check_legacy_ctor_device(c10::DispatchKey dispatch_key, c10::optional<Device> device) { | ||||
351 | if (device.has_value()) { | ||||
352 | TORCH_CHECK(dispatchKeyToDeviceType(dispatch_key) == device.value().type(), | ||||
353 | "legacy constructor expects device type: ", dispatchKeyToDeviceType(dispatch_key), | ||||
354 | " but device type: ", device.value().type(), " was passed"); | ||||
355 | } | ||||
356 | } | ||||
357 | |||||
358 | Tensor legacy_sparse_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
359 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
360 | static PythonArgParser parser({ | ||||
361 | "new(*, Device? device=None)", | ||||
362 | "new(*, int64_t cdata)|hidden", | ||||
363 | "new(Tensor indices, Tensor values, *, Device? device=None)", | ||||
364 | "new(Tensor indices, Tensor values, IntArrayRef size, *, Device? device=None)", | ||||
365 | "new(IntArrayRef size, *, Device? device=None)", | ||||
366 | }); | ||||
367 | ParsedArgs<4> parsed_args; | ||||
368 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
369 | if (r.idx == 0) { | ||||
370 | auto deviceOptional = r.deviceOptional(0); | ||||
371 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
372 | return at::empty({0}, build_options(options, scalar_type, deviceOptional)); | ||||
373 | } else if (r.idx == 1) { | ||||
374 | auto cdata = reinterpret_cast<void*>(r.toInt64(0)); | ||||
375 | return at::unsafeTensorFromTH(cdata, true); | ||||
376 | } else if (r.idx == 2) { | ||||
377 | auto deviceOptional = r.deviceOptional(2); | ||||
378 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
379 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
380 | return at::sparse_coo_tensor(r.tensor(0), r.tensor(1)); | ||||
381 | } else if (r.idx == 3) { | ||||
382 | auto deviceOptional = r.deviceOptional(3); | ||||
383 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
384 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
385 | return at::sparse_coo_tensor(r.tensor(0), r.tensor(1), r.intlist(2)); | ||||
386 | } else if (r.idx == 4) { | ||||
387 | PyObject* arg = r.pyobject(0); | ||||
388 | auto deviceOptional = r.deviceOptional(1); | ||||
389 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
390 | if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) { | ||||
391 | // new(sequence) binds to this signature but should be treated differently | ||||
392 | // unless the sequence is a torch.Size | ||||
393 | throw TypeError("torch.SparseTensor(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() " \ | ||||
394 | "or construct a strided tensor and convert it to sparse via to_sparse."); | ||||
395 | } | ||||
396 | return new_with_sizes(options, scalar_type, r.deviceOptional(1), r.intlist(0)); | ||||
397 | } | ||||
398 | throw std::runtime_error("new(): invalid arguments"); | ||||
399 | } | ||||
400 | |||||
401 | Tensor legacy_sparse_tensor_new(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
402 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
403 | static PythonArgParser parser({ | ||||
404 | "new(*, Device? device=None)", | ||||
405 | "new(*, int64_t cdata)|hidden", | ||||
406 | "new(Tensor indices, Tensor values, *, Device? device=None)", | ||||
407 | "new(Tensor indices, Tensor values, IntArrayRef size, *, Device? device=None)", | ||||
408 | "new(IntArrayRef size, *, Device? device=None)", | ||||
409 | }); | ||||
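// Same signature set as legacy_sparse_tensor_ctor above, but reached through the .new()
// method of an existing sparse tensor (illustrative): s.new(), s.new(i, v),
// s.new(i, v, torch.Size(sz)), s.new(2, 3).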
410 | check_base_legacy_new(dispatch_key, c10::kSparse); | ||||
411 | ParsedArgs<5> parsed_args; | ||||
412 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
413 | if (r.idx == 0) { | ||||
414 | auto deviceOptional = r.deviceOptional(0); | ||||
415 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
416 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
417 | return at::empty({0}, build_options(options, scalar_type)); | ||||
418 | } else if (r.idx == 1) { | ||||
419 | auto cdata = reinterpret_cast<void*>(r.toInt64(0)); | ||||
420 | return at::unsafeTensorFromTH(cdata, true); | ||||
421 | } else if (r.idx == 2) { | ||||
422 | // Note: this signature doesn't have a dtype, even though it has a device; it probably shouldn't | ||||
423 | // have a device (we should infer it). | ||||
424 | auto deviceOptional = r.deviceOptional(2); | ||||
425 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
426 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
427 | return at::sparse_coo_tensor(r.tensor(0), r.tensor(1)); | ||||
428 | } else if (r.idx == 3) { | ||||
429 | // Note: this signature doesn't have a dtype, even though it has a device; it probably shouldn't | ||||
430 | // have a device (we should infer it). | ||||
431 | auto deviceOptional = r.deviceOptional(3); | ||||
432 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
433 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
434 | return at::sparse_coo_tensor(r.tensor(0), r.tensor(1), r.intlist(2)); | ||||
435 | } else if (r.idx == 4) { | ||||
436 | PyObject* arg = r.pyobject(0); | ||||
437 | auto deviceOptional = r.deviceOptional(1); | ||||
438 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
439 | if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) { | ||||
440 | // new(sequence) binds to this signature but should be treated differently | ||||
441 | // unless the sequence is a torch.Size | ||||
442 | throw TypeError("SparseTensor.new(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() " \ | ||||
443 | "or construct a strided tensor and convert it to sparse via to_sparse."); | ||||
444 | } | ||||
445 | return new_with_sizes(options, scalar_type, r.deviceOptional(1), r.intlist(0)); | ||||
446 | } | ||||
447 | throw std::runtime_error("new(): invalid arguments"); | ||||
448 | } | ||||
449 | |||||
450 | // NB: device_idx here is NOT a DeviceIndex, but an index into PythonArgs | ||||
451 | c10::TensorOptions typeIdWithDefault(PythonArgs& r, int64_t device_idx, c10::DispatchKey dispatch_key) { | ||||
452 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
453 | if (!r.isNone(device_idx)) { | ||||
454 | // TODO: This line doesn't seem to be exercised at all in tests | ||||
455 | options = options.device(r.device(device_idx).type()); | ||||
456 | } | ||||
457 | return options; | ||||
458 | } | ||||
459 | |||||
460 | } // namespace | ||||
461 | |||||
462 | Tensor legacy_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
463 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
464 | static PythonArgParser parser({ | ||||
465 | "new(*, Device? device=None)", | ||||
466 | "new(Storage storage)", | ||||
467 | "new(*, int64_t cdata)|hidden", | ||||
468 | "new(Tensor other)", | ||||
469 | "new(Tensor other, *, Device? device=None)|hidden", // prevent Tensor matching with IntArrayRef, PyObject* | ||||
470 | "new(IntArrayRef size, *, Device? device=None)", | ||||
471 | "new(PyObject* data, *, Device? device=None)", | ||||
472 | }); | ||||
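// Rough sketch (illustrative) of Python-level calls hitting these overloads via the
// legacy typed constructors, e.g. torch.FloatTensor:
//   torch.FloatTensor()                  -> "new(*, Device? device=None)"
//   torch.FloatTensor(storage)           -> "new(Storage storage)"
//   torch.FloatTensor(other_tensor)      -> "new(Tensor other)"
//   torch.FloatTensor(2, 3)              -> "new(IntArrayRef size, ...)"
//   torch.FloatTensor([[1, 2], [3, 4]])  -> "new(PyObject* data, ...)"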
473 | |||||
474 | if (isSparse(dispatchKeyToBackend(dispatch_key))) { | ||||
475 | return legacy_sparse_tensor_ctor(dispatch_key, scalar_type, args, kwargs); | ||||
476 | } | ||||
477 | |||||
478 | ParsedArgs<2> parsed_args; | ||||
479 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
480 | if (r.idx == 0) { | ||||
481 | auto deviceOptional = r.deviceOptional(0); | ||||
482 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
483 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
484 | return at::empty({0}, build_options(options, scalar_type)); | ||||
485 | } else if (r.idx == 1) { | ||||
486 | THPObjectPtr dtype_attr(PyObject_GetAttrString(r.pyobject(0), "dtype")); | ||||
487 | if (!dtype_attr) throw python_error(); | ||||
488 | at::ScalarType storage_scalar_type = reinterpret_cast<THPDtype*>( | ||||
489 | dtype_attr.get())->scalar_type; | ||||
490 | TORCH_CHECK( | ||||
491 | storage_scalar_type == scalar_type, | ||||
492 | "Expected Storage of type ", | ||||
493 | scalar_type, | ||||
494 | " but got type ", | ||||
495 | storage_scalar_type, | ||||
496 | " for argument 1 'storage'"); | ||||
497 | return new_with_storage(options, scalar_type, r.storage(0)); | ||||
498 | } else if (r.idx == 2) { | ||||
499 | auto cdata = reinterpret_cast<void*>(r.toInt64(0)); | ||||
500 | return at::unsafeTensorFromTH(cdata, true); | ||||
501 | } else if (r.idx == 3) { | ||||
502 | return new_with_tensor(options, scalar_type, r.tensor(0)); | ||||
503 | } else if (r.idx == 4) { | ||||
504 | TORCH_CHECK(false, "Legacy tensor constructor of the form torch.Tensor(tensor, device=device) " \if ((__builtin_expect(static_cast<bool>(!(false)), 0))) { ::c10::detail::torchCheckFail( __func__, "../torch/csrc/utils/tensor_new.cpp" , static_cast<uint32_t>(505), (::c10::detail::torchCheckMsgImpl ( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? If so, " "please report an enhancement request to PyTorch.)", "Legacy tensor constructor of the form torch.Tensor(tensor, device=device) " "is not supported. Use torch.tensor(...) or torch.as_tensor(...) instead." ))); } | ||||
505 | "is not supported. Use torch.tensor(...) or torch.as_tensor(...) instead.")if ((__builtin_expect(static_cast<bool>(!(false)), 0))) { ::c10::detail::torchCheckFail( __func__, "../torch/csrc/utils/tensor_new.cpp" , static_cast<uint32_t>(505), (::c10::detail::torchCheckMsgImpl ( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? If so, " "please report an enhancement request to PyTorch.)", "Legacy tensor constructor of the form torch.Tensor(tensor, device=device) " "is not supported. Use torch.tensor(...) or torch.as_tensor(...) instead." ))); }; | ||||
506 | } else if (r.idx == 5) { | ||||
507 | PyObject* arg = r.pyobject(0); | ||||
508 | auto deviceOptional = r.deviceOptional(1); | ||||
509 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
510 | if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) { | ||||
511 | // new(sequence) binds to this signature but should be treated differently | ||||
512 | // unless the sequence is a torch.Size | ||||
513 | return legacy_new_from_sequence(options, scalar_type, deviceOptional, r.pyobject(0)); | ||||
514 | } | ||||
515 | return new_with_sizes(options, scalar_type, r.deviceOptional(1), r.intlist(0)); | ||||
516 | } else if (r.idx == 6) { | ||||
517 | auto deviceOptional = r.deviceOptional(1); | ||||
518 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
519 | return legacy_new_from_sequence(options, scalar_type, deviceOptional, r.pyobject(0)); | ||||
520 | } | ||||
521 | throw std::runtime_error("new(): invalid arguments"); | ||||
522 | } | ||||
523 | |||||
524 | Tensor legacy_tensor_new(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
525 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
526 | static PythonArgParser parser({ | ||||
527 | "new(*, Device? device=None)", | ||||
528 | "new(Storage storage)", | ||||
529 | "new(*, int64_t cdata)|hidden", | ||||
530 | "new(Tensor other)", // this doesn't have a dtype/device because it creates an alias. | ||||
531 | "new(Tensor other, *, Device? device=None)|hidden", // prevent Tensor matching with IntArrayRef, PyObject* | ||||
532 | "new(IntArrayRef size, *, Device? device=None)", | ||||
533 | "new(PyObject* data, *, Device? device=None)", | ||||
534 | }); | ||||
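// Same overload set as legacy_tensor_ctor, but reached through Tensor.new()
// (illustrative): x.new(), x.new(storage), x.new(other), x.new(2, 3), x.new([1., 2.]).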
535 | |||||
536 | if (isSparse(dispatchKeyToBackend(dispatch_key))) { | ||||
537 | return legacy_sparse_tensor_new(dispatch_key, scalar_type, args, kwargs); | ||||
538 | } | ||||
539 | |||||
540 | check_base_legacy_new(dispatch_key, c10::kStrided); | ||||
541 | ParsedArgs<3> parsed_args; | ||||
542 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
543 | if (r.idx == 0) { | ||||
544 | auto deviceOptional = r.deviceOptional(0); | ||||
545 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
546 | at::OptionalDeviceGuard device_guard(deviceOptional); | ||||
547 | return at::empty({0}, build_options(options, scalar_type)); | ||||
548 | } else if (r.idx == 1) { | ||||
549 | THPObjectPtr dtype_attr(PyObject_GetAttrString(r.pyobject(0), "dtype")); | ||||
550 | if (!dtype_attr) throw python_error(); | ||||
551 | at::ScalarType storage_scalar_type = reinterpret_cast<THPDtype*>( | ||||
552 | dtype_attr.get())->scalar_type; | ||||
553 | TORCH_CHECK( | ||||
554 | storage_scalar_type == scalar_type, | ||||
555 | "Expected Storage of type ", | ||||
556 | scalar_type, | ||||
557 | " but got type ", | ||||
558 | storage_scalar_type, | ||||
559 | " for argument 1 'storage'"); | ||||
560 | return new_with_storage(options, scalar_type, r.storage(0)); | ||||
561 | } else if (r.idx == 2) { | ||||
562 | auto cdata = reinterpret_cast<void*>(r.toInt64(0)); | ||||
563 | return at::unsafeTensorFromTH(cdata, true); | ||||
564 | } else if (r.idx == 3) { | ||||
565 | return new_with_tensor(options, scalar_type, r.tensor(0)); | ||||
566 | } else if (r.idx == 4) { | ||||
567 | TORCH_CHECK(false, "Legacy tensor new of the form tensor.new(tensor, device=device) " \if ((__builtin_expect(static_cast<bool>(!(false)), 0))) { ::c10::detail::torchCheckFail( __func__, "../torch/csrc/utils/tensor_new.cpp" , static_cast<uint32_t>(568), (::c10::detail::torchCheckMsgImpl ( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? If so, " "please report an enhancement request to PyTorch.)", "Legacy tensor new of the form tensor.new(tensor, device=device) " "is not supported. Use torch.as_tensor(...) instead."))); } | ||||
568 | "is not supported. Use torch.as_tensor(...) instead.")if ((__builtin_expect(static_cast<bool>(!(false)), 0))) { ::c10::detail::torchCheckFail( __func__, "../torch/csrc/utils/tensor_new.cpp" , static_cast<uint32_t>(568), (::c10::detail::torchCheckMsgImpl ( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? If so, " "please report an enhancement request to PyTorch.)", "Legacy tensor new of the form tensor.new(tensor, device=device) " "is not supported. Use torch.as_tensor(...) instead."))); }; | ||||
569 | } else if (r.idx == 5) { | ||||
570 | PyObject* arg = r.pyobject(0); | ||||
571 | auto deviceOptional = r.deviceOptional(1); | ||||
572 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
573 | if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) { | ||||
574 | // new(sequence) binds to this signature but should be treated differently | ||||
575 | // unless the sequence is a torch.Size | ||||
576 | return legacy_new_from_sequence(options, scalar_type, deviceOptional, r.pyobject(0)); | ||||
577 | } | ||||
578 | return new_with_sizes(options, scalar_type, r.deviceOptional(1), r.intlist(0)); | ||||
579 | } else if (r.idx == 6) { | ||||
580 | auto deviceOptional = r.deviceOptional(1); | ||||
581 | check_legacy_ctor_device(dispatch_key, deviceOptional); | ||||
582 | return legacy_new_from_sequence(options, scalar_type, r.deviceOptional(1), r.pyobject(0)); | ||||
583 | } | ||||
584 | throw std::runtime_error("new(): invalid arguments"); | ||||
585 | } | ||||
586 | |||||
587 | Tensor indexing_tensor_from_data( | ||||
588 | c10::TensorOptions options, | ||||
589 | at::ScalarType scalar_type, | ||||
590 | c10::optional<Device> device, | ||||
591 | PyObject* data) { | ||||
592 | // Specific to tensor indexing, converts an indexing list to an | ||||
593 | // indexing tensor (type Byte or Long) | ||||
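// For example (illustrative): t[[0, 2]] produces an index tensor of scalar_type
// (Long in practice), while t[[True, False, True]] infers Bool and keeps that type,
// so it is treated as a mask rather than as positions.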
594 | ScalarType inferred_scalar_type = infer_scalar_type(data); | ||||
595 | if (inferred_scalar_type == ScalarType::Byte || inferred_scalar_type == ScalarType::Bool) { | ||||
596 | return internal_new_from_data(options, inferred_scalar_type, device, data, | ||||
597 | /*copy_variables=*/false, /*copy_numpy=*/false, | ||||
598 | /*type_inference=*/false); | ||||
599 | } else { | ||||
600 | return internal_new_from_data(options, scalar_type, device, data, | ||||
601 | /*copy_variables=*/false, /*copy_numpy=*/false, | ||||
602 | /*type_inference=*/false); | ||||
603 | } | ||||
604 | } | ||||
605 | |||||
606 | Tensor sparse_csr_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
607 | TORCH_INTERNAL_ASSERT(!isSparseCsr(dispatchKeyToBackend(dispatch_key))); | ||||
608 | TORCH_INTERNAL_ASSERT(!isSparse(dispatchKeyToBackend(dispatch_key))); | ||||
609 | static PythonArgParser parser({ | ||||
610 | "sparse_csr_tensor(PyObject* crow_indices, PyObject* col_indices, PyObject* values, IntArrayRef size, *, ScalarType dtype=None, Layout? layout=None, Device? device=None, bool pin_memory=False, bool requires_grad=False)", | ||||
611 | "sparse_csr_tensor(PyObject* crow_indices, PyObject* col_indices, PyObject* values, *, ScalarType dtype=None, Layout? layout=None, Device? device=None, bool pin_memory=False, bool requires_grad=False)", | ||||
612 | }); | ||||
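// Illustrative Python-level usage of the public API that lands here:
//   torch.sparse_csr_tensor(crow_indices, col_indices, values, size)  -> first signature
//   torch.sparse_csr_tensor(crow_indices, col_indices, values)        -> second signature (size inferred)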
613 | const int NUM_ARGS = 9, CROW_INDICES_ARG = 0, COL_INDICES_ARG = 1, VALUES_ARG = 2; | ||||
614 | ParsedArgs<NUM_ARGS> parsed_args; | ||||
615 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
616 | auto safe_get_attr_string = [](PyObject *o, const char *attr_name) -> PyObject* { | ||||
617 | // Clear error indicator if attribute does not exist. | ||||
618 | // Otherwise subsequent Python C API calls might return bogus values. | ||||
619 | // See https://github.com/pytorch/pytorch/issues/58520 for more details | ||||
620 | auto rc = PyObject_GetAttrString(o, attr_name); | ||||
621 | if (!rc) { | ||||
622 | if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { | ||||
623 | throw python_error(); | ||||
624 | } | ||||
625 | // Warning: an AttributeError raised for an unrelated reason may be suppressed here | ||||
626 | PyErr_Clear(); | ||||
627 | } | ||||
628 | return rc; | ||||
629 | }; | ||||
630 | THPObjectPtr crow_indices_dtype_attr(safe_get_attr_string(r.pyobject(CROW_INDICES_ARG), "dtype")); | ||||
631 | THPObjectPtr col_indices_dtype_attr(safe_get_attr_string(r.pyobject(COL_INDICES_ARG), "dtype")); | ||||
632 | at::ScalarType crow_indices_scalar_type = crow_indices_dtype_attr ? reinterpret_cast<THPDtype*>( | ||||
633 | crow_indices_dtype_attr.get())->scalar_type : kInt; | ||||
634 | at::ScalarType col_indices_scalar_type = col_indices_dtype_attr ? reinterpret_cast<THPDtype*>( | ||||
635 | col_indices_dtype_attr.get())->scalar_type : kInt; | ||||
636 | |||||
637 | if (r.idx == 0) { | ||||
638 | const int SIZE_ARRAY_ARG = 3, TYPE_INFERENCE_ARG = 4, DEVICE_TYPE_ARG = 6, REQ_GRAD_ARG = 8; | ||||
639 | bool type_inference = r.isNone(TYPE_INFERENCE_ARG); | ||||
640 | const auto inferred_options = typeIdWithDefault(r, DEVICE_TYPE_ARG, dispatch_key); | ||||
641 | const auto inferred_scalar_type = r.scalartypeWithDefault(TYPE_INFERENCE_ARG, scalar_type); | ||||
642 | at::OptionalDeviceGuard device_guard(r.deviceOptional(DEVICE_TYPE_ARG)); | ||||
643 | |||||
644 | Tensor values = internal_new_from_data(inferred_options, inferred_scalar_type, r.deviceOptional(DEVICE_TYPE_ARG), | ||||
645 | r.pyobject(VALUES_ARG), /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
646 | /*type_inference=*/type_inference); | ||||
647 | Tensor crow_indices = internal_new_from_data(values.options(), | ||||
648 | crow_indices_scalar_type, r.deviceOptional(DEVICE_TYPE_ARG), r.pyobject(CROW_INDICES_ARG), | ||||
649 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
650 | /*type_inference=*/true); | ||||
651 | Tensor col_indices = internal_new_from_data(values.options(), | ||||
652 | col_indices_scalar_type, r.deviceOptional(DEVICE_TYPE_ARG), r.pyobject(COL_INDICES_ARG), | ||||
653 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
654 | /*type_inference=*/true); | ||||
655 | |||||
656 | return at::sparse_csr_tensor(crow_indices, col_indices, values, r.intlist(SIZE_ARRAY_ARG), | ||||
657 | values.options().layout(at::kSparseCsr)).set_requires_grad(r.toBool(REQ_GRAD_ARG)); | ||||
658 | } else if (r.idx == 1) { | ||||
659 | const int TYPE_INFERENCE_ARG = 3, DEVICE_TYPE_ARG = 5, REQ_GRAD_ARG = 7; | ||||
660 | bool type_inference = r.isNone(TYPE_INFERENCE_ARG); | ||||
661 | const auto inferred_options = typeIdWithDefault(r, DEVICE_TYPE_ARG, dispatch_key); | ||||
662 | const auto inferred_scalar_type = r.scalartypeWithDefault(TYPE_INFERENCE_ARG, scalar_type); | ||||
663 | at::OptionalDeviceGuard device_guard(r.deviceOptional(DEVICE_TYPE_ARG)); | ||||
664 | |||||
665 | Tensor values = internal_new_from_data(inferred_options, inferred_scalar_type, r.deviceOptional(DEVICE_TYPE_ARG), | ||||
666 | r.pyobject(VALUES_ARG), /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
667 | /*type_inference=*/type_inference); | ||||
668 | Tensor crow_indices = internal_new_from_data(values.options(), | ||||
669 | crow_indices_scalar_type, r.deviceOptional(DEVICE_TYPE_ARG), | ||||
670 | r.pyobject(CROW_INDICES_ARG), /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
671 | /*type_inference=*/true); | ||||
672 | Tensor col_indices = internal_new_from_data(values.options(), col_indices_scalar_type, r.deviceOptional(DEVICE_TYPE_ARG), | ||||
673 | r.pyobject(COL_INDICES_ARG), /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
674 | /*type_inference=*/true); | ||||
675 | return at::sparse_csr_tensor(crow_indices, col_indices, values, | ||||
676 | values.options().layout(at::kSparseCsr)).set_requires_grad(r.toBool(REQ_GRAD_ARG)); | ||||
677 | } | ||||
678 | throw std::runtime_error("sparse_csr_tensor(): invalid arguments"); | ||||
679 | } | ||||
680 | |||||
681 | Tensor _sparse_csr_tensor_unsafe_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
682 | TORCH_INTERNAL_ASSERT(!isSparseCsr(dispatchKeyToBackend(dispatch_key))); | ||||
683 | TORCH_INTERNAL_ASSERT(!isSparse(dispatchKeyToBackend(dispatch_key))); | ||||
684 | enum { | ||||
685 | ARG_CROW_INDICES = 0, | ||||
686 | ARG_COL_INDICES, | ||||
687 | ARG_VALUES, | ||||
688 | ARG_SIZE, | ||||
689 | ARG_TYPE, | ||||
690 | ARG_DEVICE, | ||||
691 | ARG_REQUIRES_GRAD, | ||||
692 | ARGS_COUNT | ||||
693 | }; | ||||
694 | static PythonArgParser parser({ | ||||
695 | "_sparse_csr_tensor_unsafe(PyObject* crow_indices, PyObject* col_indices, PyObject* values, IntArrayRef size, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False)", | ||||
696 | }); | ||||
697 | |||||
698 | ParsedArgs<ARGS_COUNT> parsed_args; | ||||
699 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
700 | bool type_inference = r.isNone(ARG_TYPE); | ||||
701 | const auto inferred_options = typeIdWithDefault(r, ARG_DEVICE, dispatch_key); | ||||
702 | const auto inferred_scalar_type = r.scalartypeWithDefault(ARG_TYPE, scalar_type); | ||||
703 | at::OptionalDeviceGuard device_guard(r.deviceOptional(ARG_DEVICE)); | ||||
704 | Tensor values = internal_new_from_data(inferred_options, inferred_scalar_type, r.deviceOptional(ARG_DEVICE), r.pyobject(ARG_VALUES), | ||||
705 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
706 | /*type_inference=*/type_inference); | ||||
707 | |||||
708 | Tensor crow_indices = internal_new_from_data(values.options(), kInt, r.deviceOptional(ARG_DEVICE), r.pyobject(ARG_CROW_INDICES), | ||||
709 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
710 | /*type_inference=*/true); | ||||
711 | |||||
712 | Tensor col_indices = internal_new_from_data(values.options(), kInt, r.deviceOptional(ARG_DEVICE), r.pyobject(ARG_COL_INDICES), | ||||
713 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
714 | /*type_inference=*/true); | ||||
715 | |||||
716 | return at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, r.intlist(ARG_SIZE), values.options().layout(at::kSparseCsr)).set_requires_grad(r.toBool(ARG_REQUIRES_GRAD)); | ||||
717 | } | ||||
718 | |||||
719 | // Note [Ensuring sparse values and indices match devices] | ||||
720 | // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
721 | // In all places where we construct indices, we read out options from values | ||||
722 | // (rather than use inferred_options). Why? This handles the case when | ||||
723 | // values is a CUDA tensor, but indices is a non-Tensor value (and the device | ||||
724 | // argument is not set). Example: | ||||
725 | // | ||||
726 | // torch.sparse_coo_tensor(([0, 1],), self.empty(2, 0).cuda(), (4, 0)) | ||||
727 | // | ||||
728 | // Sparse tensors require both indices and values to live on the same device. | ||||
729 | // If values lives on CUDA, we can infer where the indices should live, and | ||||
730 | // should accept even ordinary index sequences (and just make sure we write them | ||||
731 | // into the correct device). values is the ONLY way we know that the index | ||||
732 | // tensor should go to CUDA, so we have to get the information in somehow. | ||||
733 | // | ||||
734 | // This code is kind of jank. For one, the dtype in options is silently ignored | ||||
735 | // by internal_new_from_data. Also, in classic janky code style, it used to | ||||
736 | // not work quite right: if values lives on "cuda:1", before all we said was | ||||
737 | // "this needs to be CUDA" and indices would be allocated on the wrong tensor. | ||||
738 | // Options is more right and gets this correct. | ||||
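// Illustrative consequence of the above: because the index tensors are built from
// values.options(), a call like
//   torch.sparse_coo_tensor([[0, 1]], values_on_cuda1)   # values_on_cuda1 lives on "cuda:1"
// allocates the indices tensor on "cuda:1" as well, even though the indices were given
// as a plain Python list and no device argument was passed.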
739 | |||||
740 | Tensor sparse_coo_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
741 | TORCH_INTERNAL_ASSERT(!isSparse(dispatchKeyToBackend(dispatch_key))); | ||||
742 | TORCH_INTERNAL_ASSERT(!isSparseCsr(dispatchKeyToBackend(dispatch_key))); | ||||
743 | static PythonArgParser parser({ | ||||
744 | "sparse_coo_tensor(PyObject* indices, PyObject* values, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False)", | ||||
745 | "sparse_coo_tensor(PyObject* indices, PyObject* values, IntArrayRef size, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False)", | ||||
746 | "sparse_coo_tensor(IntArrayRef size, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False)", | ||||
747 | }); | ||||
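// Illustrative Python-level calls:
//   torch.sparse_coo_tensor(indices, values)        -> first signature
//   torch.sparse_coo_tensor(indices, values, size)  -> second signature
//   torch.sparse_coo_tensor(size)                   -> third signature (empty sparse tensor)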
748 | |||||
749 | ParsedArgs<6> parsed_args; | ||||
750 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
751 | if (r.idx == 0) { | ||||
752 | bool type_inference = r.isNone(2); | ||||
753 | const auto inferred_options = typeIdWithDefault(r, 3, dispatch_key); | ||||
754 | const auto inferred_scalar_type = r.scalartypeWithDefault(2, scalar_type); | ||||
755 | at::OptionalDeviceGuard device_guard(r.deviceOptional(3)); | ||||
756 | // if no dtype provided, infer type based on value type. | ||||
757 | Tensor values = internal_new_from_data(inferred_options, inferred_scalar_type, r.deviceOptional(3), r.pyobject(1), | ||||
758 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
759 | /*type_inference=*/type_inference); | ||||
760 | // See Note [Ensuring sparse values and indices match devices] | ||||
761 | Tensor indices = internal_new_from_data(values.options(), kLong, r.deviceOptional(3), r.pyobject(0), | ||||
762 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
763 | /*type_inference=*/false); | ||||
764 | return at::sparse_coo_tensor(indices, values, values.options().layout(at::kSparse)).set_requires_grad(r.toBool(4)); | ||||
765 | } else if (r.idx == 1) { | ||||
766 | bool type_inference = r.isNone(3); | ||||
767 | const auto inferred_options = typeIdWithDefault(r, 4, dispatch_key); | ||||
768 | const auto inferred_scalar_type = r.scalartypeWithDefault(3, scalar_type); | ||||
769 | at::OptionalDeviceGuard device_guard(r.deviceOptional(4)); | ||||
770 | Tensor values = internal_new_from_data(inferred_options, inferred_scalar_type, r.deviceOptional(4), r.pyobject(1), | ||||
771 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
772 | /*type_inference=*/type_inference); | ||||
773 | // See Note [Ensuring sparse values and indices match devices] | ||||
774 | Tensor indices = internal_new_from_data(values.options(), kLong, r.deviceOptional(4), r.pyobject(0), | ||||
775 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
776 | /*type_inference=*/false); | ||||
777 | return at::sparse_coo_tensor(indices, values, r.intlist(2), values.options().layout(at::kSparse)).set_requires_grad(r.toBool(5)); | ||||
778 | } else if (r.idx == 2) { | ||||
779 | const auto inferred_options = typeIdWithDefault(r, 2, dispatch_key); | ||||
780 | const auto inferred_scalar_type = r.scalartypeWithDefault(1, scalar_type); | ||||
781 | at::OptionalDeviceGuard device_guard(r.deviceOptional(2)); | ||||
782 | return at::sparse_coo_tensor(r.intlist(0), inferred_options.dtype(inferred_scalar_type).layout(at::kSparse)).set_requires_grad(r.toBool(3)); | ||||
783 | } | ||||
784 | throw std::runtime_error("sparse_coo_tensor(): invalid arguments"); | ||||
785 | } | ||||
786 | |||||
787 | Tensor _sparse_coo_tensor_unsafe_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
788 | TORCH_INTERNAL_ASSERT(!isSparse(dispatchKeyToBackend(dispatch_key))); | ||||
789 | TORCH_INTERNAL_ASSERT(!isSparseCsr(dispatchKeyToBackend(dispatch_key))); | ||||
790 | enum { | ||||
791 | ARG_INDICES = 0, | ||||
792 | ARG_VALUES, | ||||
793 | ARG_SIZE, | ||||
794 | ARG_TYPE, | ||||
795 | ARG_DEVICE, | ||||
796 | ARG_REQUIRES_GRAD, | ||||
797 | ARGS_COUNT | ||||
798 | }; | ||||
799 | static PythonArgParser parser({ | ||||
800 | "_sparse_coo_tensor_unsafe(PyObject* indices, PyObject* values, IntArrayRef size, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False)", | ||||
801 | }); | ||||
802 | |||||
803 | ParsedArgs<ARGS_COUNT> parsed_args; | ||||
804 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
805 | bool type_inference = r.isNone(ARG_TYPE); | ||||
806 | const auto inferred_options = typeIdWithDefault(r, ARG_DEVICE, dispatch_key); | ||||
807 | const auto inferred_scalar_type = r.scalartypeWithDefault(ARG_TYPE, scalar_type); | ||||
808 | at::OptionalDeviceGuard device_guard(r.deviceOptional(ARG_DEVICE)); | ||||
809 | Tensor values = internal_new_from_data(inferred_options, inferred_scalar_type, r.deviceOptional(ARG_DEVICE), r.pyobject(ARG_VALUES), | ||||
810 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
811 | /*type_inference=*/type_inference); | ||||
812 | // See Note [Ensuring sparse values and indices match devices] | ||||
813 | Tensor indices = internal_new_from_data(values.options(), kLong, r.deviceOptional(ARG_DEVICE), r.pyobject(ARG_INDICES), | ||||
814 | /*copy_variables=*/false, /*copy_numpy=*/true, | ||||
815 | /*type_inference=*/false); | ||||
816 | return at::_sparse_coo_tensor_unsafe(indices, values, r.intlist(ARG_SIZE), values.options().layout(at::kSparse)).set_requires_grad(r.toBool(ARG_REQUIRES_GRAD)); | ||||
817 | } | ||||
818 | |||||
819 | void _validate_sparse_coo_tensor_args(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
820 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
821 | static PythonArgParser parser({ | ||||
822 | "_validate_sparse_coo_tensor(PyObject* indices, PyObject* values, IntArrayRef size)", | ||||
823 | }); | ||||
824 | |||||
825 | ParsedArgs<3> parsed_args; | ||||
826 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
827 | Tensor values = internal_new_from_data( | ||||
828 | options, scalar_type, c10::nullopt, r.pyobject(1), | ||||
829 | /*copy_variables=*/false, /*copy_numpy=*/true, /*type_inference=*/true); | ||||
830 | // See Note [Ensuring sparse values and indices match devices] | ||||
831 | Tensor indices = internal_new_from_data( | ||||
832 | values.options(), kLong, c10::nullopt, r.pyobject(0), | ||||
833 | /*copy_variables=*/false, /*copy_numpy=*/true, /*type_inference=*/false); | ||||
834 | at::native::_validate_sparse_coo_tensor_args(indices, values, r.intlist(2)); | ||||
835 | } | ||||
836 | |||||
837 | |||||
838 | void _validate_sparse_csr_tensor_args(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
839 | auto options = dispatchKeyToTensorOptions(dispatch_key); | ||||
840 | static PythonArgParser parser({ | ||||
841 | "_validate_sparse_csr_tensor(PyObject* crow_indices, PyObject* col_indices, PyObject* values, IntArrayRef size)", | ||||
842 | }); | ||||
843 | |||||
844 | ParsedArgs<4> parsed_args; | ||||
845 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
846 | Tensor values = internal_new_from_data( | ||||
847 | options, scalar_type, c10::nullopt, r.pyobject(2), | ||||
848 | /*copy_variables=*/false, /*copy_numpy=*/true, /*type_inference=*/true); | ||||
849 | // See Note [Ensuring sparse values and indices match devices] | ||||
850 | Tensor crow_indices = internal_new_from_data( | ||||
851 | values.options(), kInt, c10::nullopt, r.pyobject(0), | ||||
852 | /*copy_variables=*/false, /*copy_numpy=*/true, /*type_inference=*/true); | ||||
853 | Tensor col_indices = internal_new_from_data( | ||||
854 | values.options(), kInt, c10::nullopt, r.pyobject(1), | ||||
855 | /*copy_variables=*/false, /*copy_numpy=*/true, /*type_inference=*/true); | ||||
856 | |||||
857 | at::native::_validate_sparse_csr_tensor_args(crow_indices, col_indices, values, r.intlist(3)); | ||||
858 | } | ||||
859 | |||||
860 | Tensor tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
861 | static PythonArgParser parser({ | ||||
862 | "tensor(PyObject* data, *, ScalarType dtype=None, Device? device=None, bool pin_memory=False, bool requires_grad=False, DimnameList? names=None)", | ||||
863 | }); | ||||
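// Illustrative Python-level call:
//   torch.tensor([[1., 2.], [3., 4.]], dtype=torch.float64, device="cpu", requires_grad=True)
// Data is always copied here (copy_variables/copy_numpy are true below), and the dtype is
// inferred from the data when dtype=None.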
864 | |||||
865 | constexpr int ctor_num_args = 6; | ||||
866 | ParsedArgs<ctor_num_args> parsed_args; | ||||
867 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
868 | if (r.idx == 0) { | ||||
869 | PyObject* data = r.pyobject(0); | ||||
870 | if (THPVariable_Check(data)) { | ||||
871 | auto ret = PyErr_WarnEx(PyExc_UserWarning, | ||||
872 | "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() " | ||||
873 | "or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).", 1); | ||||
874 | if (ret != 0) throw python_error(); | ||||
875 | } | ||||
876 | |||||
877 | bool type_inference = r.isNone(1); | ||||
878 | bool pin_memory = r.toBool(3); | ||||
879 | bool args_requires_grad = r.toBool(4); | ||||
880 | auto new_tensor = internal_new_from_data( | ||||
881 | typeIdWithDefault(r, 2, dispatch_key), | ||||
882 | r.scalartypeWithDefault(1, scalar_type), | ||||
883 | r.deviceOptional(2), | ||||
884 | data, | ||||
885 | /*copy_variables=*/true, | ||||
886 | /*copy_numpy=*/true, | ||||
887 | /*type_inference=*/type_inference, | ||||
888 | pin_memory); | ||||
889 | auto names = r.toDimnameListOptional(5); | ||||
890 | if (names) { | ||||
891 | at::namedinference::propagate_names(new_tensor, *names, /*validate_names=*/true); | ||||
892 | } | ||||
893 | new_tensor.detach_(); // ensure new_tensor is a leaf node | ||||
894 | new_tensor.set_requires_grad(args_requires_grad); | ||||
895 | return new_tensor; | ||||
896 | } | ||||
897 | throw std::runtime_error("tensor(): invalid arguments"); | ||||
898 | } | ||||
899 | |||||
900 | Tensor as_tensor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
901 | // TODO: add requires_grad once we decide on semantics for sharing data. | ||||
902 | static PythonArgParser parser({ | ||||
903 | "as_tensor(PyObject* data, *, ScalarType dtype=None, Device? device=None)", | ||||
904 | }); | ||||
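// Illustrative Python-level call: torch.as_tensor(numpy_array). Unlike torch.tensor(),
// this path passes copy_variables=false / copy_numpy=false below, so existing tensors and
// NumPy arrays are reused without a copy when dtype and device already match.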
905 | |||||
906 | ParsedArgs<3> parsed_args; | ||||
907 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
908 | if (r.idx == 0) { | ||||
909 | bool type_inference = r.isNone(1); | ||||
910 | return internal_new_from_data( | ||||
911 | typeIdWithDefault(r, 2, dispatch_key), | ||||
912 | r.scalartypeWithDefault(1, scalar_type), | ||||
913 | r.deviceOptional(2), | ||||
914 | r.pyobject(0), | ||||
915 | /*copy_variables=*/false, | ||||
916 | /*copy_numpy=*/false, | ||||
917 | /*type_inference=*/type_inference); | ||||
918 | } | ||||
919 | throw std::runtime_error("as_tensor(): invalid arguments"); | ||||
920 | } | ||||
921 | |||||
922 | Tensor new_tensor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) { | ||||
923 | static PythonArgParser parser({ | ||||
924 | "new_tensor(PyObject* data, *, ScalarType dtype=None, Device? device=None, bool requires_grad=False)", | ||||
925 | }); | ||||
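// Illustrative Python-level call: x.new_tensor([1, 2, 3]). Unlike torch.as_tensor(),
// this always copies the data (new_from_data_copy below) and, by default, takes dtype
// and device from x rather than from the data.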
926 | |||||
927 | ParsedArgs<4> parsed_args; | ||||
928 | auto r = parser.parse(args, kwargs, parsed_args); | ||||
929 | if (r.idx == 0) { | ||||
| |||||
930 | PyObject* data = r.pyobject(0); | ||||
931 | if (THPVariable_Check(data)) { | ||||
932 | auto ret = PyErr_WarnEx(PyExc_UserWarning, | ||||
933 | "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() " | ||||
934 | "or sourceTensor.clone().detach().requires_grad_(True), rather than tensor.new_tensor(sourceTensor).", 1); | ||||
935 | if (ret != 0) throw python_error(); | ||||
936 | } | ||||
937 | |||||
938 | bool args_requires_grad = r.toBool(3); | ||||
939 | auto new_tensor = new_from_data_copy( | ||||
940 | typeIdWithDefault(r, 2, dispatch_key), | ||||
941 | r.scalartypeWithDefault(1, scalar_type), | ||||
942 | r.deviceOptional(2), | ||||
943 | data); | ||||
944 | new_tensor.detach_(); // ensure new_tensor is a leaf node | ||||
945 | new_tensor.set_requires_grad(args_requires_grad); | ||||
946 | return new_tensor; | ||||
947 | } | ||||
948 | throw std::runtime_error("new_tensor(): invalid arguments"); | ||||
949 | } | ||||
950 | |||||
951 | }} // namespace torch::utils |
1 | #ifndef PySequence_Fast |
2 | struct _object; |
3 | typedef struct _object PyObject; |
4 | PyObject* clang_analyzer_PyObject_New_Reference(); |
5 | PyObject* PySequence_Fast(PyObject *o, const char *m) { |
6 | return clang_analyzer_PyObject_New_Reference(); |
7 | } |
8 | #else |
9 | #warning "API PySequence_Fast is defined as a macro." |
10 | #endif |