Bug Summary

File: /home/liujun/Analysis/pyrefcon_ws/Paddle/build/paddle/fluid/pybind/../../../../paddle/fluid/pybind/eager_py_layer.cc
Warning: line 164, column 18
PyObject ownership leak with reference count of 1
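The diagnostic means that, on the path traced below, the tuple created by PyTuple_New at line 164 of pylayer_method_apply still owns its only reference when the function exits through an exception, so the object can never be reclaimed. PyTuple_New returns a new reference that the caller must release or transfer on every path. A minimal sketch of the diagnosed pattern (illustrative names, not Paddle code):

    #include <Python.h>
    #include <stdexcept>

    // Sketch: an exception thrown between the allocation and the
    // cleanup leaks the tuple.
    PyObject* apply_like(PyObject* cls, Py_ssize_t args_size) {
      PyObject* forward_args = PyTuple_New(args_size + 1);  // new reference, count == 1
      PyObject* forward_fn = PyObject_GetAttrString(cls, "forward");
      if (!forward_fn) {
        // Throwing here skips the Py_XDECREF below, so forward_args
        // keeps its reference count of 1 forever.
        throw std::runtime_error("Get forward function failed.");
      }
      // ... build the argument tuple and call forward ...
      Py_XDECREF(forward_fn);
      Py_XDECREF(forward_args);  // only reached on the non-throwing path
      Py_RETURN_NONE;
    }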

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name eager_py_layer.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-display-progress -analyzer-output=html -analyzer-checker=python -analyzer-disable-checker=deadcode -analyzer-config suppress-inlined-defensive-checks=false -analyzer-config suppress-null-return-paths=false -analyzer-config crosscheck-with-z3=true -analyzer-config model-path=/opt/pyrefcon/lib/pyrefcon/models -analyzer-config experimental-enable-naive-ctu-analysis=true -analyzer-config ctu-dir=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output -analyzer-config ctu-index-name=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output/externalDefMap.txt -analyzer-config ctu-invocation-list=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output/invocations.yaml -analyzer-config display-ctu-progress=true -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +avx -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/paddle/fluid/pybind -resource-dir /opt/pyrefcon/lib/clang/13.0.0 -D HPPL_STUB_FUNC -D LAPACK_FOUND -D PADDLE_DISABLE_PROFILER -D PADDLE_DLL_EXPORT -D PADDLE_ON_INFERENCE -D PADDLE_USE_PTHREAD_BARRIER -D PADDLE_USE_PTHREAD_SPINLOCK -D PADDLE_VERSION=0.0.0 -D PADDLE_VERSION_INTEGER=0 -D PADDLE_WITH_AVX -D PADDLE_WITH_CRYPTO -D PADDLE_WITH_CUSTOM_DEVICE -D PADDLE_WITH_DNNL -D PADDLE_WITH_MKLML -D PADDLE_WITH_POCKETFFT -D PADDLE_WITH_SSE3 -D PADDLE_WITH_XBYAK -D STATIC_IR -D XBYAK64 -D XBYAK_NO_OP_NAMES -D paddle_EXPORTS -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build -I /home/liujun/Analysis/pyrefcon_ws/Paddle/paddle/fluid/framework/io -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/zlib/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/gflags/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/glog/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/third_party/eigen3 -I /home/liujun/Analysis/pyrefcon_ws/Paddle/third_party/threadpool -I /home/liujun/Analysis/pyrefcon_ws/Paddle/third_party/dlpack/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/xxhash/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/warpctc/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/warprnnt/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/utf8proc/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/mklml/include -I 
/home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/mkldnn/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/protobuf/include -I /opt/pyrefcon/include/python3.8 -I /home/liujun/Software/miniconda3/envs/paddle_venv/lib/python3.8/site-packages/numpy/core/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/pybind/src/extern_pybind/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/gloo/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/xbyak/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/xbyak/include/xbyak -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/cryptopp/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/pocketfft/src -I /home/liujun/Analysis/pyrefcon_ws/Paddle -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/../paddle/fluid/framework/io -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /opt/pyrefcon/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/paddle/fluid/pybind -ferror-limit 19 -fopenmp -fopenmp-cuda-parallel-target-regions -fgnuc-version=4.2.1 -fcxx-exceptions -fexceptions -fcolor-diagnostics -vectorize-loops -vectorize-slp -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output/reports -x c++ ../../../../paddle/fluid/pybind/eager_py_layer.cc

../../../../paddle/fluid/pybind/eager_py_layer.cc

1/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2Licensed under the Apache License, Version 2.0 (the "License");
3you may not use this file except in compliance with the License.
4You may obtain a copy of the License at
5http://www.apache.org/licenses/LICENSE-2.0
6Unless required by applicable law or agreed to in writing, software
7distributed under the License is distributed on an "AS IS" BASIS,
8WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9See the License for the specific language governing permissions and
10limitations under the License. */
11// disable numpy compile error
12#include <Python.h>
13// Avoid a problem with copysign defined in pyconfig.h on Windows.
14#ifdef copysign
15#undef copysign
16#endif
17
18#include <set>
19#include <string>
20#include <vector>
21
22#pragma GCC diagnostic ignored "-Wattributes"
23#include "paddle/fluid/eager/accumulation/accumulation_node.h"
24#include "paddle/fluid/eager/api/all.h"
25#include "paddle/fluid/eager/autograd_meta.h"
26#include "paddle/fluid/eager/pylayer/py_layer_node.h"
27#include "paddle/fluid/eager/utils.h"
28#include "paddle/fluid/framework/convert_utils.h"
29#include "paddle/fluid/memory/allocation/allocator.h"
30#include "paddle/fluid/memory/memcpy.h"
31#include "paddle/fluid/platform/enforce.h"
32#include "paddle/fluid/pybind/eager.h"
33#include "paddle/fluid/pybind/eager_utils.h"
34#include "paddle/fluid/pybind/exception.h"
35#include "paddle/phi/common/data_type.h"
36#include "paddle/phi/core/compat/convert_utils.h"
37#include "paddle/phi/core/dense_tensor.h"
38#include "pybind11/detail/internals.h"
39#include "pybind11/pytypes.h"
40#pragma GCC diagnostic ignored "-Wwrite-strings"
41#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
42
43namespace paddle {
44namespace pybind {
45
46PyTypeObject* p_pylayer_type;
47extern PyTypeObject* p_tensor_type;
48
49std::set<paddle::Tensor*> GetTensorsFromPyObject(PyObject* obj) {
50 std::set<paddle::Tensor*> result;
51 if (obj == nullptr) {
52 return result;
53 }
54 if (PyCheckTensor(obj)) {
55 result.insert(&reinterpret_cast<TensorObject*>(obj)->tensor); // NOLINT
56 } else if (PyList_Check(obj)) {
57 Py_ssize_t len = PyList_Size(obj);
58 for (Py_ssize_t i = 0; i < len; i++) {
59 if (PyCheckTensor(PyList_GetItem(obj, i))) {
60 result.insert(
61 &reinterpret_cast<TensorObject*>(PyList_GetItem(obj, i)) // NOLINT
62 ->tensor);
63 }
64 }
65 } else if (PyTuple_Check(obj)) {
66 Py_ssize_t len = PyTuple_Size(obj);
67 for (Py_ssize_t i = 0; i < len; i++) {
68 if (PyCheckTensor(PyTuple_GetItem(obj, i))) {
69 result.insert(
70 &reinterpret_cast<TensorObject*>(PyTuple_GetItem(obj, i)) // NOLINT
71 ->tensor);
72 }
73 }
74 }
75 return result;
76}
77
78PyObject* PyLayerNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
79 PyObject* obj = type->tp_alloc(type, 0);
80 if (obj) {
81 auto v = reinterpret_cast<PyLayerObject*>(obj);
82 v->materialize_grads = true;
83 v->container_be_packed = false;
84 new (&v->grad_node) std::weak_ptr<egr::GradNodePyLayer>();
85 new (&v->forward_input_tensor_is_duplicable) std::vector<bool>();
86 new (&v->forward_output_tensor_is_duplicable) std::vector<bool>();
87 }
88 return obj;
89}
90
91static void PyLayerDealloc(PyLayerObject* self) {
92 if (self->container) {
93 Py_DECREF(self->container);
94 }
95 if (self->non_differentiable) {
96 Py_DECREF(self->non_differentiable);
97 }
98 if (self->not_inplace_tensors) {
99 Py_DECREF(self->not_inplace_tensors);
100 }
101 self->grad_node.~weak_ptr<egr::GradNodePyLayer>();
102 self->unpack_hook = nullptr;
103 self->forward_input_tensor_is_duplicable.~vector();
104 self->forward_output_tensor_is_duplicable.~vector();
105 Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
106}
107
108PyObject* pylayer_method_name(PyObject* self, PyObject* noargs) {
109 EAGER_TRY
110 return ToPyObject(
111 reinterpret_cast<PyLayerObject*>(self)->grad_node.lock()->name());
112 EAGER_CATCH_AND_THROW_RETURN_NULL
113}
114
115PyObject* new_tensor_with_impl(paddle::Tensor* tensor) {
116 PyObject* obj = p_tensor_type->tp_alloc(p_tensor_type, 0);
117 if (obj) {
118 auto v = reinterpret_cast<TensorObject*>(obj);
119 new (&(v->tensor)) paddle::Tensor();
120 v->tensor.set_impl(tensor->impl());
121 v->tensor.set_name(egr::Controller::Instance().GenerateUniqueName());
122 } else {
123 PADDLE_THROW(platform::errors::Fatal(
124     "tp_alloc return null, can not new a PyObject."))
125 }
126 return obj;
127}
128
129PyObject* pylayer_method_apply(PyObject* cls,
130 PyObject* args,
131 PyObject* kwargs) {
132 EAGER_TRY
133 VLOG(6) << "Begin run PyLayer apply...";

  [1] Assuming the condition is false
  [2] '?' condition is true
134 PyObject* backward_function =
135 PyObject_GetAttrString(cls, "_backward_function");
136 if (!backward_function) {
  [3] Assuming 'backward_function' is non-null
  [4] Taking false branch

137 PADDLE_THROW(paddle::platform::errors::InvalidArgument(
138     "Get _backward_function failed."));
139 }
140 PyLayerObject* ctx = reinterpret_cast<PyLayerObject*>(
141 PyObject_CallFunctionObjArgs(backward_function, nullptr));
142 if (!ctx) {
  [5] Assuming 'ctx' is non-null
  [6] Taking false branch

143 PADDLE_THROW(paddle::platform::errors::External(
144     pybind11::detail::error_string().c_str()));
145 return nullptr;
146 }
147 VLOG(6) << "PyLayer construct PyLayerContext finish...";

  [7] Assuming the condition is false
  [8] '?' condition is true
148
149 bool require_any_grad = false;
150
151 size_t inputs_size = 0;
152 size_t args_size = 0;
153 size_t kwargs_size = 0;
154 PyObject* forward_args = nullptr;
155 PyObject* kwargs_value_list = nullptr;
156 if (kwargs) {
  [9] Assuming 'kwargs' is null
  [10] Taking false branch
157 kwargs_size = PyDict_Size(kwargs);
158 kwargs_value_list = PyDict_Values(kwargs);
159 }
160 if (args) {
  [11] Assuming 'args' is null
  [12] Taking false branch

161 args_size = PyTuple_GET_SIZE(args);
162 }
163 inputs_size = kwargs_size + args_size;
164 forward_args = PyTuple_New(args_size + 1);
  [13] Calling 'PyTuple_New'
  [15] Returning from 'PyTuple_New'
  [21] PyObject ownership leak with reference count of 1

165 Py_INCREF(ctx);
166 PyTuple_SET_ITEM(forward_args, 0, reinterpret_cast<PyObject*>(ctx));
167
168 std::vector<std::vector<egr::AutogradMeta*>> inputs_autograd_meta;
169 inputs_autograd_meta.reserve(inputs_size);
170 std::vector<std::vector<paddle::Tensor*>> inputs_tensor;
171 inputs_tensor.reserve(inputs_size);
172 ctx->forward_input_tensor_is_duplicable.clear();
173 ctx->forward_input_tensor_is_duplicable.reserve(inputs_size);
174 std::set<phi::TensorBase*> input_tensorbases;
175 for (size_t i = 0; i < inputs_size; i++) {
  [16] Loop condition is false. Execution continues on line 250
176 PyObject* obj = nullptr;
177 if (i >= args_size) {
178 obj = PyList_GetItem(kwargs_value_list, i - args_size);
179 } else {
180 obj = PyTuple_GET_ITEM(args, i);
181 }
182 if (PyCheckTensor(obj)) {
183 input_tensorbases.insert(
184 reinterpret_cast<TensorObject*>(obj)->tensor.impl().get());
185 auto autograd_meta = egr::EagerUtils::nullable_autograd_meta(
186 reinterpret_cast<TensorObject*>(obj)->tensor);
187 inputs_autograd_meta.push_back({autograd_meta});
188 inputs_tensor.push_back(
189 {&(reinterpret_cast<TensorObject*>(obj)->tensor)}); // NOLINT
190 bool stop_gradient =
191 autograd_meta == nullptr ? true : autograd_meta->StopGradient();
192 if (!stop_gradient) {
193 require_any_grad = true;
194 }
195 ctx->forward_input_tensor_is_duplicable.push_back(false);
196 } else if (PyList_Check(obj)) {
197 std::vector<paddle::Tensor*> tensors;
198 Py_ssize_t len = PyList_Size(obj);
199 for (Py_ssize_t j = 0; j < len; j++) {
200 PyObject* o = PyList_GetItem(obj, j);
201 if (PyCheckTensor(o)) {
202 input_tensorbases.insert(
203 reinterpret_cast<TensorObject*>(o)->tensor.impl().get());
204 tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
205 }
206 }
207 if (!tensors.empty()) {
208 auto autograd_meta = egr::EagerUtils::nullable_autograd_meta(tensors);
209 for (auto iter : autograd_meta) {
210 bool stop_gradient = iter == nullptr ? true : iter->StopGradient();
211 if (!stop_gradient) {
212 require_any_grad = true;
213 }
214 }
215 inputs_autograd_meta.push_back(autograd_meta);
216 inputs_tensor.push_back(tensors);
217 ctx->forward_input_tensor_is_duplicable.push_back(true);
218 }
219 } else if (PyTuple_Check(obj)) {
220 std::vector<paddle::Tensor*> tensors;
221 Py_ssize_t len = PyTuple_Size(obj);
222 for (Py_ssize_t j = 0; j < len; j++) {
223 PyObject* o = PyTuple_GetItem(obj, j);
224 if (PyCheckTensor(o)) {
225 input_tensorbases.insert(
226 reinterpret_cast<TensorObject*>(o)->tensor.impl().get());
227 tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
228 }
229 }
230 if (!tensors.empty()) {
231 auto autograd_meta = egr::EagerUtils::nullable_autograd_meta(tensors);
232 for (auto iter : autograd_meta) {
233 bool stop_gradient = iter == nullptr ? true : iter->StopGradient();
234 if (!stop_gradient) {
235 require_any_grad = true;
236 }
237 }
238 inputs_autograd_meta.push_back(autograd_meta);
239 inputs_tensor.push_back(tensors);
240 ctx->forward_input_tensor_is_duplicable.push_back(true);
241 }
242 }
243
244 if (i < args_size) {
245 Py_INCREF(obj);
246 PyTuple_SET_ITEM(forward_args, i + 1, obj);
247 }
248 }
249
250 VLOG(6)

  [17] Assuming the condition is false
  [18] '?' condition is true

251     << "PyLayer forward args is ready, begin call user's forward function...";
252 // call forward
253 auto forward_fn = PyObject_GetAttrString(cls, "forward");
254 if (!forward_fn) {
  [19] Assuming 'forward_fn' is null
  [20] Taking true branch

255 PADDLE_THROW(paddle::platform::errors::InvalidArgument(
256     "Get forward function failed."));
257 }
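Note: this throwing branch (events [19]-[21]) is where the reference created at line 164 is abandoned; none of the Py_XDECREF calls at lines 454-458 are reached once PADDLE_THROW raises. One possible fix, sketched here rather than taken from an upstream patch, is to release the owned references before raising, mirroring the cleanup already performed on the `!outputs` path at lines 263-266:

    if (!forward_fn) {
      Py_XDECREF(forward_args);
      Py_XDECREF(kwargs_value_list);
      Py_XDECREF(backward_function);
      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
          "Get forward function failed."));
    }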
258 bool trace_backward = egr::Controller::Instance().HasGrad();
259 egr::Controller::Instance().SetHasGrad(false);
260 auto outputs = PyObject_Call(forward_fn, forward_args, kwargs);
261 egr::Controller::Instance().SetHasGrad(trace_backward);
262 if (!outputs) {
263 Py_XDECREF(forward_args);
264 Py_XDECREF(kwargs_value_list);
265 Py_XDECREF(backward_function);
266 Py_XDECREF(forward_fn);
267 return nullptr;
268 }
269
270 PyObject* outputs_tuple = nullptr;
271 if (PyTuple_Check(outputs)) {
272 outputs_tuple = outputs;
273 } else if (PyList_Check(outputs)) {
274 outputs_tuple = PyList_AsTuple(outputs);
275 } else {
276 outputs_tuple = PyTuple_New(1);
277 Py_INCREF(outputs);
278 PyTuple_SET_ITEM(outputs_tuple, 0, outputs);
279 }
280
281 std::set<paddle::Tensor*> inplace_tensors;
282 std::set<phi::TensorBase*> not_inplace_tensorbases;
283 auto not_inplace_tensors = GetTensorsFromPyObject(ctx->not_inplace_tensors);
284 for (auto it : not_inplace_tensors) {
285 not_inplace_tensorbases.insert(it->impl().get());
286 }
287
288 auto outputs_size = PyTuple_GET_SIZE(outputs_tuple);
289 std::vector<std::vector<paddle::Tensor*>> outputs_tensor;
290 outputs_tensor.reserve(outputs_size);
291 std::vector<std::vector<egr::AutogradMeta*>> outputs_autograd_meta;
292 outputs_autograd_meta.reserve(outputs_size);
293 ctx->forward_output_tensor_is_duplicable.clear();
294 ctx->forward_output_tensor_is_duplicable.reserve(outputs_size);
295 for (Py_ssize_t i = 0; i < outputs_size; i++) {
296 PyObject* obj = PyTuple_GET_ITEM(outputs_tuple, i);
297 if (PyCheckTensor(obj)) {
298 outputs_tensor.push_back(
299 {&(reinterpret_cast<TensorObject*>(obj)->tensor)});
300 outputs_autograd_meta.push_back({egr::EagerUtils::autograd_meta(
301 &(reinterpret_cast<TensorObject*>(obj)->tensor))});
302 ctx->forward_output_tensor_is_duplicable.push_back(false);
303 if (input_tensorbases.count(
304 reinterpret_cast<TensorObject*>(obj)->tensor.impl().get())) {
305 if (not_inplace_tensorbases.count(
306 reinterpret_cast<TensorObject*>(obj)->tensor.impl().get())) {
307 PyTuple_SET_ITEM(outputs_tuple,
308     i,
309     new_tensor_with_impl(&(
310         reinterpret_cast<TensorObject*>(obj)->tensor)));
311 } else {
312 inplace_tensors.insert(
313 &(reinterpret_cast<TensorObject*>(obj)->tensor));
314 }
315 }
316 } else if (PyList_Check(obj)) {
317 std::vector<paddle::Tensor*> tensors;
318 Py_ssize_t len = PyList_Size(obj);
319 for (Py_ssize_t j = 0; j < len; j++) {
320 PyObject* o = PyList_GetItem(obj, j);
321 if (PyCheckTensor(o)) {
322 tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
323 if (input_tensorbases.count(
324 reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) {
325 if (not_inplace_tensorbases.count(
326 reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) {
327 PyTuple_SetItem(obj,
328 j,
329 new_tensor_with_impl(&(
330 reinterpret_cast<TensorObject*>(o)->tensor)));
331 } else {
332 inplace_tensors.insert(
333 &(reinterpret_cast<TensorObject*>(o)->tensor));
334 }
335 }
336 }
337 }
338 if (!tensors.empty()) {
339 outputs_tensor.push_back(tensors);
340 outputs_autograd_meta.push_back(
341 egr::EagerUtils::autograd_meta(&tensors));
342 ctx->forward_output_tensor_is_duplicable.push_back(true);
343 }
344 } else if (PyTuple_Check(obj)) {
345 std::vector<paddle::Tensor*> tensors;
346 Py_ssize_t len = PyTuple_Size(obj);
347 for (Py_ssize_t j = 0; j < len; j++) {
348 PyObject* o = PyTuple_GetItem(obj, j);
349 if (PyCheckTensor(o)) {
350 tensors.push_back(&(reinterpret_cast<TensorObject*>(o)->tensor));
351 if (input_tensorbases.count(
352 reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) {
353 if (not_inplace_tensorbases.count(
354 reinterpret_cast<TensorObject*>(o)->tensor.impl().get())) {
355 PyTuple_SetItem(obj,
356 j,
357 new_tensor_with_impl(&(
358 reinterpret_cast<TensorObject*>(o)->tensor)));
359 } else {
360 inplace_tensors.insert(
361 &(reinterpret_cast<TensorObject*>(o)->tensor));
362 }
363 }
364 }
365 }
366 if (!tensors.empty()) {
367 outputs_tensor.push_back(tensors);
368 outputs_autograd_meta.push_back(
369 egr::EagerUtils::autograd_meta(&tensors));
370 ctx->forward_output_tensor_is_duplicable.push_back(true);
371 }
372 }
373 }
374
375 if (outputs_tensor.empty()) {
376 PADDLE_THROW(platform::errors::InvalidArgument(
377     "At least one output of `PyLayer.forward` is a `Tensor`."));
378 }
379 VLOG(6) << "PyLayer forward function finish...";
380
381 if (require_any_grad && trace_backward) {
382 auto non_differentiable = GetTensorsFromPyObject(ctx->non_differentiable);
383 for (size_t i = 0; i < outputs_autograd_meta.size(); i++) {
384 for (size_t j = 0; j < outputs_autograd_meta[i].size(); j++) {
385 if (non_differentiable.find(outputs_tensor[i][j]) !=
386 non_differentiable.end()) {
387 outputs_autograd_meta[i][j]->SetStopGradient(true);
388 } else {
389 outputs_autograd_meta[i][j]->SetStopGradient(false);
390 }
391 }
392 }
393
394 for (auto inplace_tensor : inplace_tensors) {
395 auto inplace_tensor_autograd_meta =
396 egr::EagerUtils::autograd_meta(inplace_tensor);
397 PADDLE_ENFORCE_EQ(!inplace_tensor_autograd_meta->StopGradient() &&
398         egr::EagerUtils::IsLeafTensor(*inplace_tensor),
399     false,
400     paddle::platform::errors::InvalidArgument(
401         "Leaf Var (%s) that doesn't stop gradient "
402         "can't use inplace strategy.",
403         inplace_tensor->name()));
404 inplace_tensor->bump_inplace_version();
405 VLOG(3) << "Tensor(" << inplace_tensor->name()
406     << ") uses Inplace Strategy.";
407 }
408
409 auto grad_node =
410 std::make_shared<egr::GradNodePyLayer>(reinterpret_cast<PyObject*>(ctx),
411 outputs_autograd_meta.size(),
412 inputs_autograd_meta.size());
413 ctx->grad_node = grad_node;
414
415 if (ctx->materialize_grads) {
416 grad_node->SaveForwardOutputsMeta(outputs_tensor);
417 }
418
419 for (size_t i = 0; i < inputs_autograd_meta.size(); i++) {
420 if (ctx->forward_input_tensor_is_duplicable[i]) {
421 for (auto t : inputs_tensor[i]) {
422 grad_node->SetGradOutMeta(*t, i);
423 }
424 } else {
425 grad_node->SetGradOutMeta(*inputs_tensor[i][0], i);
426 }
427 }
428
429 for (size_t i = 0; i < outputs_autograd_meta.size(); i++) {
430 if (ctx->forward_output_tensor_is_duplicable[i]) {
431 egr::EagerUtils::SetOutRankWithSlot(&outputs_autograd_meta[i], i);
432 egr::EagerUtils::SetHistory(&outputs_autograd_meta[i], grad_node);
433 for (auto t : outputs_tensor[i]) {
434 grad_node->SetGradInMeta(*t, i);
435 }
436 } else {
437 egr::EagerUtils::SetOutRankWithSlot(outputs_autograd_meta[i][0], i);
438 egr::EagerUtils::SetHistory(outputs_autograd_meta[i][0], grad_node);
439 grad_node->SetGradInMeta(*outputs_tensor[i][0], i);
440 }
441 }
442 VLOG(6) << "PyLayer construct backward node finish...";
443 }
444
445 if (outputs_size == 1) {
446 if (!PyTuple_Check(outputs) && !PyList_Check(outputs)) {
447 Py_XDECREF(outputs);
448 outputs = PyTuple_GetItem(outputs_tuple, 0);
449 Py_INCREF(outputs);
450 Py_XDECREF(outputs_tuple);
451 }
452 }
453
454 Py_XDECREF(forward_args);
455 Py_XDECREF(kwargs_value_list);
456 Py_XDECREF(backward_function);
457 Py_XDECREF(forward_fn);
458 Py_XDECREF(ctx);
459
460 return outputs;
461 EAGER_CATCH_AND_THROW_RETURN_NULL
462}
463
464PyObject* call_unpack_hook(PyLayerObject* self) {
465 auto unpack_hook = self->unpack_hook;
466 auto packed_value = self->container;
467
468 auto packed_value_size = PyTuple_GET_SIZE(packed_value);
469 auto unpacked_value = PyTuple_New(packed_value_size);
470
471 for (Py_ssize_t i = 0; i < packed_value_size; i++) {
472 PyObject* obj = PyTuple_GET_ITEM(packed_value, i);
473 if (PyList_Check(obj)) {
474 Py_ssize_t len = PyList_Size(obj);
475 auto tmp_list = PyList_New(len);
476 for (Py_ssize_t j = 0; j < len; j++) {
477 PyObject* o = PyList_GetItem(obj, j);
478 PyTuple_SET_ITEM(tmp_list,
479     j,
480     reinterpret_cast<PyObject*>(((*unpack_hook)(
481         reinterpret_cast<void*>(o), nullptr))));
482 }
483 PyTuple_SET_ITEM(unpacked_value, i, tmp_list);
484 } else if (PyTuple_Check(obj)) {
485 Py_ssize_t len = PyTuple_Size(obj);
486 auto tmp_tuple = PyTuple_New(len);
487 for (Py_ssize_t j = 0; j < len; j++) {
488 PyObject* o = PyTuple_GetItem(obj, j);
489 PyTuple_SET_ITEM(tmp_tuple,
490     j,
491     reinterpret_cast<PyObject*>((*unpack_hook)(
492         reinterpret_cast<void*>(o), nullptr)));
493 }
494 PyTuple_SET_ITEM(unpacked_value, i, tmp_tuple);
495 } else {
496 PyTuple_SET_ITEM(unpacked_value,
497     i,
498     reinterpret_cast<PyObject*>((*unpack_hook)(
499         reinterpret_cast<void*>(obj), nullptr)));
500 }
501 }
502
503 return unpacked_value;
504}
505
506PyObject* tensor_properties_get_container(PyLayerObject* self, void* closure) {
507 EAGER_TRY
508 if (self->container == nullptr) {
509 RETURN_PY_NONE;
510 }
511 if (self->container_be_packed) {
512 return call_unpack_hook(self);
513 } else {
514 Py_INCREF(self->container);
515 return self->container;
516 }
517 EAGER_CATCH_AND_THROW_RETURN_NULL
518}
519
520void call_pack_hook(PyLayerObject* self, PyObject* value) {
521 PyObject* saved_value = nullptr;
522 if (PyTuple_Check(value)) {
523 saved_value = value;
524 } else if (PyList_Check(value)) {
525 saved_value = PyList_AsTuple(value);
526 } else {
527 saved_value = PyTuple_New(1);
528 Py_INCREF(value);
529 PyTuple_SET_ITEM(saved_value, 0, value);
530 }
531
532 auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
533 self->unpack_hook = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
534
535 auto saved_value_size = PyTuple_GET_SIZE(saved_value);
536 PyObject* packed_value = PyTuple_New(saved_value_size);
537
538 for (Py_ssize_t i = 0; i < saved_value_size; i++) {
539 PyObject* obj = PyTuple_GET_ITEM(saved_value, i);
540 if (PyCheckTensor(obj)) {
541 PyTuple_SET_ITEM(packed_value,
542     i,
543     reinterpret_cast<PyObject*>(
544         (*pack_hook)(reinterpret_cast<void*>(obj))));
545 } else if (PyList_Check(obj)) {
546 Py_ssize_t len = PyList_Size(obj);
547 auto tmp_list = PyList_New(len);
548 for (Py_ssize_t j = 0; j < len; j++) {
549 PyObject* o = PyList_GetItem(obj, j);
550 if (PyCheckTensor(o)) {
551 PyTuple_SET_ITEM(tmp_list,
552     j,
553     reinterpret_cast<PyObject*>(
554         (*pack_hook)(reinterpret_cast<void*>(o))));
555 } else {
556 PADDLE_THROW(platform::errors::InvalidArgument(
557     "save_for_backward only support Tensor, list of Tensor, tuple of "
558     "Tensor."));
559 }
560 }
561 PyTuple_SET_ITEM(packed_value, i, tmp_list);
562 } else if (PyTuple_Check(obj)) {
563 Py_ssize_t len = PyTuple_Size(obj);
564 auto tmp_tuple = PyTuple_New(len);
565 for (Py_ssize_t j = 0; j < len; j++) {
566 PyObject* o = PyTuple_GetItem(obj, j);
567 if (PyCheckTensor(o)) {
568 PyTuple_SET_ITEM(tmp_tuple,
569     j,
570     reinterpret_cast<PyObject*>(
571         (*pack_hook)(reinterpret_cast<void*>(o))));
572 } else {
573 PADDLE_THROW(platform::errors::InvalidArgument(
574     "save_for_backward only support Tensor, list of Tensor, tuple of "
575     "Tensor."));
576 }
577 }
578 PyTuple_SET_ITEM(packed_value, i, tmp_tuple);
579 } else {
580 PADDLE_THROW(platform::errors::InvalidArgument(
581     "save_for_backward only support Tensor, list of Tensor, tuple of "
582     "Tensor."));
583 }
584 }
585
586 if (PyTuple_Check(value)) {
587 Py_XDECREF(saved_value);
588 }
589
590 Py_XDECREF(self->container);
591 self->container = packed_value;
592 self->container_be_packed = true;
593}
594
595int tensor_properties_set_container(PyLayerObject* self,
596 PyObject* value,
597 void* closure) {
598 EAGER_TRY
599 if (egr::SavedTensorsHooks::GetInstance().IsEnable()) {
600 call_pack_hook(self, value);
601 } else {
602 Py_XINCREF(value);
603 Py_XDECREF(self->container);
604 self->container = value;
605 }
606 return 0;
607 EAGER_CATCH_AND_THROW_RETURN_NEG
608}
609
610PyObject* tensor_properties_get_non_differentiable(PyLayerObject* self,
611 void* closure) {
612 EAGER_TRY
613 if (self->non_differentiable == nullptr) {
614 RETURN_PY_NONE;
615 }
616 Py_INCREF(self->non_differentiable);
617 return self->non_differentiable;
618 EAGER_CATCH_AND_THROW_RETURN_NULL
619}
620
621int tensor_properties_set_non_differentiable(PyLayerObject* self,
622 PyObject* value,
623 void* closure) {
624 EAGER_TRY
625 Py_XINCREF(value);
626 Py_XDECREF(self->non_differentiable);
627 self->non_differentiable = value;
628 return 0;
629 EAGER_CATCH_AND_THROW_RETURN_NEG
630}
631
632PyObject* tensor_properties_get_not_inplace_tensors(PyLayerObject* self,
633 void* closure) {
634 EAGER_TRY
635 if (self->not_inplace_tensors == nullptr) {
636 RETURN_PY_NONE;
637 }
638 Py_INCREF(self->not_inplace_tensors);
639 return self->not_inplace_tensors;
640 EAGER_CATCH_AND_THROW_RETURN_NULL
641}
642
643int tensor_properties_set_not_inplace_tensors(PyLayerObject* self,
644 PyObject* value,
645 void* closure) {
646 EAGER_TRY
647 Py_XINCREF(value);
648 Py_XDECREF(self->not_inplace_tensors);
649 self->not_inplace_tensors = value;
650 return 0;
651 EAGER_CATCH_AND_THROW_RETURN_NEG
652}
653
654int tensor_properties_set_materialize_grads(PyLayerObject* self,
655 PyObject* value,
656 void* closure) {
657 EAGER_TRY
658 self->materialize_grads = CastPyArg2AttrBoolean(value, 0);
659 return 0;
660 EAGER_CATCH_AND_THROW_RETURN_NEG
661}
662
663PyMethodDef pylayer_methods[] = {{"name", // NOLINT
664 (PyCFunction)(void (*)())pylayer_method_name,
665 METH_NOARGS,
666 nullptr},
667 {"apply",
668 (PyCFunction)(void (*)())pylayer_method_apply,
669 METH_CLASS | METH_VARARGS | METH_KEYWORDS,
670 nullptr},
671 {nullptr, nullptr, 0, nullptr}};
672
673struct PyGetSetDef pylayer_properties[] { // NOLINT
674 {"container",
675 (getter)tensor_properties_get_container,
676 (setter)tensor_properties_set_container,
677 nullptr,
678 nullptr},
679 {"non_differentiable",
680 (getter)tensor_properties_get_non_differentiable,
681 (setter)tensor_properties_set_non_differentiable,
682 nullptr,
683 nullptr},
684 {"not_inplace_tensors",
685 (getter)tensor_properties_get_not_inplace_tensors,
686 (setter)tensor_properties_set_not_inplace_tensors,
687 nullptr,
688 nullptr},
689 {"materialize_grads",
690 nullptr,
691 (setter)tensor_properties_set_materialize_grads,
692 nullptr,
693 nullptr},
694 {
695 nullptr, nullptr, nullptr, nullptr, nullptr
696 }
697};
698
699void BindEagerPyLayer(PyObject* module) {
700 auto heap_type = reinterpret_cast<PyHeapTypeObject*>(
701 PyType_Type.tp_alloc(&PyType_Type, 0));
702 heap_type->ht_name = ToPyObject("PyLayer");
703 heap_type->ht_qualname = ToPyObject("PyLayer");
704 auto type = &heap_type->ht_type;
705 type->tp_name = "PyLayer";
706 type->tp_basicsize = sizeof(PyLayerObject);
707 type->tp_dealloc = (destructor)PyLayerDealloc;
708 type->tp_methods = pylayer_methods;
709 type->tp_getset = pylayer_properties;
710 type->tp_new = (newfunc)PyLayerNew;
711 Py_INCREF(&PyBaseObject_Type);
712 type->tp_base = reinterpret_cast<PyTypeObject*>(&PyBaseObject_Type);
713 type->tp_flags |=
714     Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
715#if PY_VERSION_HEX >= 0x03050000
716 type->tp_as_async = &heap_type->as_async;
717#endif
718 p_pylayer_type = type;
719
720 if (PyType_Ready(type) < 0) {
721 PADDLE_THROW(platform::errors::Fatal(
722     "Init Paddle error in BindEager(PyType_Ready)."));
723 return;
724 }
725
726 Py_INCREF(type);
727 if (PyModule_AddObject(module, "PyLayer", reinterpret_cast<PyObject*>(type)) <
728 0) {
729 Py_DECREF(type);
730 Py_DECREF(module);
731 PADDLE_THROW(platform::errors::Fatal(
732     "Init Paddle error in BindEager(PyModule_AddObject)."));
733 return;
734 }
735}
736
737} // namespace pybind
738} // namespace paddle
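
Path summary: the analyzer enters pylayer_method_apply with args and kwargs both null ([9]-[12]), so forward_args becomes a 1-element tuple created at line 164 ([13]-[15]); the input loop is skipped ([16]); PyObject_GetAttrString(cls, "forward") returns null ([19]), taking the PADDLE_THROW branch at line 255 ([20]); the function then unwinds without ever reaching the Py_XDECREF(forward_args) at line 454, leaving the tuple with reference count 1 ([21]).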

/opt/pyrefcon/lib/pyrefcon/models/PyTuple_New.model

1#ifndef PyTuple_New
2struct _object;
3typedef struct _object PyObject;
4PyObject* clang_analyzer_PyObject_New_Reference();
5PyObject* PyTuple_New(Py_ssize_t len) {
6 return clang_analyzer_PyObject_New_Reference();
  [14] Setting reference count to 1
7}
8#else
9#warning "API PyTuple_New is defined as a macro."
10#endif
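
The model above is what PyRefcon substitutes for PyTuple_New during analysis: clang_analyzer_PyObject_New_Reference yields a symbolic PyObject whose reference count is set to 1 (path event [14], which is why the event numbering in the main listing jumps from [13] to [15]). The checker then requires the caller to release or transfer that reference on every path. A hedged sketch of the contract being enforced (do_work is a hypothetical helper, not a CPython or Paddle API):

    #include <Python.h>

    int do_work(PyObject* t);  // hypothetical helper, for illustration only

    PyObject* make_tuple(Py_ssize_t n) {
      PyObject* t = PyTuple_New(n);  // reference count set to 1; we own it
      if (!t) {
        return nullptr;              // creation failed, nothing to release
      }
      if (do_work(t) != 0) {         // failure path: release before leaving
        Py_DECREF(t);
        return nullptr;
      }
      return t;                      // ownership transfers to our caller
    }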