Bug Summary

File: /home/liujun/Analysis/pyrefcon_ws/Paddle/build/paddle/fluid/eager/pylayer/../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc
Warning: line 125, column 21
PyObject ownership leak with reference count of 1

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name py_layer_node.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-display-progress -analyzer-output=html -analyzer-checker=python -analyzer-disable-checker=deadcode -analyzer-config suppress-inlined-defensive-checks=false -analyzer-config suppress-null-return-paths=false -analyzer-config crosscheck-with-z3=true -analyzer-config model-path=/opt/pyrefcon/lib/pyrefcon/models -analyzer-config experimental-enable-naive-ctu-analysis=true -analyzer-config ctu-dir=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output -analyzer-config ctu-index-name=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output/externalDefMap.txt -analyzer-config ctu-invocation-list=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output/invocations.yaml -analyzer-config display-ctu-progress=true -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +avx -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/paddle/fluid/eager/pylayer -resource-dir /opt/pyrefcon/lib/clang/13.0.0 -D HPPL_STUB_FUNC -D 
LAPACK_FOUND -D PADDLE_DISABLE_PROFILER -D PADDLE_DLL_EXPORT -D PADDLE_ON_INFERENCE -D PADDLE_USE_PTHREAD_BARRIER -D PADDLE_USE_PTHREAD_SPINLOCK -D PADDLE_VERSION=0.0.0 -D PADDLE_VERSION_INTEGER=0 -D PADDLE_WITH_AVX -D PADDLE_WITH_CRYPTO -D PADDLE_WITH_CUSTOM_DEVICE -D PADDLE_WITH_DNNL -D PADDLE_WITH_MKLML -D PADDLE_WITH_POCKETFFT -D PADDLE_WITH_SSE3 -D PADDLE_WITH_XBYAK -D STATIC_IR -D XBYAK64 -D XBYAK_NO_OP_NAMES -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build -I /home/liujun/Analysis/pyrefcon_ws/Paddle/paddle/fluid/framework/io -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/zlib/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/gflags/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/glog/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/third_party/eigen3 -I /home/liujun/Analysis/pyrefcon_ws/Paddle/third_party/threadpool -I /home/liujun/Analysis/pyrefcon_ws/Paddle/third_party/dlpack/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/xxhash/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/warpctc/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/warprnnt/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/utf8proc/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/mklml/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/mkldnn/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/protobuf/include -I /opt/pyrefcon/include/python3.8 -I /home/liujun/Software/miniconda3/envs/paddle_venv/lib/python3.8/site-packages/numpy/core/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/pybind/src/extern_pybind/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/gloo/include -I 
/home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/xbyak/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/xbyak/include/xbyak -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/install/cryptopp/include -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/third_party/pocketfft/src -I /home/liujun/Analysis/pyrefcon_ws/Paddle -I /home/liujun/Analysis/pyrefcon_ws/Paddle/build/../paddle/fluid/framework/io -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /opt/pyrefcon/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/home/liujun/Analysis/pyrefcon_ws/Paddle/build/paddle/fluid/eager/pylayer -ferror-limit 19 -fopenmp -fopenmp-cuda-parallel-target-regions -fgnuc-version=4.2.1 -fcxx-exceptions -fexceptions -fcolor-diagnostics -vectorize-loops -vectorize-slp -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/liujun/Analysis/pyrefcon_ws/Paddle/build/panda-output/reports -x c++ ../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc

../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc

1// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15#include "paddle/fluid/eager/pylayer/py_layer_node.h"
16
17#include "glog/logging.h"
18#include "paddle/fluid/eager/eager_tensor.h"
19#include "paddle/fluid/platform/device_context.h"
20#include "paddle/fluid/platform/enforce.h"
21#include "paddle/fluid/platform/errors.h"
22#include "paddle/fluid/pybind/eager.h"
23#include "paddle/fluid/pybind/eager_utils.h"
24#include "paddle/phi/api/all.h"
25#include "paddle/phi/core/dense_tensor.h"
26#pragma GCC diagnostic ignored "-Wattributes"
27#include "pybind11/pytypes.h"
28
29namespace egr {
30GradNodePyLayer::~GradNodePyLayer() { // NOLINT
31 pybind11::gil_scoped_acquire gil;
32 Py_XDECREF(ctx_)_Py_XDECREF(((PyObject*)(ctx_)));
33}
34
35paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
36GradNodePyLayer::operator()(
37 paddle::small_vector<std::vector<paddle::Tensor>,
38 kSlotSmallVectorSize>& grads, // NOLINT
39 bool create_graph,
40 bool is_new_grad) {
41 pybind11::gil_scoped_acquire gil;
42 VLOG(3)static_cast<void>(0), !(__extension__ ({ static google::
int32* vlocal__ = &google::kLogSiteUninitialized; google::
int32 verbose_level__ = (3); (*vlocal__ >= verbose_level__
) && ((vlocal__ != &google::kLogSiteUninitialized
) || (google::InitVLOG3__(&vlocal__, &FLAGS_v, "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, verbose_level__))); })) ? (void) 0 : google::LogMessageVoidify
() & google::LogMessage( "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 42).stream()
<< "Running Eager Backward Node: " << name();
1
Assuming the condition is false
2
'?' condition is true
43
44 paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
45 hooked_grads = GradNodePyLayer::ApplyGradientHooks(grads);
46
47 paddle::pybind::PyLayerObject* ctx =
48 reinterpret_cast<paddle::pybind::PyLayerObject*>(ctx_);
49
50 PADDLE_ENFORCE_EQ(ctx->forward_output_tensor_is_duplicable.size(),do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
3
Assuming '__val1' is equal to '__val2'
4
Taking false branch
5
Loop condition is false. Exiting loop
51 grads.size(),do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
52 paddle::platform::errors::InvalidArgument(do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
53 "%s's grad input size(%s) mast be equal with it's "do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
54 "forward's output size(%s).",do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
55 name(),do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
56 grads.size(),do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
57 ctx->forward_output_tensor_is_duplicable.size()))do { auto __val1 = (ctx->forward_output_tensor_is_duplicable
.size()); auto __val2 = (grads.size()); using __TYPE1__ = decltype
(__val1); using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__
= ::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's grad input size(%s) mast be equal with it's "
"forward's output size(%s).", name(), grads.size(), ctx->
forward_output_tensor_is_duplicable.size())); constexpr bool __kCanToString__
= ::phi::details::CanToString<__TYPE1__>::kValue &&
::phi::details::CanToString<__TYPE2__>::kValue; auto __message__
= ::paddle::string::Sprintf( "%s\n [Hint: Expected %s " "=="
" %s, but received %s " "!=" " %s.]", __summary__.error_message
(), "ctx->forward_output_tensor_is_duplicable.size()", "grads.size()"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("ctx->forward_output_tensor_is_duplicable.size()"
, __val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("grads.size()", __val2)); do { throw ::phi::enforce
::EnforceNotMet(phi::ErrorSummary(__summary__.code(), std::move
(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 57); } while (0); } } while (0)
;
58
59 auto backward_args = PyTuple_New(grads.size());
60 for (size_t i = 0; i < grads.size(); i++) {
6
Assuming the condition is false
7
Loop condition is false. Execution continues on line 101
61 if (ctx->forward_output_tensor_is_duplicable[i]) {
62 PyObject* pylist = PyList_New((Py_ssize_t)grads[i].size());
63 for (size_t j = 0; j < grads[i].size(); j++) {
64 if (ctx->materialize_grads && !grads[i][j].initialized()) {
65 paddle::Tensor tensor_tmp;
66 auto dense_tensor = std::make_shared<phi::DenseTensor>();
67 dense_tensor->set_meta(forward_outputs_meta_[i][j]);
68 tensor_tmp.set_impl(dense_tensor);
69 PyList_SET_ITEM(PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
70 pylist,PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
71 static_cast<Py_ssize_t>(i),PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
72 paddle::pybind::ToPyObject(paddle::experimental::zeros_like(PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
73 tensor_tmp,PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
74 tensor_tmp.dtype(),PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
75 forward_outputs_place_[i][j])))PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(paddle::experimental::zeros_like( tensor_tmp
, tensor_tmp.dtype(), forward_outputs_place_[i][j])))
;
76 } else {
77 PyList_SET_ITEM(pylist,PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(grads[i][0], true))
78 static_cast<Py_ssize_t>(i),PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(grads[i][0], true))
79 paddle::pybind::ToPyObject(grads[i][0], true))PyList_SetItem(pylist, static_cast<Py_ssize_t>(i), paddle
::pybind::ToPyObject(grads[i][0], true))
;
80 }
81 }
82 PyTuple_SET_ITEM(backward_args, i, pylist)PyTuple_SetItem(backward_args, i, pylist);
83 } else {
84 if (ctx->materialize_grads && !grads[i][0].initialized()) {
85 paddle::Tensor tensor_tmp;
86 auto dense_tensor = std::make_shared<phi::DenseTensor>();
87 dense_tensor->set_meta(forward_outputs_meta_[i][0]);
88 tensor_tmp.set_impl(dense_tensor);
89 PyTuple_SET_ITEM(PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
paddle::experimental::zeros_like( tensor_tmp, tensor_tmp.dtype
(), forward_outputs_place_[i][0])))
90 backward_args,PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
paddle::experimental::zeros_like( tensor_tmp, tensor_tmp.dtype
(), forward_outputs_place_[i][0])))
91 i,PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
paddle::experimental::zeros_like( tensor_tmp, tensor_tmp.dtype
(), forward_outputs_place_[i][0])))
92 paddle::pybind::ToPyObject(paddle::experimental::zeros_like(PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
paddle::experimental::zeros_like( tensor_tmp, tensor_tmp.dtype
(), forward_outputs_place_[i][0])))
93 tensor_tmp, tensor_tmp.dtype(), forward_outputs_place_[i][0])))PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
paddle::experimental::zeros_like( tensor_tmp, tensor_tmp.dtype
(), forward_outputs_place_[i][0])))
;
94 } else {
95 PyTuple_SET_ITEM(PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
grads[i][0], true))
96 backward_args, i, paddle::pybind::ToPyObject(grads[i][0], true))PyTuple_SetItem(backward_args, i, paddle::pybind::ToPyObject(
grads[i][0], true))
;
97 }
98 }
99 }
100
101 VLOG(6)static_cast<void>(0), !(__extension__ ({ static google::
int32* vlocal__ = &google::kLogSiteUninitialized; google::
int32 verbose_level__ = (6); (*vlocal__ >= verbose_level__
) && ((vlocal__ != &google::kLogSiteUninitialized
) || (google::InitVLOG3__(&vlocal__, &FLAGS_v, "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, verbose_level__))); })) ? (void) 0 : google::LogMessageVoidify
() & google::LogMessage( "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 101).stream()
<< "PyLayer backward args is ready, begin call user's backward "
8
Assuming the condition is false
9
'?' condition is true
102 "function...";
103
104 auto backward_fn =
105 PyObject_GetAttrString(reinterpret_cast<PyObject*>(ctx), "backward");
106 if (!backward_fn) {
10
Assuming 'backward_fn' is non-null
11
Taking false branch
107 PADDLE_THROW(paddle::platform::errors::InvalidArgument(do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "Get backward function faild."
)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 108); } while (0)
108 "Get backward function faild."))do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "Get backward function faild."
)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 108); } while (0)
;
109 }
110 bool need_grad_tmp = egr::Controller::Instance().HasGrad();
111 egr::Controller::Instance().SetHasGrad(create_graph && need_grad_tmp);
12
Assuming 'create_graph' is false
112 auto outputs = PyObject_CallObject(backward_fn, backward_args);
113 egr::Controller::Instance().SetHasGrad(need_grad_tmp);
114 if (!outputs) {
13
Assuming 'outputs' is non-null
14
Taking false branch
115 PADDLE_THROW(paddle::platform::errors::External(do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::External( pybind11::detail::error_string
().c_str())), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 116); } while (0)
116 pybind11::detail::error_string().c_str()))do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::External( pybind11::detail::error_string
().c_str())), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 116); } while (0)
;
117 }
118
119 VLOG(6)static_cast<void>(0), !(__extension__ ({ static google::
int32* vlocal__ = &google::kLogSiteUninitialized; google::
int32 verbose_level__ = (6); (*vlocal__ >= verbose_level__
) && ((vlocal__ != &google::kLogSiteUninitialized
) || (google::InitVLOG3__(&vlocal__, &FLAGS_v, "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, verbose_level__))); })) ? (void) 0 : google::LogMessageVoidify
() & google::LogMessage( "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 119).stream()
<< "PyLayer backward function finish...";
15
Assuming the condition is false
16
'?' condition is true
120
121 PyObject* outputs_tuple = nullptr;
122 if (PyTuple_Check(outputs)((((((PyObject*)(outputs))->ob_type))->tp_flags & (
(1UL << 26))) != 0)
) {
17
Assuming the condition is false
18
Taking false branch
123 outputs_tuple = outputs;
124 } else {
125 outputs_tuple = PyTuple_New(1);
19
Calling 'PyTuple_New'
21
Returning from 'PyTuple_New'
24
PyObject ownership leak with reference count of 1
126 Py_INCREF(outputs)_Py_INCREF(((PyObject*)(outputs)));
127 PyTuple_SET_ITEM(outputs_tuple, 0, outputs)PyTuple_SetItem(outputs_tuple, 0, outputs);
128 }
129
130 size_t outputs_size = PyTuple_GET_SIZE(outputs_tuple)(((PyVarObject*)(((PyTupleObject *)(outputs_tuple))))->ob_size
)
;
131
132 if (outputs_size > ctx->forward_input_tensor_is_duplicable.size()) {
22
Assuming the condition is true
23
Taking true branch
133 PADDLE_THROW(paddle::platform::errors::InvalidArgument(do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "The number of outputs of `PyLayer.backward` should be %d, but "
"received %d.", ctx->forward_input_tensor_is_duplicable.size
(), outputs_size)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 137); } while (0)
134 "The number of outputs of `PyLayer.backward` should be %d, but "do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "The number of outputs of `PyLayer.backward` should be %d, but "
"received %d.", ctx->forward_input_tensor_is_duplicable.size
(), outputs_size)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 137); } while (0)
135 "received %d.",do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "The number of outputs of `PyLayer.backward` should be %d, but "
"received %d.", ctx->forward_input_tensor_is_duplicable.size
(), outputs_size)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 137); } while (0)
136 ctx->forward_input_tensor_is_duplicable.size(),do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "The number of outputs of `PyLayer.backward` should be %d, but "
"received %d.", ctx->forward_input_tensor_is_duplicable.size
(), outputs_size)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 137); } while (0)
137 outputs_size))do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(paddle::platform::errors::InvalidArgument( "The number of outputs of `PyLayer.backward` should be %d, but "
"received %d.", ctx->forward_input_tensor_is_duplicable.size
(), outputs_size)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 137); } while (0)
;
138 }
139
140 paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
141 grad_out;
142 grad_out.reserve(ctx->forward_input_tensor_is_duplicable.size());
143 for (size_t i = 0; i < ctx->forward_input_tensor_is_duplicable.size(); i++) {
144 if (i < outputs_size) {
145 PyObject* obj = PyTuple_GET_ITEM(outputs_tuple, i)(((PyTupleObject *)(outputs_tuple))->ob_item[i]);
146 if (this->OutputMeta()[i][0].IsStopGradient()) {
147 PADDLE_ENFORCE_EQ(do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
148 obj,do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
149 Py_None,do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
150 paddle::platform::errors::InvalidArgument(do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
151 "%s's backward function should return None at %d position, "do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
152 "because it's forward Tensor's stopgradient is true.",do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
153 name(),do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
154 i))do { auto __val1 = (obj); auto __val2 = ((&_Py_NoneStruct
)); using __TYPE1__ = decltype(__val1); using __TYPE2__ = decltype
(__val2); using __COMMON_TYPE1__ = ::phi::details::CommonType1
<__TYPE1__, __TYPE2__>; using __COMMON_TYPE2__ = ::phi::
details::CommonType2<__TYPE1__, __TYPE2__>; bool __is_not_error
= (static_cast<__COMMON_TYPE1__>(__val1))==( static_cast
<__COMMON_TYPE2__>(__val2)); if (__builtin_expect(static_cast
<bool>(!__is_not_error), 0)) { auto __summary__ = phi::
ErrorSummary(paddle::platform::errors::InvalidArgument( "%s's backward function should return None at %d position, "
"because it's forward Tensor's stopgradient is true.", name(
), i)); constexpr bool __kCanToString__ = ::phi::details::CanToString
<__TYPE1__>::kValue && ::phi::details::CanToString
<__TYPE2__>::kValue; auto __message__ = ::paddle::string
::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "obj", "(&_Py_NoneStruct)"
, ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("obj", __val1), ::phi::details::BinaryCompareMessageConverter
< __kCanToString__>::Convert("(&_Py_NoneStruct)", __val2
)); do { throw ::phi::enforce::EnforceNotMet(phi::ErrorSummary
(__summary__.code(), std::move(__message__)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 154); } while (0); } } while (0)
;
155 grad_out.push_back({});
156 } else {
157 if (ctx->forward_input_tensor_is_duplicable[i]) {
158 grad_out.push_back(
159 paddle::pybind::GetTensorListFromPyObject(obj, true));
160 } else {
161 if (paddle::pybind::PyCheckTensor(obj)) {
162 grad_out.push_back(
163 {paddle::pybind::UnSafeGetTensorFromPyObject(obj)});
164 } else if (obj == Py_None(&_Py_NoneStruct)) {
165 VLOG(4)static_cast<void>(0), !(__extension__ ({ static google::
int32* vlocal__ = &google::kLogSiteUninitialized; google::
int32 verbose_level__ = (4); (*vlocal__ >= verbose_level__
) && ((vlocal__ != &google::kLogSiteUninitialized
) || (google::InitVLOG3__(&vlocal__, &FLAGS_v, "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, verbose_level__))); })) ? (void) 0 : google::LogMessageVoidify
() & google::LogMessage( "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 165).stream()
<< "Got None for Tensor with pos: " << i;
166 grad_out.push_back({paddle::Tensor()});
167 } else {
168 PADDLE_THROW(phi::errors::InvalidArgument(do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(phi::errors::InvalidArgument( "We can only support Tensor or None for backward output, "
", but got %s, please check your PyLayer code and make it fits"
, reinterpret_cast<PyTypeObject*>(obj->ob_type)->
tp_name)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 171); } while (0)
169 "We can only support Tensor or None for backward output, "do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(phi::errors::InvalidArgument( "We can only support Tensor or None for backward output, "
", but got %s, please check your PyLayer code and make it fits"
, reinterpret_cast<PyTypeObject*>(obj->ob_type)->
tp_name)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 171); } while (0)
170 ", but got %s, please check your PyLayer code and make it fits",do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(phi::errors::InvalidArgument( "We can only support Tensor or None for backward output, "
", but got %s, please check your PyLayer code and make it fits"
, reinterpret_cast<PyTypeObject*>(obj->ob_type)->
tp_name)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 171); } while (0)
171 reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name))do { throw ::phi::enforce::EnforceNotMet( ::phi::ErrorSummary
(phi::errors::InvalidArgument( "We can only support Tensor or None for backward output, "
", but got %s, please check your PyLayer code and make it fits"
, reinterpret_cast<PyTypeObject*>(obj->ob_type)->
tp_name)), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 171); } while (0)
;
172 }
173 }
174 }
175 } else {
176 PADDLE_ENFORCE_EQ(do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
177 this->OutputMeta()[i][0].IsStopGradient(),do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
178 true,do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
179 paddle::platform::errors::InvalidArgument(do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
180 "%s's backward function should not return empty at %d position.",do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
181 name(),do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
182 i))do { auto __val1 = (this->OutputMeta()[i][0].IsStopGradient
()); auto __val2 = (true); using __TYPE1__ = decltype(__val1)
; using __TYPE2__ = decltype(__val2); using __COMMON_TYPE1__ =
::phi::details::CommonType1<__TYPE1__, __TYPE2__>; using
__COMMON_TYPE2__ = ::phi::details::CommonType2<__TYPE1__,
__TYPE2__>; bool __is_not_error = (static_cast<__COMMON_TYPE1__
>(__val1))==( static_cast<__COMMON_TYPE2__>(__val2))
; if (__builtin_expect(static_cast<bool>(!__is_not_error
), 0)) { auto __summary__ = phi::ErrorSummary(paddle::platform
::errors::InvalidArgument( "%s's backward function should not return empty at %d position."
, name(), i)); constexpr bool __kCanToString__ = ::phi::details
::CanToString<__TYPE1__>::kValue && ::phi::details
::CanToString<__TYPE2__>::kValue; auto __message__ = ::
paddle::string::Sprintf( "%s\n [Hint: Expected %s " "==" " %s, but received %s "
"!=" " %s.]", __summary__.error_message(), "this->OutputMeta()[i][0].IsStopGradient()"
, "true", ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("this->OutputMeta()[i][0].IsStopGradient()",
__val1), ::phi::details::BinaryCompareMessageConverter< __kCanToString__
>::Convert("true", __val2)); do { throw ::phi::enforce::EnforceNotMet
(phi::ErrorSummary(__summary__.code(), std::move(__message__)
), "../../../../../paddle/fluid/eager/pylayer/py_layer_node.cc"
, 182); } while (0); } } while (0)
;
183 grad_out.push_back({});
184 }
185 }
186
187 Py_XDECREF(backward_fn)_Py_XDECREF(((PyObject*)(backward_fn)));
188 Py_XDECREF(backward_args)_Py_XDECREF(((PyObject*)(backward_args)));
189 if (!PyTuple_Check(outputs)((((((PyObject*)(outputs))->ob_type))->tp_flags & (
(1UL << 26))) != 0)
) {
190 Py_XDECREF(outputs_tuple)_Py_XDECREF(((PyObject*)(outputs_tuple)));
191 }
192 Py_XDECREF(outputs)_Py_XDECREF(((PyObject*)(outputs)));
193 Py_XDECREF(ctx_)_Py_XDECREF(((PyObject*)(ctx_)));
194 ctx_ = nullptr;
195
196 return grad_out;
197}
198} // namespace egr

/opt/pyrefcon/lib/pyrefcon/models/PyTuple_New.model

1#ifndef PyTuple_New
2struct _object;
3typedef struct _object PyObject;
4PyObject* clang_analyzer_PyObject_New_Reference();
5PyObject* PyTuple_New(Py_ssize_t len) {
6 return clang_analyzer_PyObject_New_Reference();
20
Setting reference count to 1
7}
8#else
9#warning "API PyTuple_New is defined as a macro."
10#endif