Bug Summary

File: .cache/bazel/_bazel_alan/39be661231df2a680c9b74265384c13c/execroot/org_tensorflow/tensorflow/python/lib/core/py_func.cc
Warning: line 201, column 9
PyObject ownership leak with reference count of 1
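
The report's point, in short: the attribute object returned by PyObject_GetAttrString(trampoline, "_ctx") at line 201 is passed straight into PyCapsule_GetPointer and is never released. PyObject_GetAttrString returns a new reference, so every trip through this path leaves one PyObject behind with a reference count of 1 (the same pattern appears again at line 240). Below is a minimal sketch of the leaking pattern and one possible fix; it is illustrative only, not the upstream patch, and the variable name py_ctx is hypothetical. Only documented CPython calls are used.

  // Leaking form, as flagged at py_func.cc:200-201: the new reference from
  // PyObject_GetAttrString is never DECREF'd.
  //   TFE_Context* ctx = reinterpret_cast<TFE_Context*>(PyCapsule_GetPointer(
  //       PyObject_GetAttrString(trampoline, "_ctx"), nullptr));

  // Possible fix (sketch): name the temporary and release it once the capsule
  // pointer has been extracted. The capsule itself is still owned by the
  // trampoline's "_ctx" attribute, so the extracted TFE_Context* stays valid.
  PyObject* py_ctx = PyObject_GetAttrString(trampoline, "_ctx");  // new reference
  CHECK_NE(py_ctx, nullptr);
  TFE_Context* ctx =
      reinterpret_cast<TFE_Context*>(PyCapsule_GetPointer(py_ctx, nullptr));
  Py_DECREF(py_ctx);  // drop our reference; no leak
  CHECK_NE(ctx, nullptr);

An RAII-style alternative would be to wrap the temporary in the Safe_PyObjectPtr helper from safe_ptr.h, which this file already includes at line 47.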

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name py_func.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-output=html -analyzer-checker=python -analyzer-disable-checker=deadcode -analyzer-config prune-paths=true,suppress-c++-stdlib=true,suppress-null-return-paths=false,crosscheck-with-z3=true,model-path=/opt/pyrefcon/lib/pyrefcon/models/models -analyzer-config experimental-enable-naive-ctu-analysis=true,ctu-dir=/tmp/pyrefcon/tensorflow/csa-scan,ctu-index-name=/tmp/pyrefcon/tensorflow/csa-scan/externalDefMap.txt,ctu-invocation-list=/tmp/pyrefcon/tensorflow/csa-scan/invocations.yaml,display-ctu-progress=false -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/home/pyrefcon/.cache/bazel/_bazel_alan/39be661231df2a680c9b74265384c13c/execroot/org_tensorflow -resource-dir /opt/pyrefcon/lib/clang/13.0.0 -iquote . -iquote bazel-out/k8-opt/bin -iquote external/local_config_python -iquote bazel-out/k8-opt/bin/external/local_config_python -iquote external/eigen_archive -iquote bazel-out/k8-opt/bin/external/eigen_archive -iquote external/com_google_absl -iquote bazel-out/k8-opt/bin/external/com_google_absl -iquote external/nsync -iquote bazel-out/k8-opt/bin/external/nsync -iquote external/gif -iquote bazel-out/k8-opt/bin/external/gif -iquote external/libjpeg_turbo -iquote bazel-out/k8-opt/bin/external/libjpeg_turbo -iquote external/com_google_protobuf -iquote bazel-out/k8-opt/bin/external/com_google_protobuf -iquote external/com_googlesource_code_re2 -iquote bazel-out/k8-opt/bin/external/com_googlesource_code_re2 -iquote external/farmhash_archive -iquote bazel-out/k8-opt/bin/external/farmhash_archive -iquote external/fft2d -iquote bazel-out/k8-opt/bin/external/fft2d -iquote external/highwayhash -iquote bazel-out/k8-opt/bin/external/highwayhash -iquote external/zlib -iquote bazel-out/k8-opt/bin/external/zlib -iquote external/double_conversion -iquote bazel-out/k8-opt/bin/external/double_conversion -iquote external/snappy -iquote bazel-out/k8-opt/bin/external/snappy -iquote external/llvm-project -iquote bazel-out/k8-opt/bin/external/llvm-project -iquote external/llvm_terminfo -iquote bazel-out/k8-opt/bin/external/llvm_terminfo -iquote external/llvm_zlib -iquote bazel-out/k8-opt/bin/external/llvm_zlib -iquote external/curl -iquote bazel-out/k8-opt/bin/external/curl -iquote external/boringssl -iquote bazel-out/k8-opt/bin/external/boringssl -iquote external/jsoncpp_git -iquote bazel-out/k8-opt/bin/external/jsoncpp_git -iquote external/local_config_cuda -iquote bazel-out/k8-opt/bin/external/local_config_cuda -iquote external/local_config_rocm -iquote 
bazel-out/k8-opt/bin/external/local_config_rocm -iquote external/local_config_tensorrt -iquote bazel-out/k8-opt/bin/external/local_config_tensorrt -iquote external/mkl_dnn_v1 -iquote bazel-out/k8-opt/bin/external/mkl_dnn_v1 -iquote external/com_github_grpc_grpc -iquote bazel-out/k8-opt/bin/external/com_github_grpc_grpc -iquote external/upb -iquote bazel-out/k8-opt/bin/external/upb -iquote external/lmdb -iquote bazel-out/k8-opt/bin/external/lmdb -iquote external/png -iquote bazel-out/k8-opt/bin/external/png -iquote external/gemmlowp -iquote bazel-out/k8-opt/bin/external/gemmlowp -iquote external/icu -iquote bazel-out/k8-opt/bin/external/icu -iquote external/org_sqlite -iquote bazel-out/k8-opt/bin/external/org_sqlite -iquote external/dlpack -iquote bazel-out/k8-opt/bin/external/dlpack -iquote external/pybind11 -iquote bazel-out/k8-opt/bin/external/pybind11 -isystem external/local_config_python/numpy_include -isystem bazel-out/k8-opt/bin/external/local_config_python/numpy_include -isystem /opt/pyrefcon/lib/pyrefcon/models/python3.8 -isystem /opt/pyrefcon/lib/pyrefcon/models/python3.8 -isystem third_party/eigen3/mkl_include -isystem bazel-out/k8-opt/bin/third_party/eigen3/mkl_include -isystem external/eigen_archive -isystem bazel-out/k8-opt/bin/external/eigen_archive -isystem external/nsync/public -isystem bazel-out/k8-opt/bin/external/nsync/public -isystem external/gif -isystem bazel-out/k8-opt/bin/external/gif -isystem external/com_google_protobuf/src -isystem bazel-out/k8-opt/bin/external/com_google_protobuf/src -isystem external/farmhash_archive/src -isystem bazel-out/k8-opt/bin/external/farmhash_archive/src -isystem external/zlib -isystem bazel-out/k8-opt/bin/external/zlib -isystem external/double_conversion -isystem bazel-out/k8-opt/bin/external/double_conversion -isystem external/llvm-project/llvm/include -isystem bazel-out/k8-opt/bin/external/llvm-project/llvm/include -isystem external/llvm-project/mlir/include -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/include -isystem external/curl/include -isystem bazel-out/k8-opt/bin/external/curl/include -isystem external/boringssl/src/include -isystem bazel-out/k8-opt/bin/external/boringssl/src/include -isystem external/jsoncpp_git/include -isystem bazel-out/k8-opt/bin/external/jsoncpp_git/include -isystem external/local_config_cuda/cuda -isystem bazel-out/k8-opt/bin/external/local_config_cuda/cuda -isystem external/local_config_cuda/cuda/cuda/include -isystem bazel-out/k8-opt/bin/external/local_config_cuda/cuda/cuda/include -isystem external/local_config_rocm/rocm -isystem bazel-out/k8-opt/bin/external/local_config_rocm/rocm -isystem external/local_config_rocm/rocm/rocm/include -isystem bazel-out/k8-opt/bin/external/local_config_rocm/rocm/rocm/include -isystem external/local_config_rocm/rocm/rocm/include/rocrand -isystem bazel-out/k8-opt/bin/external/local_config_rocm/rocm/rocm/include/rocrand -isystem external/local_config_rocm/rocm/rocm/include/roctracer -isystem bazel-out/k8-opt/bin/external/local_config_rocm/rocm/rocm/include/roctracer -isystem tensorflow/compiler/mlir/tensorflow/include -isystem bazel-out/k8-opt/bin/tensorflow/compiler/mlir/tensorflow/include -isystem tensorflow/compiler/mlir/hlo/include -isystem bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/include -isystem tensorflow/compiler/mlir/xla/include -isystem bazel-out/k8-opt/bin/tensorflow/compiler/mlir/xla/include -isystem external/mkl_dnn_v1/include -isystem bazel-out/k8-opt/bin/external/mkl_dnn_v1/include -isystem external/mkl_dnn_v1/src -isystem 
bazel-out/k8-opt/bin/external/mkl_dnn_v1/src -isystem external/mkl_dnn_v1/src/common -isystem bazel-out/k8-opt/bin/external/mkl_dnn_v1/src/common -isystem external/mkl_dnn_v1/src/common/ittnotify -isystem bazel-out/k8-opt/bin/external/mkl_dnn_v1/src/common/ittnotify -isystem external/mkl_dnn_v1/src/cpu -isystem bazel-out/k8-opt/bin/external/mkl_dnn_v1/src/cpu -isystem external/mkl_dnn_v1/src/cpu/gemm -isystem bazel-out/k8-opt/bin/external/mkl_dnn_v1/src/cpu/gemm -isystem external/mkl_dnn_v1/src/cpu/x64/xbyak -isystem bazel-out/k8-opt/bin/external/mkl_dnn_v1/src/cpu/x64/xbyak -isystem external/com_github_grpc_grpc/include -isystem bazel-out/k8-opt/bin/external/com_github_grpc_grpc/include -isystem external/com_github_grpc_grpc/src/core/ext/upb-generated -isystem bazel-out/k8-opt/bin/external/com_github_grpc_grpc/src/core/ext/upb-generated -isystem external/com_github_grpc_grpc/third_party/address_sorting/include -isystem bazel-out/k8-opt/bin/external/com_github_grpc_grpc/third_party/address_sorting/include -isystem external/png -isystem bazel-out/k8-opt/bin/external/png -isystem external/icu/icu4c/source/common -isystem bazel-out/k8-opt/bin/external/icu/icu4c/source/common -isystem external/llvm-project/mlir/lib/Conversions/GPUToSPIRV -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversions/GPUToSPIRV -isystem external/llvm-project/mlir/lib/Conversion/MemRefToSPIRV -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversion/MemRefToSPIRV -isystem external/llvm-project/mlir/lib/Conversion/StandardToSPIRV -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversion/StandardToSPIRV -isystem external/llvm-project/mlir/lib/Conversion/MathToSPIRV -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversion/MathToSPIRV -isystem external/llvm-project/mlir/lib/Conversion/TosaToLinalg -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversion/TosaToLinalg -isystem external/llvm-project/mlir/lib/Conversion/TosaToSCF -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversion/TosaToSCF -isystem external/llvm-project/mlir/lib/Conversion/TosaToStandard -isystem bazel-out/k8-opt/bin/external/llvm-project/mlir/lib/Conversion/TosaToStandard -isystem external/llvm-project/llvm/lib/Target/X86 -isystem bazel-out/k8-opt/bin/external/llvm-project/llvm/lib/Target/X86 -isystem external/pybind11/include -isystem bazel-out/k8-opt/bin/external/pybind11/include -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=1 -D NDEBUG -D SQLITE_OMIT_DEPRECATED -D EIGEN_ALTIVEC_USE_CUSTOM_PACK=0 -D GRPC_ARES=0 -D TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL -D TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL -D HAVE_SYS_UIO_H -D TF_USE_SNAPPY -D CURL_STATICLIB -D EIGEN_MPL2_ONLY -D EIGEN_MAX_ALIGN_BYTES=64 -D LLVM_ON_UNIX=1 -D HAVE_BACKTRACE=1 -D BACKTRACE_HEADER=<execinfo.h> -D LTDL_SHLIB_EXT=".so" -D LLVM_PLUGIN_EXT=".so" -D LLVM_ENABLE_THREADS=1 -D HAVE_SYSEXITS_H=1 -D HAVE_UNISTD_H=1 -D HAVE_STRERROR_R=1 -D HAVE_LIBPTHREAD=1 -D HAVE_PTHREAD_GETNAME_NP=1 -D HAVE_PTHREAD_SETNAME_NP=1 -D HAVE_PTHREAD_GETSPECIFIC=1 -D HAVE_REGISTER_FRAME=1 -D HAVE_DEREGISTER_FRAME=1 -D _GNU_SOURCE -D HAVE_LINK_H=1 -D HAVE_LSEEK64=1 -D HAVE_MALLINFO=1 -D HAVE_POSIX_FALLOCATE=1 -D HAVE_SBRK=1 -D HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC=1 -D LLVM_NATIVE_ARCH="X86" -D LLVM_NATIVE_ASMPARSER=LLVMInitializeX86AsmParser -D LLVM_NATIVE_ASMPRINTER=LLVMInitializeX86AsmPrinter -D LLVM_NATIVE_DISASSEMBLER=LLVMInitializeX86Disassembler -D LLVM_NATIVE_TARGET=LLVMInitializeX86Target -D 
LLVM_NATIVE_TARGETINFO=LLVMInitializeX86TargetInfo -D LLVM_NATIVE_TARGETMC=LLVMInitializeX86TargetMC -D LLVM_NATIVE_TARGETMCA=LLVMInitializeX86TargetMCA -D LLVM_HOST_TRIPLE="x86_64-unknown-linux-gnu" -D LLVM_DEFAULT_TARGET_TRIPLE="x86_64-unknown-linux-gnu" -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/BuiltinAttributesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/BuiltinDialectIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/BuiltinLocationAttributesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/BuiltinOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/BuiltinTypeInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/BuiltinTypesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/CallOpInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/CastOpInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/InferTypeOpInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/OpAsmInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/RegionKindInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SideEffectInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SubElementInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SymbolInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TensorEncodingIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ControlFlowInterfacesIncGen -I bazel-out/k8-opt/bin/external/local_config_cuda/cuda/_virtual_includes/cuda_headers_virtual -I bazel-out/k8-opt/bin/external/local_config_tensorrt/_virtual_includes/tensorrt_headers -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ParserTokenKinds -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/DerivedAttributeOpInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LoopLikeInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/StandardOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/VectorInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/AffineMemoryOpInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/AffineOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/CopyOpInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/MemRefBaseIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/MemRefOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TensorOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ViewLikeInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LinalgInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LinalgStructuredOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LinalgOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/MathBaseIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/MathOpsIncGen -I 
bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SCFIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SCFPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TilingInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ComplexBaseIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ComplexOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/PDLOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/PDLTypesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/PDLInterpOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ConversionPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TransformsPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/QuantOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/QuantPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/MLIRShapeCanonicalizationIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ShapeOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LLVMDialectAttributesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LLVMDialectInterfaceIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LLVMOpsIncGen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/canonicalize_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/chlo_ops_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/hlo_ops_base_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/hlo_ops_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/hlo_ops_pattern_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/lhlo_ops_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/lhlo_ops_structs_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/chlo_legalize_to_hlo_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/disc_ral_ops_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/lhlo_gpu_ops_enums_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/lhlo_gpu_ops_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/lhlo_gpu_ops_structs_inc_gen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/DiscRalPassIncGen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/LmhloPassIncGen -I bazel-out/k8-opt/bin/tensorflow/compiler/mlir/hlo/_virtual_includes/MhloPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/llvm/_virtual_includes/InstCombineTableGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/VectorOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LinalgPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/StandardOpsTransformsPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LLVMConversionIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/LLVMPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/OpenMPOpsIncGen -I 
bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/AMXIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ArmNeonIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ArmSVEIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/X86VectorIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/AffinePassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/AsyncOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/AsyncPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/OpenACCOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/DLTIBaseIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/GPUBaseIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/GPUOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/GPUPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ParallelLoopMapperAttrGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/NVVMConversionIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/NVVMOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/GPUToNVVMGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/GPUToROCDLTGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ROCDLOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SPIRVAttrUtilsGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SPIRVAvailabilityIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SPIRVCanonicalizationIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SPIRVOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SPIRVSerializationGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ShapeToStandardGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TosaDialectIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TosaInterfacesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TosaPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/EmitCAttributesIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/EmitCOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/MemRefPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SPIRVPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/ShapeTransformsPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SparseTensorAttrDefsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SparseTensorOpsIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/SparseTensorPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/mlir/_virtual_includes/TensorPassIncGen -I bazel-out/k8-opt/bin/external/llvm-project/llvm/_virtual_includes/X86CodeGen -I bazel-out/k8-opt/bin/external/llvm-project/llvm/_virtual_includes/X86CommonTableGen -I bazel-out/k8-opt/bin/external/llvm-project/llvm/_virtual_includes/X86Info -I bazel-out/k8-opt/bin/external/llvm-project/llvm/_virtual_includes/X86UtilsAndDesc -I bazel-out/k8-opt/bin/external/pybind11/_virtual_includes/pybind11 -D AUTOLOAD_DYNAMIC_KERNELS -D 
__DATE__="redacted" -D __TIMESTAMP__="redacted" -D __TIME__="redacted" -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /opt/pyrefcon/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wall -Wunused-but-set-parameter -Wno-free-nonheap-object -Wno-builtin-macro-redefined -w -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/home/pyrefcon/.cache/bazel/_bazel_alan/39be661231df2a680c9b74265384c13c/execroot/org_tensorflow -ferror-limit 19 -stack-protector 1 -fgnuc-version=4.2.1 -fcxx-exceptions -fexceptions -vectorize-loops -vectorize-slp -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/pyrefcon/tensorflow/csa-scan/reports -x c++ tensorflow/python/lib/core/py_func.cc

tensorflow/python/lib/core/py_func.cc

1/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15
16#include "tensorflow/python/lib/core/py_func.h"
17
18#include <Python.h>
19
20// clang-format: off
21// Must be included first.
22#include "tensorflow/python/lib/core/numpy.h"
23// clang-format: on
24
25#include <array>
26
27#include "numpy/arrayobject.h"
28#include "tensorflow/c/eager/c_api.h"
29#include "tensorflow/c/eager/tfe_context_internal.h"
30#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
31#include "tensorflow/c/tf_status_helper.h"
32#include "tensorflow/core/common_runtime/eager/context.h"
33#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
34#include "tensorflow/core/framework/allocation_description.pb.h"
35#include "tensorflow/core/framework/op_kernel.h"
36#include "tensorflow/core/framework/tensor.h"
37#include "tensorflow/core/lib/core/errors.h"
38#include "tensorflow/core/lib/core/status.h"
39#include "tensorflow/core/lib/core/threadpool.h"
40#include "tensorflow/core/platform/macros.h"
41#include "tensorflow/core/platform/mutex.h"
42#include "tensorflow/core/platform/types.h"
43#include "tensorflow/python/eager/pywrap_tfe.h"
44#include "tensorflow/python/lib/core/ndarray_tensor.h"
45#include "tensorflow/python/lib/core/ndarray_tensor_bridge.h"
46#include "tensorflow/python/lib/core/py_util.h"
47#include "tensorflow/python/lib/core/safe_ptr.h"
48
49namespace tensorflow {
50namespace {
51
52static mutex mu(LINKER_INITIALIZED);
53static PyObject* py_trampoline TF_GUARDED_BY(mu) = nullptr;
54
55// Returns the py_trampoline that is used to pass the control to the
56// python runtime.
57PyObject* GetPyTrampoline() {
58 mutex_lock l(mu);
59 return py_trampoline;
60}
61
62// A call to the registered python function.
63struct PyCall {
64 // Passed to python runtime to call the python function registered
65 // with this "token".
66 string token;
67
68 // The device on which Tensors are stored; only used for EagerPyFunc.
69 Device* device = nullptr;
70
71 // True if the call is associated with an EagerPyFunc.
72 bool eager = false;
73
74 // True if the call is running under eager async mode.
75 bool eager_async = false;
76
77 // Inputs and outputs of this function invocation.
78 std::vector<Tensor> ins;
79 std::vector<Tensor> out;
80};
81
82bool IsCPUDevice(const Device* d) {
83 return d == nullptr || d->tensorflow_gpu_device_info() == nullptr;
84}
85
86// Givens the 'call', prepares the token and inputs as a python tuple
87// that is appropriate for calling the trampoline.
88Status MakeArgTuple(const PyCall* call, TFE_Context* ctx, PyObject** tuple) {
89 int64_t n = call->ins.size();
90 PyObject* lst = PyList_New(n);
91 CHECK(lst);
92 // TFE_TensorHandle assumes that CPU is identified by nullptr.
93 //
94 // Set device name to be empty if the device is CPU.
95 const char* device_name = nullptr;
96
97 if (call->device != nullptr && !IsCPUDevice(call->device))
98 device_name = call->device->name().c_str();
99
100 for (int64_t i = 0; i < n; ++i) {
101 PyObject* arg = nullptr;
102 if (call->eager) {
103 Tensor t = call->ins[i];
104 arg = EagerTensorFromHandle(tensorflow::wrap(
105 tensorflow::unwrap(ctx)->CreateLocalHandleFromTFTensor(t,
106 device_name)));
107 if (arg == nullptr) {
108 Py_DECREF(lst);
109 return errors::Internal("Unable to procure EagerTensor from Tensor.");
110 }
111 } else {
112 Status s = TensorToNdarray(call->ins[i], &arg);
113 if (!s.ok()) {
114 Py_DECREF(lst);
115 return s;
116 }
117 arg = PyArray_Return(reinterpret_cast<PyArrayObject*>(arg));
118 }
119 PyList_SetItem(lst, i, arg);
120 }
121 *tuple = Py_BuildValue("(ssN)", call->token.c_str(), device_name, lst);
122 CHECK(*tuple);
123 return Status::OK();
124}
125
126bool IsSingleNone(PyObject* obj) {
127 if (!PyArray_Check(obj)) {
128 return false;
129 }
130 PyArrayObject* array_obj = reinterpret_cast<PyArrayObject*>(obj);
131 if (PyArray_NDIM(array_obj) != 0 || PyArray_SIZE(array_obj) != 1) {
132 return false;
133 }
134 std::array<npy_intp, 0> indices;
135 char* item_ptr =
136 static_cast<char*>(PyArray_GetPtr(array_obj, indices.data()));
137 PyObject* item = PyArray_GETITEM(array_obj, item_ptr);
138 CHECK(item);
139 return item == Py_None;
140}
141
142// Retrieves a Tensor from `eager_tensor` and stores it in `output_tensor`.
143// Validates that `output_tensor` is backed by memory in `expected_device`
144// (which is assumed to be a local device, one on which the kernel was
145// executed.)
146//
147// It may be nice to copy the tensor to the right device instead of failing if
148// it isn't already there. This is left as a future exercise. The required
149// device-copying logic is implemented in Python at the moment.
150tensorflow::Status ExtractTensorFromEagerTensor(const PyObject* eager_tensor,
151 TFE_Context* ctx,
152 const Device* expected_device,
153 const Tensor** output_tensor) {
154 tensorflow::TensorHandle* handle = down_cast<tensorflow::TensorHandle*>(
155 tensorflow::unwrap(ctx)->TFTensorHandleFromInterface(
156 tensorflow::unwrap(EagerTensor_Handle(eager_tensor))));
157
158 Device* actual_device = handle->device();
159 TF_RETURN_IF_ERROR(handle->Tensor(output_tensor));
160 // actual_device may be nullptr, which implies local CPU.
161 if (expected_device == actual_device) return Status::OK();
162 const string& expected_device_name = expected_device->attributes().name();
163 if (actual_device == nullptr) {
164 if (!IsCPUDevice(expected_device)) {
165 return errors::Internal(
166 "Expected the py_func to return a Tensor backed by memory in ",
167 expected_device_name,
168 ", but is actually backed by local host memory. This is a bug.");
169 }
170 return Status::OK();
171 }
172 // NOTE(ebrevdo): Here we could try comparing "actual_device_name"
173 // (actual_device->attributes()->name()) to expected_device_name and ensure
174 // they're the same. However, this comparison fails if we create a ClusterDef
175 // on localhost, mainly because the Device created by Eager code doesn't match
176 // the device created by a session. In this case, expected_device_name may
177 // contain "worker" but the Eager device name contains "localhost". Since we
178 // can't easily access the true underlying device of "worker" here, we are not
179 // able to perform a proper comparison. Furthermore, we can't check
180 // IsCPUDevice(actual_device) because the kernel's device may indeed be a
181 // GPU device (the python interpreter doesn't use it, however).
182 return Status::OK();
183}
184
185// Calls the registered py function through the trampoline.
186Status DoCallPyFunc(PyCall* call, bool* out_log_on_error) {
187 *out_log_on_error = true;
188 PyObject* trampoline = GetPyTrampoline();
189 if (trampoline == nullptr) {
    [Step 11] Assuming the condition is false
    [Step 12] Taking false branch
190 return errors::InvalidArgument(
191 "Missing py trampoline. Most likely, it is a link error.");
192 }
193
194 // Prepare the argument.
195 PyObject* args = nullptr;
196 std::unique_ptr<EagerExecutor> new_executor = nullptr;
197 EagerExecutor* old_executor = nullptr;
198 if (call->eager) {
    [Step 12.1] Field 'eager' is true
    [Step 13] Taking true branch
199 // See FuncRegistry._ctx.
200 TFE_Context* ctx = reinterpret_cast<TFE_Context*>(PyCapsule_GetPointer(
201 PyObject_GetAttrString(trampoline, "_ctx"), nullptr));
    [Step 14] Calling 'PyObject_GetAttrString'
    [Step 16] Returning from 'PyObject_GetAttrString'
    [Step 17] PyObject ownership leak with reference count of 1
202 CHECK_NE(ctx, nullptr);
203 TF_RETURN_IF_ERROR(MakeArgTuple(call, ctx, &args));
204 new_executor.reset(new EagerExecutor(call->eager_async));
205 old_executor = &(tensorflow::unwrap(ctx)->Executor());
206 tensorflow::unwrap(ctx)->SetExecutorForThread(new_executor.get());
207 } else {
208 TF_RETURN_IF_ERROR(MakeArgTuple(call, nullptr, &args));
209 }
210 CHECK(args);
211
212 // Invokes the trampoline.
213 PyObject* result = PyEval_CallObject(trampoline, args);
214 Py_DECREF(args);
215 Status s = Status::OK();
216 if (result == nullptr) {
217 if (PyErr_Occurred()) {
218 if (PyErr_ExceptionMatches(PyExc_ValueError) ||
219 PyErr_ExceptionMatches(PyExc_TypeError)) {
220 s = errors::InvalidArgument(PyExceptionFetch());
221 } else if (PyErr_ExceptionMatches(PyExc_StopIteration)) {
222 *out_log_on_error = false;
223 s = errors::OutOfRange(PyExceptionFetch());
224 } else if (PyErr_ExceptionMatches(PyExc_MemoryError)) {
225 s = errors::ResourceExhausted(PyExceptionFetch());
226 } else if (PyErr_ExceptionMatches(PyExc_NotImplementedError)) {
227 s = errors::Unimplemented(PyExceptionFetch());
228 } else {
229 // TODO(ebrevdo): Check if exception is an OpError and use the
230 // OpError.error_code property to map it back in the Status.
231 s = errors::Unknown(PyExceptionFetch());
232 }
233 } else {
234 s = errors::Internal("Failed to run py callback ", call->token,
235 ": see error log.");
236 }
237 }
238
239 TFE_Context* ctx = reinterpret_cast<TFE_Context*>(PyCapsule_GetPointer(
240 PyObject_GetAttrString(trampoline, "_ctx"), /*name=*/nullptr));
241 if (new_executor != nullptr) {
242 s.Update(new_executor->WaitForAllPendingNodes());
243 tensorflow::unwrap(ctx)->SetExecutorForThread(old_executor);
244 }
245
246 TF_RETURN_IF_ERROR(s);
247
248 // Process the return values and convert them to TF Tensors.
249 if (PyList_Check(result)) {
250 // `result` is a Python list; if this operation is an `EagerPyFunc`, then
251 // every item in the list must be an `EagerTensor`; otherwise, every element
252 // must be a NumPy array.
253 call->out.clear();
254 for (int i = 0; i < PyList_Size(result); ++i) {
255 Tensor t;
256 if (call->eager) {
257 const PyObject* item = PyList_GetItem(result, i);
258 if (EagerTensor_CheckExact(item)) {
259 const Tensor* tensor = nullptr;
260 s = ExtractTensorFromEagerTensor(item, ctx, call->device, &tensor);
261 if (s.ok()) t = *tensor;
262 } else {
263 s = errors::FailedPrecondition(
264 "Expected EagerTensor, found PyObject of type: ",
265 Py_TYPE(item)->tp_name);
266 }
267 } else {
268 s = NdarrayToTensor(PyList_GetItem(result, i), &t);
269 }
270
271 if (!s.ok()) {
272 break;
273 }
274 call->out.push_back(t);
275 }
276 } else if (EagerTensor_CheckExact(result) || result == Py_None) {
277 // result is an `EagerTensor` or `None`.
278 DCHECK(call->eager);
279 if (result != Py_None) {
280 const Tensor* t = nullptr;
281 s = ExtractTensorFromEagerTensor(result, ctx, call->device, &t);
282 if (s.ok()) call->out.push_back(*t);
283 }
284 } else if (PyArray_Check(result)) {
285 // `result` is a NumPy array.
286 DCHECK(!call->eager);
287 if (!IsSingleNone(result)) {
288 Tensor t;
289 s = NdarrayToTensor(result, &t);
290 if (s.ok()) {
291 call->out.push_back(t);
292 }
293 }
294 } else {
295 s = errors::Internal("Unexpected PyObject was returned: ",
296 Py_TYPE(result)->tp_name);
297 }
298 Py_DECREF(result);
299 return s;
300}
301
302} // end namespace
303
304void InitializePyTrampoline(PyObject* trampoline) {
305 mutex_lock l(mu);
306 if (py_trampoline == nullptr) {
307 py_trampoline = trampoline;
308 Py_INCREF(py_trampoline);
309 } else {
310 LOG(WARNING) << "InitializeCallback should only be called once";
311 }
312}
313
314class PyFuncOp : public OpKernel {
315 public:
316 explicit PyFuncOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
317 OP_REQUIRES_OK(ctx, ctx->GetAttr("token", &token_))do { ::tensorflow::Status _s(ctx->GetAttr("token", &token_
)); if (!(__builtin_expect(!!(_s.ok()), 1))) { CheckNotInComputeAsync
((ctx), "OP_REQUIRES_OK_ASYNC"); (ctx)->CtxFailureWithWarning
("tensorflow/python/lib/core/py_func.cc", 317, _s); return; }
} while (0)
;
318 eager_ = type_string() == "EagerPyFunc";
319 if (eager_) {
320 OP_REQUIRES_OK(ctx, ctx->GetAttr("is_async", &eager_async_))do { ::tensorflow::Status _s(ctx->GetAttr("is_async", &
eager_async_)); if (!(__builtin_expect(!!(_s.ok()), 1))) { CheckNotInComputeAsync
((ctx), "OP_REQUIRES_OK_ASYNC"); (ctx)->CtxFailureWithWarning
("tensorflow/python/lib/core/py_func.cc", 320, _s); return; }
} while (0)
;
321 }
322 }
323
324 bool IsExpensive() override { return true; }
325
326 void Compute(OpKernelContext* ctx) override {
327 PyCall call;
328 call.token = token_;
329 call.eager = eager_;
330 if (call.eager) {
    [Step 1] Assuming field 'eager' is true
    [Step 2] Taking true branch
331 // Eager's C API uses `Device`, whereas `OpKernelContext` stores a
332 // `DeviceBase`; attempt to downcast.
333 call.device = dynamic_cast<Device*>(ctx->device());
334 if (call.device == nullptr) {
    [Step 3] Assuming the condition is false
    [Step 4] Taking false branch
335 ctx->CtxFailureWithWarning(errors::Internal(
336 "Unrecognized device class: ", ctx->device()->name()));
337 return;
338 }
339 call.eager_async = eager_async_;
340 }
341
342 for (int i = 0; i < ctx->num_inputs(); ++i) {
    [Step 5] Assuming the condition is false
    [Step 6] Loop condition is false. Execution continues on line 352
343 call.ins.push_back(ctx->input(i));
344 }
345
346 // NOTE(mrry): There is a potential time-of-check-to-time-of-use race here.
347 // because it is possible that `Py_Finalize()` could be called in another
348 // thread between this check and the call to `PyGILState_Ensure()`, which
349 // will abort the process if `Py_Finalize()` has been called. A more robust
350 // solution would be welcome, but it is not obvious how to make this work
351 // using the current Python C API.
352 OP_REQUIRES(ctx, Py_IsInitialized(),
    [Step 7] Assuming the condition is false
    [Step 8] Taking false branch
    [Step 9] Loop condition is false. Exiting loop
353 errors::FailedPrecondition(
354 "Python interpreter state is not initialized. "
355 "The process may be terminated."));
356
357 PyGILState_STATE py_threadstate;
358 py_threadstate = PyGILState_Ensure();
359 bool log_on_error;
360 Status s = DoCallPyFunc(&call, &log_on_error);
    [Step 10] Calling 'DoCallPyFunc'
361 // Sometimes py_funcs can be called without a session and leak memory. This
362 // ensures we clear the decref cache so this doesn't happen.
363 ClearDecrefCache();
364 PyGILState_Release(py_threadstate);
365
366 // Ensures that GIL is released even when !s.ok().
367 if (!s.ok()) {
368 if (log_on_error) {
369 ctx->CtxFailureWithWarning(s);
370 } else {
371 ctx->CtxFailure(s);
372 }
373 return;
374 }
375
376 OP_REQUIRES(ctx, static_cast<int32>(call.out.size()) == ctx->num_outputs(),
377 errors::InvalidArgument(token_, " returns ", call.out.size(),
378 " values, but expects to see ",
379 ctx->num_outputs(), " values."));
380 for (size_t i = 0; i < call.out.size(); ++i) {
381 const auto& t = call.out[i];
382 OP_REQUIRES(
383 ctx, t.dtype() == output_type(i),
384 errors::InvalidArgument(i, "-th value returned by ", token_, " is ",
385 DataTypeString(t.dtype()), ", but expects ",
386 DataTypeString(output_type(i))));
387 ctx->set_output(i, t);
388 }
389 }
390
391 private:
392 string token_;
393
394 // True if and only if this op should execute the python function eagerly,
395 // i.e., if and only if the eager attribute is set.
396 bool eager_;
397
398 bool eager_async_;
399
400 TF_DISALLOW_COPY_AND_ASSIGN(PyFuncOp);
401};
402
403REGISTER_KERNEL_BUILDER(Name("PyFunc").Device(DEVICE_CPU), PyFuncOp)[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_9 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("PyFunc").Device(DEVICE_CPU
).Build());
;
404REGISTER_KERNEL_BUILDER(Name("PyFuncStateless").Device(DEVICE_CPU), PyFuncOp)[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_10 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("PyFuncStateless")
.Device(DEVICE_CPU).Build());
;
405REGISTER_KERNEL_BUILDER(Name("EagerPyFunc").Device(DEVICE_CPU), PyFuncOp)[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_11 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("EagerPyFunc").Device
(DEVICE_CPU).Build());
;
406
407DataType gpu_types[] = {
408 // No strings and int32s, no ref types and no resource/variant types.
409 DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_INT16, DT_INT8,
410 DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8, DT_QUINT8,
411 DT_QINT32, DT_BFLOAT16, DT_QINT16, DT_QUINT16, DT_UINT16,
412 DT_COMPLEX128, DT_HALF, DT_UINT32, DT_UINT64,
413};
414
415REGISTER_KERNEL_BUILDER(Name("EagerPyFunc")[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_12 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("EagerPyFunc") .Device
(DEVICE_GPU) .TypeConstraint("Tin", gpu_types) .TypeConstraint
("Tout", gpu_types).Build());
416 .Device(DEVICE_GPU)[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_12 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("EagerPyFunc") .Device
(DEVICE_GPU) .TypeConstraint("Tin", gpu_types) .TypeConstraint
("Tout", gpu_types).Build());
417 .TypeConstraint("Tin", gpu_types)[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_12 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("EagerPyFunc") .Device
(DEVICE_GPU) .TypeConstraint("Tin", gpu_types) .TypeConstraint
("Tout", gpu_types).Build());
418 .TypeConstraint("Tout", gpu_types),[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_12 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("EagerPyFunc") .Device
(DEVICE_GPU) .TypeConstraint("Tin", gpu_types) .TypeConstraint
("Tout", gpu_types).Build());
419 PyFuncOp)[[clang::annotate("tf:kernel")]] static ::tensorflow::InitOnStartupMarker
const register_kernel_12 __attribute__((unused)) = (::std::integral_constant
<bool, !(false || (true && true))>::value) ? ::
tensorflow::InitOnStartupMarker{} : ::tensorflow::InitOnStartupMarker
{} << ([](::tensorflow::KernelDef const* kernel_def) {
::tensorflow::kernel_factory::OpKernelRegistrar registrar( kernel_def
, "PyFuncOp", [](::tensorflow::OpKernelConstruction* context)
-> ::tensorflow::OpKernel* { return new PyFuncOp(context)
; }); (void)registrar; return ::tensorflow::InitOnStartupMarker
{}; })(::tensorflow::register_kernel::Name("EagerPyFunc") .Device
(DEVICE_GPU) .TypeConstraint("Tin", gpu_types) .TypeConstraint
("Tout", gpu_types).Build());
;
420
421} // end namespace tensorflow

/opt/pyrefcon/lib/pyrefcon/models/models/PyObject_GetAttrString.model

1#ifndef PyObject_GetAttrString
2struct _object;
3typedef struct _object PyObject;
4PyObject* clang_analyzer_PyObject_New_Reference();
5PyObject* PyObject_GetAttrString(PyObject *o, const char *attr_name) {
6 return clang_analyzer_PyObject_New_Reference();
    [Step 15] Setting reference count to 1
7}
8#else
9#warning "API PyObject_GetAttrString is defined as a macro."
10#endif
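
This model is what the analyzer substitutes for the real PyObject_GetAttrString during the scan: the call is treated as producing a fresh object whose reference count is set to 1 (step 15). Because DoCallPyFunc neither stores nor releases that object, the reference is still live when the function returns, which is exactly what step 17 reports at line 201. For comparison, a hypothetical caller that the checker would accept looks like this (illustrative only, not taken from the TensorFlow sources):

  PyObject* attr = PyObject_GetAttrString(obj, "name");  // new reference, count == 1
  if (attr != nullptr) {
    // ... use attr ...
    Py_DECREF(attr);  // ownership released; nothing for the checker to flag
  }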