Bug Summary

File: build/../torch/csrc/distributed/autograd/init.cpp
Warning: line 20, column 20
PyObject ownership leak with reference count of 1
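
For reference, PyImport_ImportModule returns a new (owned) reference, so every successful call must eventually be balanced by a Py_DECREF/Py_XDECREF on every path, including error paths. The standalone sketch below illustrates that contract; it is not code from init.cpp, and the function names and error handling are illustrative only.

    // Illustrative sketch of the ownership rule behind this report.
    // Assumes the interpreter is already initialized and the GIL is held.
    #include <Python.h>
    #include <stdexcept>

    void import_leaky() {
      PyObject* mod = PyImport_ImportModule("math");  // new reference: caller owns it
      if (!mod) {
        throw std::runtime_error("import failed");
      }
      // ... use mod ...
      // Missing Py_DECREF(mod): the owned reference is never released,
      // which is the kind of +1 leak this checker reports.
    }

    void import_balanced() {
      PyObject* mod = PyImport_ImportModule("math");  // new reference
      if (!mod) {
        throw std::runtime_error("import failed");
      }
      // ... use mod ...
      Py_DECREF(mod);  // ownership released on the success path
    }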

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name init.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-output=html -analyzer-checker=python -analyzer-disable-checker=deadcode -analyzer-config prune-paths=true,suppress-c++-stdlib=true,suppress-inlined-defensive-checks=false,suppress-null-return-paths=false,crosscheck-with-z3=true,model-path=/opt/pyrefcon/lib/pyrefcon/models/models -analyzer-config experimental-enable-naive-ctu-analysis=true,ctu-dir=/tmp/pyrefcon/pytorch/csa-scan,ctu-index-name=/tmp/pyrefcon/pytorch/csa-scan/externalDefMap.txt,ctu-invocation-list=/tmp/pyrefcon/pytorch/csa-scan/invocations.yaml,display-ctu-progress=false -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -relaxed-aliasing -fno-rounding-math -ffp-exception-behavior=ignore -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/tmp/pyrefcon/pytorch/build -resource-dir /opt/pyrefcon/lib/clang/13.0.0 -isystem third_party/gloo -isystem ../cmake/../third_party/gloo -isystem ../cmake/../third_party/googletest/googlemock/include -isystem ../cmake/../third_party/googletest/googletest/include -isystem ../third_party/protobuf/src -isystem ../third_party/gemmlowp -isystem ../third_party/neon2sse -isystem ../third_party/XNNPACK/include -isystem ../third_party -isystem ../cmake/../third_party/eigen -isystem /opt/pyrefcon/lib/pyrefcon/models/python3.8 -isystem /usr/lib/python3/dist-packages/numpy/core/include -isystem ../cmake/../third_party/pybind11/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem ../third_party/ideep/mkl-dnn/include -isystem ../third_party/ideep/include -D BUILDING_TESTS -D FMT_HEADER_ONLY=1 -D HAVE_MALLOC_USABLE_SIZE=1 -D HAVE_MMAP=1 -D HAVE_SHM_OPEN=1 -D HAVE_SHM_UNLINK=1 -D MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -D ONNXIFI_ENABLE_EXT=1 -D ONNX_ML=1 -D ONNX_NAMESPACE=onnx_torch -D THP_BUILD_MAIN_LIB -D USE_C10D -D USE_C10D_GLOO -D USE_C10D_MPI -D USE_DISTRIBUTED -D USE_EXTERNAL_MZCRC -D USE_NUMPY -D USE_RPC -D USE_TENSORPIPE -D USE_VALGRIND -D _FILE_OFFSET_BITS=64 -D torch_python_EXPORTS -I aten/src -I ../aten/src -I . -I ../ -I ../cmake/../third_party/benchmark/include -I caffe2/contrib/aten -I ../third_party/onnx -I third_party/onnx -I ../third_party/foxi -I third_party/foxi -I ../torch/.. -I ../torch/../aten/src -I ../torch/../aten/src/TH -I caffe2/aten/src -I third_party -I ../torch/../third_party/valgrind-headers -I ../torch/../third_party/gloo -I ../torch/../third_party/onnx -I ../torch/csrc -I ../torch/csrc/api/include -I ../torch/lib -I ../torch/lib/libshm -I ../torch/csrc/distributed -I ../torch/csrc/api -I ../c10/.. 
-I third_party/ideep/mkl-dnn/include -I ../third_party/ideep/mkl-dnn/src/../include -I ../torch/lib/libshm/../../../torch/lib -I ../third_party/fmt/include -D USE_PTHREADPOOL -D NDEBUG -D USE_KINETO -D LIBKINETO_NOCUPTI -D USE_FBGEMM -D USE_QNNPACK -D USE_PYTORCH_QNNPACK -D USE_XNNPACK -D SYMBOLICATE_MOBILE_DEBUG_HANDLE -D HAVE_AVX_CPU_DEFINITION -D HAVE_AVX2_CPU_DEFINITION -D NDEBUG -D NDEBUG -D CAFFE2_USE_GLOO -D HAVE_GCC_GET_CPUID -D USE_AVX -D USE_AVX2 -D TH_HAVE_THREAD -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /opt/pyrefcon/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -Wno-unused-but-set-variable -Wno-maybe-uninitialized -Werror=format -Werror=cast-function-type -Wno-stringop-overflow -Wno-write-strings -Wno-strict-aliasing -Wno-cast-function-type -w -std=gnu++14 -fdeprecated-macro -fdebug-compilation-dir=/tmp/pyrefcon/pytorch/build -ferror-limit 19 -fvisibility-inlines-hidden -fopenmp -fopenmp-cuda-parallel-target-regions -pthread -fgnuc-version=4.2.1 -fcxx-exceptions -fexceptions -faligned-allocation -fcolor-diagnostics -vectorize-loops -vectorize-slp -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/pyrefcon/pytorch/csa-scan/reports -x c++ ../torch/csrc/distributed/autograd/init.cpp

../torch/csrc/distributed/autograd/init.cpp

1 #include <torch/csrc/autograd/python_cpp_function.h>
2 #include <torch/csrc/distributed/autograd/autograd.h>
3 #include <torch/csrc/jit/python/pybind_utils.h>
4 #include <torch/csrc/python_headers.h>
5 #include <torch/csrc/utils/object_ptr.h>
6 #include <torch/csrc/utils/pybind.h>
7 #include <torch/types.h>
8
9 namespace torch {
10 namespace distributed {
11 namespace autograd {
12
13 namespace {
14
15 template <typename T>
16 using shared_ptr_class_ = py::class_<T, std::shared_ptr<T>>;
17
18 PyObject* dist_autograd_init(PyObject* _unused, PyObject* noargs) {
19 auto autograd_module =
20 THPObjectPtr(PyImport_ImportModule("torch.distributed.autograd"));
    Step 1: Calling 'PyImport_ImportModule'
    Step 3: Returning from 'PyImport_ImportModule'
    Step 8: PyObject ownership leak with reference count of 1
21 if (!autograd_module) {
    Step 4: Assuming the condition is false
    Step 5: Taking false branch
22 throw python_error();
23 }
24
25 auto torch_C_module = THPObjectPtr(PyImport_ImportModule("torch._C"));
26 if (!torch_C_module) {
    Step 6: Assuming the condition is true
    Step 7: Taking true branch
27 throw python_error();
28 }
29
30 auto torch_C_m = py::handle(torch_C_module).cast<py::module>();
31 auto m = torch_C_m.def_submodule("_distributed_autograd", "distributed autograd bindings");
32
33 auto module = py::handle(m).cast<py::module>();
34
35 auto distAutogradContext =
36 shared_ptr_class_<DistAutogradContext>(module, "DistAutogradContext")
37 .def(
38 "_context_id",
39 &DistAutogradContext::contextId,
40 py::call_guard<py::gil_scoped_release>())
41 .def(
42 "_recv_functions",
43 [](const DistAutogradContext& ctx) {
44 std::map<int64_t, py::object> funcs;
45 for (const auto& map_entry : ctx.recvFunctions()) {
46 funcs.emplace(
47 map_entry.first,
48 py::reinterpret_steal<py::object>(
49 torch::autograd::functionToPyObject(
50 map_entry.second)));
51 }
52 return funcs;
53 })
54 .def(
55 "_send_functions",
56 [](const ContextPtr& ctx) {
57 std::map<int64_t, py::object> funcs;
58 for (const auto& map_entry : ctx->sendFunctions()) {
59 funcs.emplace(
60 map_entry.first,
61 py::reinterpret_steal<py::object>(
62 torch::autograd::functionToPyObject(
63 map_entry.second)));
64 }
65 return funcs;
66 })
67 .def("_known_worker_ids", &DistAutogradContext::getKnownWorkerIds);
68
69 module.def(
70 "_new_context",
71 []() -> const ContextPtr {
72 return DistAutogradContainer::getInstance().newContext();
73 },
74 py::return_value_policy::reference);
75
76 module.def(
77 "_release_context",
78 [](int64_t context_id) {
79 return DistAutogradContainer::getInstance().releaseContext(context_id);
80 },
81 py::call_guard<py::gil_scoped_release>());
82
83 module.def("_get_max_id", []() {
84 return DistAutogradContainer::getInstance().getMaxId();
85 });
86
87 module.def(
88 "_is_valid_context",
89 [](int64_t worker_id) {
90 DistAutogradContainer::getInstance().isValidContext(worker_id);
91 },
92 py::call_guard<py::gil_scoped_release>());
93
94 module.def(
95 "_retrieve_context",
96 [](int64_t context_id) -> const ContextPtr {
97 return DistAutogradContainer::getInstance().retrieveContext(context_id);
98 },
99 py::return_value_policy::reference);
100
101 module.def(
102 "_current_context",
103 []() -> const ContextPtr {
104 return DistAutogradContainer::getInstance().currentContext();
105 },
106 py::return_value_policy::reference);
107
108 module.def(
109 "_init",
110 [](int64_t worker_id) { DistAutogradContainer::init(worker_id); },
111 py::call_guard<py::gil_scoped_release>());
112
113 module.def(
114 "_get_debug_info",
115 []() { return DistEngine::getInstance().getDebugInfo(); },
116 py::call_guard<py::gil_scoped_release>());
117
118 py::options options;
119 options.disable_function_signatures();
120
121 module.def(
122 "backward",
123 backward,
124 R"(
125 backward(context_id: int, roots: List[Tensor], retain_graph = False) -> None
126
127 Kicks off the distributed backward pass using the provided roots. This
128 currently implements the :ref:`fast-mode-algorithm` which
129 assumes all RPC messages sent in the same distributed autograd context
130 across workers would be part of the autograd graph during the backward pass.
131
132 We use the provided roots to discover the autograd graph and compute
133 appropriate dependencies. This method blocks until the entire
134 autograd computation is done.
135
136 We accumulate the gradients in the appropriate
137 :class:`torch.distributed.autograd.context` on each of the nodes. The autograd
138 context to be used is looked up given the ``context_id`` that is passed in when
139 :meth:`torch.distributed.autograd.backward` is called. If there is no valid
140 autograd context corresponding to the given ID, we throw an error. You can
141 retrieve the accumulated gradients using the
142 :meth:`~torch.distributed.autograd.get_gradients` API.
143
144 Arguments:
145 context_id (int): The autograd context id for which we should retrieve the gradients.
146 roots (list): Tensors which represent the roots of the autograd
147 computation. All the tensors should be scalars.
148 retain_graph(bool, optional): If False, the graph used to compute the grad
149 will be freed. Note that in nearly all cases setting this
150 option to True is not needed and often can be worked around
151 in a much more efficient way. Usually, you need to set this
152 to True to run backward multiple times.
153
154 Example::
155 >>> import torch.distributed.autograd as dist_autograd
156 >>> with dist_autograd.context() as context_id:
157 >>> pred = model.forward()
158 >>> loss = loss_func(pred, loss)
159 >>> dist_autograd.backward(context_id, loss)
160 )",
161 py::arg("contextId"),
162 py::arg("roots"),
163 py::arg("retain_graph") = false,
164 py::call_guard<py::gil_scoped_release>());
165
166 module.def(
167 "get_gradients",
168 [](int64_t contextId) -> py::dict {
169 const auto& autogradContext =
170 DistAutogradContainer::getInstance().retrieveContext(contextId);
171 return torch::jit::toPyObject(IValue(autogradContext->getGradients()));
172 },
173 R"(
174 get_gradients(context_id: int) -> Dict[Tensor, Tensor]
175
176 Retrieves a map from Tensor to the appropriate gradient for that Tensor
177 accumulated in the provided context corresponding to the given ``context_id``
178 as part of the distributed autograd backward pass.
179
180 Arguments:
181 context_id(int): The autograd context id for which we should retrieve the
182 gradients.
183
184 Returns:
185 A map where the key is the Tensor and the value is the associated gradient
186 for that Tensor.
187
188 Example::
189 >>> import torch.distributed.autograd as dist_autograd
190 >>> with dist_autograd.context() as context_id:
191 >>> t1 = torch.rand((3, 3), requires_grad=True)
192 >>> t2 = torch.rand((3, 3), requires_grad=True)
193 >>> loss = t1 + t2
194 >>> dist_autograd.backward(context_id, [loss.sum()])
195 >>> grads = dist_autograd.get_gradients(context_id)
196 >>> print(grads[t1])
197 >>> print(grads[t2])
198 )",
199 py::arg("context_id"));
200
201 Py_RETURN_TRUE;
202 }
203 } // namespace
204
205 static PyMethodDef methods[] = { // NOLINT
206 {"_dist_autograd_init",
207 dist_autograd_init,
208 METH_NOARGS,
209 nullptr},
210 {nullptr, nullptr, 0, nullptr}};
211
212 PyMethodDef* python_functions() {
213 return methods;
214 }
215
216 } // namespace autograd
217 } // namespace distributed
218 } // namespace torch
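
Note on the flagged line: the pointer returned on line 20 is handed directly to THPObjectPtr (declared in torch/csrc/utils/object_ptr.h), which is an RAII owner of a PyObject reference. A minimal sketch of that kind of owner is shown below; it is an illustration of the pattern, not the actual THPObjectPtr implementation. When such a destructor runs during stack unwinding (for example on the `throw python_error()` path in steps 6-7), the owned reference is released; whether the analyzer models that destructor along the traced exception path is an assumption on our part, not something the report states.

    // Minimal sketch of an RAII PyObject owner in the spirit of THPObjectPtr
    // (illustrative only; not the actual implementation).
    #include <Python.h>

    class ObjectPtr {
     public:
      explicit ObjectPtr(PyObject* ptr = nullptr) : ptr_(ptr) {}
      ~ObjectPtr() { Py_XDECREF(ptr_); }  // releases the owned reference, also during unwinding
      ObjectPtr(const ObjectPtr&) = delete;
      ObjectPtr& operator=(const ObjectPtr&) = delete;
      PyObject* get() const { return ptr_; }
      explicit operator bool() const { return ptr_ != nullptr; }
     private:
      PyObject* ptr_;
    };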

/opt/pyrefcon/lib/pyrefcon/models/models/PyImport_ImportModule.model

1 #ifndef PyImport_ImportModule
2 struct _object;
3 typedef struct _object PyObject;
4 PyObject* clang_analyzer_PyObject_New_Reference();
5 PyObject* PyImport_ImportModule(const char *name) {
6 return clang_analyzer_PyObject_New_Reference();
    Step 2: Setting reference count to 1
7 }
8 #else
9 #warning "API PyImport_ImportModule is defined as a macro."
10 #endif
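
The model above matches the documented CPython behavior: PyImport_ImportModule hands the caller a new reference, which the analyzer records as a reference count of 1 at step 2. The small embedded-interpreter program below shows the caller-side obligation that follows from that; it is a standalone sketch, and the choice of the "math" module is arbitrary.

    // Standalone sketch of the +1 contract recorded by the model (illustrative only).
    #include <Python.h>
    #include <cstdio>

    int main() {
      Py_Initialize();
      PyObject* mod = PyImport_ImportModule("math");  // new reference on success
      if (!mod) {
        PyErr_Print();
        Py_FinalizeEx();
        return 1;
      }
      // Py_REFCNT includes our owned reference (plus e.g. the sys.modules entry).
      std::printf("refcount while we hold our reference: %zd\n", Py_REFCNT(mod));
      Py_DECREF(mod);  // balance the +1 before shutting down
      return Py_FinalizeEx() < 0 ? 1 : 0;
    }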