The following issues were found:
torch/csrc/distributed/rpc/init.cpp
1 issue
Line: 105
"id",
&WorkerInfo::id_,
R"(Globally unique id to identify the worker.)")
.def("__eq__", &WorkerInfo::operator==, py::is_operator())
// pybind11 suggests the syntax .def(hash(py::self)), with the
// unqualified "hash" function call. However the
// argument-dependent lookup for the function "hash" doesn't get
// triggered in this context because it conflicts with the struct
// c10::hash, so we need to use the qualified name
Reported by Cppcheck.
torch/csrc/distributed/c10d/init.cpp
1 issue
Line: 1327
Column: 31
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
auto options = ::c10d::ProcessGroupGloo::Options::create();
// Use interfaces listed in "GLOO_SOCKET_IFNAME", if set.
char* ifnameEnv = getenv(::c10d::GLOO_SOCKET_IFNAME_ENV.c_str());
if (ifnameEnv) {
for (const auto& iface : ::c10d::split(',', ifnameEnv)) {
options->devices.push_back(
::c10d::ProcessGroupGloo::createDeviceForInterface(iface));
}
Reported by FlawFinder.
torch/csrc/distributed/c10d/frontend.cpp
1 issue
Line: 187
Column: 25
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
auto options = ProcessGroupGloo::Options::create();
// Use interfaces listed in "GLOO_SOCKET_IFNAME", if set.
char* ifnameEnv = getenv(GLOO_SOCKET_IFNAME_ENV.c_str());
if (ifnameEnv) {
for (const auto& iface : split(',', ifnameEnv)) {
options->devices.push_back(
::c10d::ProcessGroupGloo::createDeviceForInterface(iface));
}
Reported by FlawFinder.
torch/csrc/distributed/c10d/Utils.hpp
1 issue
Line: 89
Column: 28
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
}
inline bool parseEnvVarFlag(const char* envVarName) {
char* stringValue = std::getenv(envVarName);
if (stringValue != nullptr) {
int val;
try {
val = std::stoi(stringValue);
} catch (std::exception& e) {
Reported by FlawFinder.
torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp
1 issue
Line: 107
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
const std::vector<at::Tensor> gathered_tensors = output_tensors[i];
const at::Tensor reference_tensor = tensors_to_verify[i];
for (const auto& rank_tensor : gathered_tensors) {
if (!rank_tensor.equal(reference_tensor)) {
std::stringstream ss;
ss << "Detected mismatch between collectives on ranks. Rank "
<< pg->getRank()
<< " is running inconsistent collective: " << *this;
TORCH_CHECK(false, ss.str());
Reported by FlawFinder.
torch/csrc/Layout.h
1 issue
Line: 15
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
PyObject_HEAD
at::Layout layout;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char name[LAYOUT_NAME_LEN + 1];
};
extern PyTypeObject THPLayoutType;
inline bool THPLayout_Check(PyObject *obj) {
Reported by FlawFinder.
torch/csrc/distributed/c10d/NCCLUtils.hpp
1 issue
Line: 71
Column: 7
CWE codes:
134
Suggestion:
Use a constant for the format specification
ncclResult_t result = cmd; \
if (result != ncclSuccess) { \
std::string err = ncclGetErrorWithVersion(result); \
fprintf( \
stderr, \
"NCCL error in: %s:%d, %s\n", \
__FILE__, \
__LINE__, \
err.c_str()); \
Reported by FlawFinder.
torch/csrc/deploy/test_deploy_gpu.cpp
1 issue
Line: 20
Column: 19
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
const char* simple_jit = "torch/csrc/deploy/example/generated/simple_jit";
const char* path(const char* envname, const char* path) {
const char* e = getenv(envname);
return e ? e : path;
}
TEST(TorchDeployGPUTest, SimpleModel) {
if (!torch::cuda::is_available()) {
Reported by FlawFinder.
torch/csrc/Layout.cpp
1 issue
Line: 20
Column: 8
CWE codes:
120
if (!self) throw python_error();
auto self_ = reinterpret_cast<THPLayout*>(self.get());
self_->layout = layout;
std::strncpy (self_->name, name.c_str(), LAYOUT_NAME_LEN);
self_->name[LAYOUT_NAME_LEN] = '\0';
return self.release();
}
PyObject *THPLayout_repr(THPLayout *self)
Reported by FlawFinder.
torch/csrc/deploy/example/benchmark.cpp
1 issue
Line: 299
Column: 20
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
};
int main(int argc, char* argv[]) {
int max_thread = atoi(argv[1]);
cuda = std::string(argv[2]) == "cuda";
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool jit_enable = std::string(argv[3]) == "jit";
Report::report_header(std::cout);
torch::deploy::InterpreterManager manager(max_thread);
Reported by FlawFinder.