The following issues were found:
torch/csrc/api/src/optim/adagrad.cpp
1 issue
Line: 52
Column: 20
CWE codes:
126
Suggestion:
This function is discouraged by most C++ coding standards in favor of the safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
bool operator==(const AdagradParamState& lhs, const AdagradParamState& rhs) {
  return (lhs.step() == rhs.step()) &&
      torch::equal(lhs.sum(), rhs.sum());
}

void AdagradParamState::serialize(torch::serialize::OutputArchive& archive) const {
  _TORCH_OPTIM_SERIALIZE_TORCH_ARG(step);
  _TORCH_OPTIM_SERIALIZE_TORCH_ARG(sum);
Reported by FlawFinder.
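Note: the flagged token is torch::equal, which FlawFinder pattern-matches against equal(); the advice targets the classic three-iterator std::equal, which reads past the second range whenever it is shorter than the first. A minimal sketch of the bounds-checked four-iterator overload available since C++14 (the vectors are hypothetical stand-ins for the compared buffers):

#include <algorithm>
#include <vector>

bool same_contents(const std::vector<float>& a, const std::vector<float>& b) {
  // The four-iterator overload compares lengths first, so the
  // second range can never be over-read.
  return std::equal(a.begin(), a.end(), b.begin(), b.end());
}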
torch/csrc/jit/serialization/import_legacy.cpp
1 issue
Line: 260
Column: 7
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
auto setstate = module.find_method("__setstate__");
TORCH_CHECK(
    setstate,
    "Cannot call '__setstate__' method because"
    " it does not exist");
// Since all Tensors are going to be None before `__setstate__` is run, we
// can't do any optimizations on them that depend on the module type since the
Reported by FlawFinder.
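Note: FlawFinder matches the name setstate against the C random-number API (initstate()/setstate()), hence the CWE-327 message; the code above is looking up a TorchScript __setstate__ method, not generating randomness. Where random values are genuinely needed, a minimal sketch of the usual replacement for the legacy rand()/setstate() family:

#include <cstdint>
#include <random>

std::uint64_t entropy_seed() {
  // std::random_device is nondeterministic on mainstream platforms;
  // prefer it over the predictable rand()/srand()/setstate() family.
  std::random_device rd;
  return (static_cast<std::uint64_t>(rd()) << 32) | rd();
}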
torch/csrc/jit/serialization/export.cpp
1 issue
Line: 166
Column: 7
CWE codes:
362
auto folder = GetFileRootPath(onnx_file_path);
std::string fullFilePath = folder + "/" + tensorName;
std::unique_ptr<FILE, decltype(&CloseFile)> fp(
    fopen(fullFilePath.c_str(), "wb"), &CloseFile);
if (fp == nullptr) {
  throw std::runtime_error(
      std::string("ONNX export failed. Could not open file or directory: ") +
      fullFilePath);
}
Reported by FlawFinder.
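Note: CWE-362 points at the window between any earlier existence check and this fopen; another process can swap the path in between. One common POSIX mitigation is an exclusive create, sketched below with a hypothetical helper (the path parameter stands in for fullFilePath):

#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

FILE* open_exclusive(const char* path) {
  // O_CREAT | O_EXCL fails if the path already exists, closing the
  // classic check-then-open race (error handling kept minimal here).
  int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0644);
  if (fd < 0)
    return nullptr;
  return fdopen(fd, "w"); // wrap the fd for the FILE*-based caller
}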
torch/csrc/jit/runtime/static/passes.cpp
1 issue
Line: 361
#endif
}
TORCH_LIBRARY_FRAGMENT(static_runtime, m) {
m.def("static_runtime::permute_copy(Tensor self, int[] dims) -> Tensor");
m.def(
"static_runtime::reshape_copy(Tensor(a) self, int[] shape) -> Tensor(a)");
m.def(
"static_runtime::flatten_copy.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)");
Reported by Cppcheck.
torch/csrc/jit/runtime/static/ops.cpp
1 issue
Line: 92
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
  const void* self_data = self_contig->data_ptr();
  void* out_data = out.data_ptr();
  memcpy(out_data, self_data, nbytes);
  return out;
}
at::Tensor& flatten_copy_out(
Reported by FlawFinder.
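Note: the copy itself is fixed-length, so discharging CWE-120 means proving that out can hold nbytes. A hedged sketch of a guard in the style of the surrounding code, assuming out and self_contig are in scope as above:

// Hypothetical guard: fail loudly rather than overrun the
// destination if the output tensor was sized incorrectly.
TORCH_CHECK(
    out.nbytes() >= nbytes,
    "destination too small: ", out.nbytes(), " < ", nbytes);
std::memcpy(out.data_ptr(), self_contig->data_ptr(), nbytes);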
torch/csrc/jit/runtime/register_prim_ops.cpp
1 issue
Line: 1568
// String Ops
// Implementations located in torch/csrc/jit/runtime/register_string_ops.cpp
TORCH_LIBRARY_IMPL(aten, CatchAll, m) {
  m.impl(TORCH_SELECTIVE_NAME("aten::slice.str"), TORCH_FN(stringSlice));
  m.impl(
      TORCH_SELECTIVE_NAME("aten::strip"),
      [](std::string string, const std::string& chars) {
        auto rindex = string.find_last_not_of(chars);
Reported by Cppcheck.
torch/csrc/jit/runtime/register_distributed_ops.cpp
1 issue
Line: 281
// Implementations located in
// torch/csrc/jit/runtime/register_distributed_ops.cpp
TORCH_LIBRARY_IMPL(aten, CatchAll, m) {
m.impl("get_gradients", [](int64_t context_id) {
const auto& autogradContext =
dist_autograd::DistAutogradContainer::getInstance().retrieveContext(
context_id);
return autogradContext->getGradients();
Reported by Cppcheck.
torch/csrc/jit/runtime/instruction.cpp
1 issue
Line: 48
Column: 20
CWE codes:
126
"Instructions should be 8 bytes");
std::ostream& operator<<(std::ostream& out, Instruction inst) {
// TODO: use op info to print out the op in a more user-friendly way
int nargs = std::strlen(OpInfo(inst.op));
out << inst.op;
if (nargs > 0) {
out << " " << inst.X;
}
if (nargs > 1) {
Reported by FlawFinder.
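Note: strlen scans until a NUL terminator and over-reads (CWE-126) if none is present, so the call above is safe only because OpInfo returns a NUL-terminated literal. A bounded sketch using POSIX strnlen, with a hypothetical cap kMaxOpInfoLen:

#include <cstddef>
#include <cstring>

constexpr std::size_t kMaxOpInfoLen = 8; // assumed upper bound

std::size_t bounded_op_info_len(const char* s) {
  // strnlen (POSIX, not standard C++) reads at most kMaxOpInfoLen
  // bytes, so a missing NUL terminator cannot cause an over-read.
  return strnlen(s, kMaxOpInfoLen);
}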
torch/csrc/jit/runtime/graph_executor.cpp
1 issue
Line: 804
Column: 12
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
TORCH_API bool IsNewExecutorEnabled() {
  static const auto disable_new_executor =
      std::getenv("TORCH_JIT_DISABLE_NEW_EXECUTOR");
  return getExecutorMode() && FLAGS_torch_jit_enable_new_executor &&
      !disable_new_executor;
}
void runRequiredPasses(const std::shared_ptr<Graph>& g) {
Reported by FlawFinder.
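Note: the CWE-807/20 advice is to treat environment variables as untrusted input rather than keying off mere presence, as the code above does. A minimal sketch of value validation; the helper name and the accepted values ("1"/"0") are assumptions:

#include <cstdlib>
#include <cstring>

bool env_flag(const char* name, bool fallback) {
  // Only the exact strings "1" and "0" are honored; anything
  // else (including an empty value) falls back to the default.
  const char* v = std::getenv(name);
  if (v == nullptr)
    return fallback;
  if (std::strcmp(v, "1") == 0)
    return true;
  if (std::strcmp(v, "0") == 0)
    return false;
  return fallback;
}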
torch/csrc/jit/runtime/argument_spec.h
1 issue
Line: 117
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
void combineHash(const ArgumentInfo& arg) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  ArgumentInfo::plain_data_type arg_data;
  std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo));
  hash_code = c10::hash_combine(hash_code, arg_data);
}
// equality is fast: check ninputs, and then check the raw array data,
// there are no size/stride indirections
Reported by FlawFinder.
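Note: here the destination is a fixed-size plain_data_type, so the CWE-120 concern is discharged by pinning the two sizes together at compile time. A minimal sketch of that idiom, mirroring the types in the snippet above:

// Compile-time proof that the destination can hold the source:
// if the layouts ever diverge, the build breaks instead of the
// memcpy overrunning arg_data.
static_assert(
    sizeof(ArgumentInfo::plain_data_type) >= sizeof(ArgumentInfo),
    "plain_data_type must be at least as large as ArgumentInfo");
std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo));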