The following issues were found:
test/cpp/jit/test_interface.cpp
1 issue
Line: 47
  si.loadType(QualifiedName(class_name));
}

TEST(InterfaceTest, ModuleInterfaceSerialization) {
  auto cu = std::make_shared<CompilationUnit>();
  Module parentMod("parentMod", cu);
  Module subMod("subMod", cu);
  std::vector<at::IValue> constantTable;
Reported by Cppcheck.
test/cpp/jit/test_inliner.cpp
1 issue
Line: 39
  bool oldState_;
};

TEST(InlinerTest, Basic) {
  // disable automatic inlining so we can test it manually
  InlinerGuard guard(/*shouldInline=*/false);
  CompilationUnit cu(testSource);
  auto& fn = cu.get_function("foo3");
Reported by Cppcheck.
test/cpp/jit/test_graph_iterator.cpp
1 issue
Line: 72
  }
}

TEST(GraphIteratorTest, ConstantReturnGraph) {
  const auto graph_string = R"IR(
      graph():
        %1 : int = prim::Constant[value=0]()
        return (%1))IR";
  auto graph = std::make_shared<Graph>();
Reported by Cppcheck.
test/cpp/api/optim.cpp
1 issue
Line: 159
Column: 12
CWE codes: 126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it; a sketch of the safer form follows this finding.
  params_groups.push_back(OptimizerParamGroup(params));
  auto& params_1 = params_groups[1].params();
  for (size_t i = 0; i < params_1.size(); i++) {
    torch::equal(params[i], params_1[i]);
  }

  // test for add_param_group() when one or more params existing in another param_group
  // are passed in the new param group to be added
  ASSERT_THROWS_WITH(
Reported by FlawFinder.
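Note: FlawFinder matches function names lexically, so the torch::equal call above trips its rule for std::equal even though it compares two tensors rather than iterator ranges; the warning is likely a false positive here. For code that does use std::equal, the safer alternative the suggestion refers to is the C++14 four-iterator overload. A minimal sketch, assuming two plain std::vector inputs as hypothetical stand-ins for the parameter lists compared in the test:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> a{1, 2, 3};
  std::vector<int> b{1, 2, 3};

  // Pre-C++14 three-iterator form: if b is shorter than a, the comparison
  // reads past b's end (the CWE-126 over-read the report warns about).
  // bool risky = std::equal(a.begin(), a.end(), b.begin());

  // C++14 four-iterator overload: both ranges carry an end iterator, so
  // mismatched lengths simply compare unequal instead of over-reading.
  bool same = std::equal(a.begin(), a.end(), b.begin(), b.end());
  assert(same);
  return 0;
}

The four-iterator overload returns false when the ranges differ in length instead of reading out of bounds, which is why post-C++14 coding standards prefer it.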
test/cpp/jit/test_graph_executor.cpp
1 issue
Line: 35
  ASSERT_TRUE(almostEqual(stack[1].toTensor(), r1));
}

TEST(GraphExecutorTest, runAsync_executor) {
  /*
    TODO: there are some problem with C++ parsing script program involving
    fork. Use the test module below for now.
    issue about this: github.com/pytorch/pytorch/issues/46368
    The test module file is generated by following:
Reported by Cppcheck.
test/cpp/jit/test_fuser.cpp
1 issue
Line: 80
  ASSERT_EQ(max_diff, 0);
}

TEST(FuserTest, TestOne_CUDA) {
#if defined(FBCODE_CAFFE2)
  return;
#endif
  auto testOne = [&](int ti, int tj) {
    const auto graph_string = R"IR(
Reported by Cppcheck.
test/cpp/jit/test_custom_operators.cpp
1 issue
Line: 41
  ASSERT_TRUE(output.allclose(at::full(5, 3.0f)));
}

TEST(CustomOperatorTest, ExplicitSchema) {
  torch::RegisterOperators reg(
      "foo::bar_with_schema(float a, Tensor b) -> Tensor",
      [](double a, at::Tensor b) { return a + b; });
  auto& ops =
Reported by Cppcheck.
caffe2/python/pybind_state_hip.cc
1 issue
Line: 88
  });
}

PYBIND11_MODULE(caffe2_pybind11_state_hip, m) {
  m.doc() = "pybind11 stateful interface to Caffe2 workspaces - GPU edition";
  addGlobalMethods(m);
  addHIPGlobalMethods(m);
  addObjectMethods(m);
Reported by Cppcheck.
test/cpp/jit/test_custom_class_registrations.cpp
1 issue
Line: 277
  }
};

TORCH_LIBRARY(_TorchScriptTesting, m) {
  m.class_<ReLUClass>("_ReLUClass")
      .def(torch::init<>())
      .def("run", &ReLUClass::run);
  m.class_<_StaticMethod>("_StaticMethod")
Reported by Cppcheck.
test/cpp/jit/test_custom_class.cpp
1 issue
Line: 80
} // namespace

// Tests DocString is properly propagated when defining CustomClasses.
TEST(CustomClassTest, TestDocString) {
  auto class_type = getCustomClass(
      "__torch__.torch.classes._TorchBindTest._TorchBindTestClass");
  AT_ASSERT(class_type);
  AT_ASSERT(class_type->doc_string() == class_doc_string);
Reported by Cppcheck.