The following issues were found:
test/cpp/api/integration.cpp
1 issue
Line: 110
Column: 37
CWE codes: 807, 20
Suggestion: Check environment variables carefully before using them
    F&& forward_op,
    O&& optimizer) {
  std::string mnist_path = "mnist";
  if (const char* user_mnist_path = getenv("TORCH_CPP_TEST_MNIST_PATH")) {
    mnist_path = user_mnist_path;
  }
  auto train_dataset =
      torch::data::datasets::MNIST(
Reported by FlawFinder.
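A minimal sketch of one way to act on this suggestion, assuming the concern is trusting the getenv value directly: treat TORCH_CPP_TEST_MNIST_PATH as untrusted, and fall back to the default unless it is non-empty and names an existing directory (using POSIX stat). The validated_mnist_path helper below is illustrative, not part of the test.

#include <cstdlib>
#include <string>
#include <sys/stat.h>

// Hypothetical helper: return the environment override only when it is
// non-empty and points at an existing directory; otherwise use the fallback.
std::string validated_mnist_path(const std::string& fallback = "mnist") {
  const char* user_path = std::getenv("TORCH_CPP_TEST_MNIST_PATH");
  if (user_path == nullptr || user_path[0] == '\0') {
    return fallback;
  }
  struct stat st {};
  if (stat(user_path, &st) != 0 || !S_ISDIR(st.st_mode)) {
    return fallback;
  }
  return std::string(user_path);
}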
test/cpp/tensorexpr/test_type.cpp
1 issue
Line: 11
namespace jit {
using namespace torch::jit::tensorexpr;
TEST(Type, Test01) {
  KernelScope kernel_scope;
  {
    Dtype dt1 = kInt;
    ASSERT_EQ(dt1, kInt);
  }
Reported by Cppcheck.
caffe2/share/contrib/depthwise/depthwise3x3_conv_op_test.cc
1 issue
Line: 205
constexpr size_t kIters = 20;
TEST(DEPTHWISE3x3, Conv) {
  for (int i = 0; i < kIters; ++i) {
    int channel = 2;
    runConv(3, 3, 1, 1, channel, channel, channel, randInt(1, 2));
  }
}
Reported by Cppcheck.
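The Cppcheck message text is not included in the report; assuming the warning on this line concerns the comparison between the signed int loop counter and the unsigned size_t constant kIters, one possible fix is to give the counter a matching type:

TEST(DEPTHWISE3x3, Conv) {
  // size_t counter matches the type of kIters, avoiding a mixed-sign compare.
  for (size_t i = 0; i < kIters; ++i) {
    int channel = 2;
    runConv(3, 3, 1, 1, channel, channel, channel, randInt(1, 2));
  }
}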
caffe2/share/contrib/depthwise/depthwise3x3_conv_op.cc
1 issue
Line: 514
Column: 7
CWE codes: 119, 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
#endif
  if (FLAGS_caffe2_profile_depthwise) {
    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
    char buffer[1024];
    const double gmacs = double(
        Y->dim32(2) * Y->dim32(3) * Y->dim32(1) *
        kernel_w() * kernel_h()) /
        1.0E9;
    const double gflops = 2 * gmacs / t.Seconds();
Reported by FlawFinder.
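A hedged sketch of the bounded-write pattern the suggestion describes, assuming buffer is subsequently filled with a formatted profiling string (the formatProfile helper and its format string are illustrative, not the operator's actual code):

#include <cstdio>
#include <string>

// Hypothetical helper: snprintf is limited to sizeof(buffer) and always
// NUL-terminates, addressing the CWE-119/120 concern about unbounded writes.
std::string formatProfile(double gmacs, double gflops) {
  char buffer[1024];
  const int written = std::snprintf(
      buffer, sizeof(buffer), "depthwise3x3: %.3f GMACs, %.3f GFLOP/s", gmacs, gflops);
  if (written < 0) {
    return {}; // formatting error
  }
  // If written >= sizeof(buffer) the output was truncated, but never overflowed.
  return std::string(buffer);
}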
test/cpp/tensorexpr/test_te_fuser_pass.cpp
1 issue
Line: 33
  bool parallel;
};
TEST(TEFuserPass, FuserPass_1) {
  WithCPUFuser cf;
  const auto graph_string = R"IR(
    graph(%0 : Float(128, strides=[1], device=cpu),
          %1 : Float(128, strides=[1], device=cpu)):
      %12 : int = prim::Constant[value=1]()
Reported by Cppcheck.
caffe2/sgd/weight_scale_op.h
1 issue
Line: 38
Column: 5
CWE codes: 120
Suggestion: Make sure destination can always hold the source data
    Context* context) {
  const auto w_size = N * sizeof(float);
  if (iter % stepsize != 0 || iter >= update_upper_bound) {
    memcpy(nw, w, w_size);
    return;
  }
  // perform the weight scaling
  caffe2::math::Scale<T, T, Context>(N, scale, w, nw, context);
}
Reported by FlawFinder.
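A hedged sketch of the guard the suggestion asks for, assuming the caller can pass the destination capacity alongside the pointer (the nw_capacity parameter and the wrapper itself are hypothetical, not part of the operator):

#include <cassert>
#include <cstddef>
#include <cstring>

// Hypothetical wrapper: copy N floats only after confirming the destination
// can hold them, so the memcpy can never overrun nw (CWE-120).
void copy_weights_checked(float* nw, size_t nw_capacity, const float* w, size_t N) {
  assert(nw_capacity >= N && "destination cannot hold the source weights");
  if (nw_capacity < N) {
    return; // refuse to copy rather than overflow
  }
  std::memcpy(nw, w, N * sizeof(float));
}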
test/cpp/tensorexpr/test_simplify.cpp
1 issue
Line: 16
using namespace torch::jit::tensorexpr;
using SimpleIRExprEval = ExprEval<SimpleIREvaluator>;
TEST(Simplify, ConstantFoldSimple) {
  KernelScope kernel_scope;
  ExprHandle a(2.0f);
  ExprHandle b(3.0f);
  ExprHandle f = (a + b);
Reported by Cppcheck.
caffe2/serialize/read_adapter_interface.h
1 issue
Line: 17
Column: 18
CWE codes: 120, 20
class TORCH_API ReadAdapterInterface {
 public:
  virtual size_t size() const = 0;
  virtual size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
      const = 0;
  virtual ~ReadAdapterInterface();
};
} // namespace serialize
Reported by FlawFinder.
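FlawFinder flags the read name itself; whether an overflow is possible depends on each implementation of this interface. A hedged sketch of the clamping an implementation could perform so it never writes more than n bytes into the caller's buffer and never reads past its own data (illustrative only, not the actual PyTorch adapters):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Illustrative in-memory adapter: the copy length is the minimum of the
// caller's n and the bytes remaining after pos, so neither buffer is overrun.
class VectorReadAdapter {
 public:
  explicit VectorReadAdapter(std::vector<char> data) : data_(std::move(data)) {}
  size_t size() const {
    return data_.size();
  }
  size_t read(uint64_t pos, void* buf, size_t n) const {
    if (pos >= data_.size()) {
      return 0; // nothing left to read
    }
    const size_t available = data_.size() - static_cast<size_t>(pos);
    const size_t to_copy = std::min(n, available);
    std::memcpy(buf, data_.data() + pos, to_copy);
    return to_copy;
  }

 private:
  std::vector<char> data_;
};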
caffe2/serialize/istream_adapter.h
1 issue
Line: 17
Column: 10
CWE codes: 120, 20
  C10_DISABLE_COPY_AND_ASSIGN(IStreamAdapter);
  explicit IStreamAdapter(std::istream* istream);
  size_t size() const override;
  size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
      const override;
  ~IStreamAdapter();

 private:
  std::istream* istream_;
Reported by FlawFinder.
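The same FlawFinder pattern as the previous finding; on the caller side the key obligation is that buf really has room for n bytes. A small illustrative usage sketch (the read_chunk template is hypothetical and works with any adapter exposing read(pos, buf, n)):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical caller pattern: the destination is sized from the same n that
// is passed to read, so the adapter can never be asked to overrun it.
template <typename Adapter>
std::vector<char> read_chunk(const Adapter& adapter, uint64_t pos, size_t n) {
  std::vector<char> buf(n);
  const size_t got = adapter.read(pos, buf.data(), n);
  buf.resize(got); // keep only the bytes actually read
  return buf;
}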
caffe2/serialize/inline_container_test.cc
1 issue
Line: 76
  ASSERT_EQ(memcmp(the_file.c_str() + off2, data2.data(), data2.size()), 0);
}
TEST(PytorchStreamWriterAndReader, GetNonexistentRecordThrows) {
  std::ostringstream oss;
  // write records through writers
  PyTorchStreamWriter writer([&](const void* b, size_t n) -> size_t {
    oss.write(static_cast<const char*>(b), n);
    return oss ? n : 0;
Reported by Cppcheck.