The following issues were found:
aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
1 issue
Line: 2192
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
if ((lanes == 32) && elem_size >= kVLen) {
int64_t vec_num = elem_size / kVLen;
std::vector<typename scalar_t::underlying> buf_in(lanes);
memcpy(buf_in.data(), X_ptr + ch, vec_num * kVLen); // 3 cycles
do_bn_compute<scalar_t>(
buf_in.data(),
Y_ptr + ch,
fake_scale,
in_zp_vec,
Reported by FlawFinder.
aten/src/ATen/native/quantized/QTensor.cpp
1 issue
Line: 122
Column: 11
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
underlying_t* dst_data =
reinterpret_cast<underlying_t*>(dst.data_ptr<scalar_t>());
if (self.numel() > 0) {
memcpy(dst_data, self_data, self.nbytes());
}
});
return dst;
}
Reported by FlawFinder.
aten/src/ATen/native/metal/MetalPrepackOpRegister.cpp
1 issue
Line: 44
std::move(packedWeight), std::move(bias), output_min, output_max);
}
TORCH_LIBRARY(metal, m) {
m.class_<Conv2dOpContext>("Conv2dOpContext")
.def_pickle(
[](const c10::intrusive_ptr<Conv2dOpContext>& op_context)
-> SerializationTypeConv2dPrePack { // __getstate__
return op_context->pack();
Reported by Cppcheck.
aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
1 issue
Line: 468
Column: 25
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(generator->mutex_);
seed = generator->random();
}
int64_t n = self.numel();
bool contig = self.is_contiguous();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Bool, self.scalar_type(), "bernoulli_scalar_cpu_", [&] {
Reported by FlawFinder.
aten/src/ATen/native/cpu/LinearAlgebraKernel.cpp
1 issue
Line: 94
CWE codes:
908
TORCH_CHECK(false, "linalg.vector_norm expects ord to be float");
}
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
acc_t init_val = (ord_val == -INFINITY) ? std::numeric_limits<acc_t>::infinity() : static_cast<acc_t>(0);
if (iter.numel() == 0) {
iter.output().fill_((ord_val < 0) ? INFINITY : 0);
return;
}
if (ord_val == 0) {
Reported by Cppcheck.
aten/src/ATen/native/cpu/BinaryOpsKernel.cpp
1 issue
Line: 172
using vec_t = Vectorized<scalar_t>;
cpu_kernel_vec(iter,
[](scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
if (C10_UNLIKELY(b == 0)) {
// Divide by zero: return standard IEEE result
return a / b;
}
auto mod = std::fmod(a, b);
Reported by Cppcheck.
aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp
1 issue
Line: 71
}
};
TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
m.impl(
TORCH_SELECTIVE_NAME("sparse::qlinear_unpack"),
TORCH_FN(QLinearUnpackWeightInt8::run));
}
} // namespace
Reported by Cppcheck.
aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp
1 issue
Line: 230
}
};
TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
m.impl(
TORCH_SELECTIVE_NAME("sparse::qlinear_prepack"),
TORCH_FN(QLinearPackWeightInt8::run));
}
} // namespace
Reported by Cppcheck.
aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp
1 issue
Line: 180
}
};
TORCH_LIBRARY_IMPL(sparse, CPU, m) {
m.impl(
TORCH_SELECTIVE_NAME("sparse::qlinear_dynamic"),
TORCH_FN(QLinearDynamicInt8<false>::run));
m.impl(
TORCH_SELECTIVE_NAME("sparse::qlinear_relu_dynamic"),
Reported by Cppcheck.
aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp
1 issue
Line: 240
}
};
TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
m.impl(
TORCH_SELECTIVE_NAME("sparse::qlinear"),
TORCH_FN(QLinearInt8<false>::run));
m.impl(
TORCH_SELECTIVE_NAME("sparse::qlinear_relu"),
Reported by Cppcheck.