The following issues were found:
aten/src/ATen/native/quantized/cpu/qupsample_bilinear2d.cpp
1 issue
Line: 38
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
// special case: just copy
if (input_height == output_height && input_width == output_width) {
std::memcpy(
o_p,
i_p,
channels * input_height * input_width *
sizeof(typename scalar_t::underlying));
return;
Reported by FlawFinder.
aten/src/ATen/native/quantized/cpu/qthreshold.cpp
1 issue
Line: 37
return qy;
}
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::threshold"), TORCH_FN(threshold_quantized_cpu));
}
} // namespace native
} // namespace at
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qsigmoid.cpp
1 issue
Line: 133
}
};
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::sigmoid"), TORCH_FN(QSigmoid::run));
}
} // namespace
}} // namespace at::native
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qrelu.cpp
1 issue
Line: 191
}
};
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::relu6"), TORCH_FN(QRelu6::run));
m.impl(TORCH_SELECTIVE_NAME("quantized::leaky_relu"), TORCH_FN(QLeakyRelu::run));
}
} // namespace
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qpool.cpp
1 issue
Line: 455
}
};
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::max_pool1d"), TORCH_FN(QMaxPool_arr_args<1>::run));
m.impl(TORCH_SELECTIVE_NAME("quantized::max_pool2d"), TORCH_FN(QMaxPool_arr_args<2>::run));
}
} // namespace
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qnormalization.cpp
1 issue
Line: 119
}
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
// TODO: this is kind of... blegh
m.impl(TORCH_SELECTIVE_NAME("quantized::layer_norm"), [](
Tensor input,
std::vector<int64_t> normalized_shape, // because IntArrayRef doesn't work
c10::optional<Tensor> weight,
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qnnpack/src/u8rmax/sse2.c
1 issue
Line: 18
uint8_t pytorch_u8rmax_ukernel__sse2(size_t n, const uint8_t* x) {
assert(n != 0);
if
PYTORCH_QNNP_LIKELY(n >= 16) {
__m128i vmax = _mm_setzero_si128();
do {
const __m128i vx = _mm_loadu_si128((const __m128i*)x);
x += 16;
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qnnpack/src/u8rmax/neon.c
1 issue
Line: 18
uint8_t pytorch_u8rmax_ukernel__neon(size_t n, const uint8_t* x) {
assert(n != 0);
if
PYTORCH_QNNP_LIKELY(n >= 16) {
uint8x16_t vmax = vmovq_n_u8(0);
do {
const uint8x16_t vx = vld1q_u8(x);
x += 16;
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qnnpack/src/u8clamp/sse2.c
1 issue
Line: 22
const union pytorch_qnnp_u8_clamping_params params[RESTRICT_STATIC 1]) {
assert(n != 0);
if
PYTORCH_QNNP_LIKELY(n >= 8) {
const __m128i voutput_max =
_mm_load_si128((const __m128i*)&params->sse2.output_max);
const __m128i voutput_min =
_mm_load_si128((const __m128i*)&params->sse2.output_min);
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qnnpack/src/u8clamp/neon.c
1 issue
Line: 25
const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
if
PYTORCH_QNNP_LIKELY(n >= 8) {
for (; n >= 64; n -= 64) {
const uint8x16_t vx0 = vld1q_u8(x);
x += 16;
const uint8x16_t vx1 = vld1q_u8(x);
Reported by Cppcheck.