The following issues were found:
aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp
1 issue
Line: 239
}
};
TORCH_LIBRARY_IMPL(quantized, CPU, m) {
m.impl(
TORCH_SELECTIVE_NAME("quantized::embedding_bag_byte_unpack"),
qembeddingbag_byte_unpack);
m.impl(
TORCH_SELECTIVE_NAME("quantized::embedding_bag_4bit_unpack"),
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp
1 issue
Line: 460
}
};
TORCH_LIBRARY_IMPL(quantized, CPU, m) {
m.impl(
TORCH_SELECTIVE_NAME("quantized::embedding_bag_byte_prepack"),
TORCH_FN(qembeddingbag_byte_prepack));
m.impl(
TORCH_SELECTIVE_NAME("quantized::embedding_bag_4bit_prepack"),
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qelu.cpp
1 issue
Line: 26
return quantized_elu(qx, output_scale, output_zero_point, alpha, Scalar(1.0), Scalar(inv_alpha));
}
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::elu"), quantized_elu);
m.impl(TORCH_SELECTIVE_NAME("quantized::celu"), quantized_celu);
}
}} // namespace at::native
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qconv_unpack.cpp
1 issue
Line: 250
};
TORCH_LIBRARY_IMPL(quantized, CatchAll, m) {
// conv_unpack is deprecated, please use conv2d_unpack for 2D conv.
m.impl(TORCH_SELECTIVE_NAME("quantized::conv_unpack"), TORCH_FN(QConvUnpackWeightsInt8<2>::run));
// We use conv2d_unpack to be consistent with conv3d_unpack
m.impl(TORCH_SELECTIVE_NAME("quantized::conv1d_unpack"), TORCH_FN(QConv1dUnpackWeightsInt8::run));
m.impl(TORCH_SELECTIVE_NAME("quantized::conv2d_unpack"), TORCH_FN(QConvUnpackWeightsInt8<2>::run));
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp
1 issue
Line: 456
}
};
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
// Conv
// conv_prepack is deprecated, please use conv2d_prepack for 2D conv.
m.impl(TORCH_SELECTIVE_NAME("quantized::conv_prepack"), TORCH_FN(QConvPackWeightInt8<2>::run_conv));
m.impl(TORCH_SELECTIVE_NAME("quantized::conv1d_prepack"), TORCH_FN(QConv1dPackWeightInt8::run_conv));
m.impl(TORCH_SELECTIVE_NAME("quantized::conv2d_prepack"), TORCH_FN(QConvPackWeightInt8<2>::run_conv));
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qconv.cpp
1 issue
Line: 883
}
};
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::conv1d"), QConv1dInt8<false>::run);
m.impl(TORCH_SELECTIVE_NAME("quantized::conv1d_relu"), QConv1dInt8<true>::run);
m.impl(TORCH_SELECTIVE_NAME("quantized::conv2d.new"), QConvInt8<2, false>::run);
m.impl(TORCH_SELECTIVE_NAME("quantized::conv2d_relu.new"), QConvInt8<2, true>::run);
m.impl(TORCH_SELECTIVE_NAME("quantized::conv3d.new"), QConvInt8<3, false>::run);
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qclamp.cpp
1 issue
Line: 159
return self;
}
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::clamp"), TORCH_FN(clamp_quantized_cpu));
}
} // namespace native
} // namespace at
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp
1 issue
Line: 395
return qy;
}
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::batch_norm"), TORCH_FN(q_batch_norm_impl<false>));
m.impl(TORCH_SELECTIVE_NAME("quantized::batch_norm_relu"), TORCH_FN(q_batch_norm_impl<true>));
m.impl(TORCH_SELECTIVE_NAME("quantized::batch_norm1d"), TORCH_FN(q_batch_norm1d_impl<false>));
m.impl(TORCH_SELECTIVE_NAME("quantized::batch_norm1d_relu"), TORCH_FN(q_batch_norm1d_impl<true>));
m.impl(TORCH_SELECTIVE_NAME("quantized::batch_norm2d"), TORCH_FN(q_batch_norm2d_impl<false>));
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/qadd.cpp
1 issue
Line: 284
return qadd_scalar_out(qa, b.item(), out);
}
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
m.impl(TORCH_SELECTIVE_NAME("quantized::add"), TORCH_FN(qadd</*ReLUFused=*/false>));
m.impl(TORCH_SELECTIVE_NAME("quantized::add.out"), TORCH_FN(qadd_out</*ReLUFused=*/false>));
m.impl(TORCH_SELECTIVE_NAME("quantized::add.Scalar"), TORCH_FN(qadd_scalar</*ReLUFused=*/false>));
m.impl(TORCH_SELECTIVE_NAME("quantized::add.Scalar2"), TORCH_FN(qadd_scalar2</*ReLUFused=*/false>));
m.impl(TORCH_SELECTIVE_NAME("quantized::add.Scalar_out"), TORCH_FN(qadd_scalar_out</*ReLUFused=*/false>));
Reported by Cppcheck.
aten/src/ATen/native/quantized/cpu/make_per_tensor_quantized_tensor.cpp
1 issue
Line: 26
Column: 11
CWE codes: 120
Suggestion: Make sure destination can always hold the source data
underlying_t* dst_data =
reinterpret_cast<underlying_t*>(dst.data_ptr<scalar_t>());
if (self.numel() > 0) {
memcpy(dst_data, self_data, self.nbytes());
}
});
return dst;
}
Reported by FlawFinder.
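The CWE-120 finding flags the unbounded memcpy in the snippet above. A minimal sketch of the kind of guard the suggestion asks for, assuming dst and self are the tensors from the quoted context (TORCH_CHECK is ATen's standard runtime assertion macro; this is an illustration, not the upstream fix):

// Hedged sketch: assert the destination buffer can hold the source
// bytes before copying, so the copy size is provably bounded.
TORCH_CHECK(
    dst.nbytes() >= self.nbytes(),
    "destination tensor too small: ",
    dst.nbytes(), " < ", self.nbytes(), " bytes");
if (self.numel() > 0) {
  memcpy(dst_data, self_data, self.nbytes());
}

Since dst is allocated from self's sizes a few lines earlier, the check is expected to always pass; it exists to make the size invariant explicit at the copy site, which is what FlawFinder cannot verify statically.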