The following issues were found:

aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/AlignedAllocator.h
1 issue
memalign - On some systems (though not Linux-based systems), an attempt to free() memory returned by memalign() may fail. This may, on a few systems, be exploitable. Also note that memalign() may not check that the boundary parameter is correct.
Security

Line: 76 Column: 20 CWE codes: 676
Suggestion: Use posix_memalign instead (defined in POSIX's 1003.1d). Don't switch to valloc(); it is marked as obsolete in BSD 4.3, as legacy in SUSv2, and is no longer defined in SUSv3. In some cases, malloc()'s alignment may be sufficient.

      size_type n,
      typename AlignedAllocator<void, Alignment>::const_pointer hint = 0) {
#if defined(__ANDROID__)
    void* memory = memalign(Alignment, n * sizeof(T));
    if (memory == 0) {
#if !defined(__GNUC__) || defined(__EXCEPTIONS)
      throw std::bad_alloc();
#endif
    }
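
A minimal sketch of the suggested replacement for the flagged allocation (assuming a POSIX target; the error handling mirrors the snippet above and the rest of allocate() is unchanged):

    // Sketch only: posix_memalign (declared in <stdlib.h>/<cstdlib>) reports
    // failure via its return code, and its result is always safe to free().
    // Alignment must be a power of two and a multiple of sizeof(void*).
    void* memory = nullptr;
    if (posix_memalign(&memory, Alignment, n * sizeof(T)) != 0) {
#if !defined(__GNUC__) || defined(__EXCEPTIONS)
      throw std::bad_alloc();
#endif
    }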

Reported by FlawFinder.

aten/src/ATen/native/quantized/cpu/qnnpack/src/q8vadd/sse2.c
1 issue
syntax error
Error

Line: 22

    uint8_t* y,
    const union pytorch_qnnp_add_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  if
    PYTORCH_QNNP_LIKELY(n >= 8) {
      const __m128i vzero_point_product = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.zero_point_product);
      const __m128i va_multiplier_lo = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.a_multiplier_lo);
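
The "if PYTORCH_QNNP_LIKELY(n >= 8)" form that Cppcheck rejects is a macro-wrapped condition rather than a malformed if statement; a minimal sketch of how such branch-hint macros are conventionally defined (an illustrative assumption, not taken from this report):

/* Illustrative definitions only; the actual qnnpack header may differ. */
#if defined(__GNUC__)
#define PYTORCH_QNNP_LIKELY(condition) (__builtin_expect(!!(condition), 1))
#define PYTORCH_QNNP_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
#else
#define PYTORCH_QNNP_LIKELY(condition) (!!(condition))
#define PYTORCH_QNNP_UNLIKELY(condition) (!!(condition))
#endif

/* After preprocessing, the flagged line is an ordinary if statement: */
if (__builtin_expect(!!(n >= 8), 1)) { /* ... */ }

The same pattern applies to the PYTORCH_QNNP_LIKELY and PYTORCH_QNNP_UNLIKELY findings below; pointing Cppcheck at the header that defines these macros (or predefining them with -D) typically removes this class of syntax-error report.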

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qnnpack/src/q8vadd/neon.c
1 issue
syntax error
Error

Line: 37

      vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
  const uint8x16_t vy_max = vld1q_dup_u8(&quantization_params->neon.y_max);
  const uint8x16_t vy_min = vld1q_dup_u8(&quantization_params->neon.y_min);
  if
    PYTORCH_QNNP_LIKELY(n >= 8) {
#ifdef __aarch64__
      for (; n >= 32; n -= 32) {
        const uint8x16_t va01 = vld1q_u8(a);
        a += 16;

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qnnpack/src/pack_block_sparse.cc
1 issue
syntax error
Error

Line: 35

      bool block_zero{true};
      for (uint32_t ib = 0; ib < row_block_size; ++ib) {
        uint32_t row_index = i * row_block_size + ib;
        if PYTORCH_QNNP_UNLIKELY(row_index >= N) {
          break;
        }
        for (uint32_t jb = 0; jb < col_block_size; ++jb) {
          uint32_t col_index = j * col_block_size + jb;
          if PYTORCH_QNNP_UNLIKELY(col_index >= K) {

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qmul.cpp
1 issue
syntax error
Error

Line: 190

  }
};

TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::mul"),                 TORCH_FN(QMul</*ReLUFused=*/false>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::mul.out"),             TORCH_FN(QMulOut</*ReLUFused=*/false>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::mul.Scalar"),          TORCH_FN(QMulScalar</*ReLUFused=*/false>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::mul.Scalar2"),          TORCH_FN(QMulScalar2</*ReLUFused=*/false>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::mul.Scalar_out"),      TORCH_FN(QMulScalarOut</*ReLUFused=*/false>::run));

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qlinear_unpack.cpp
1 issue
syntax error
Error

Line: 139

  }
};

TORCH_LIBRARY_IMPL(quantized, CPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack.legacy"), TORCH_FN(QLinearUnpackWeightInt8Legacy::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack_fp16.legacy"), TORCH_FN(QLinearUnpackWeightFp16Legacy::run));
}

TORCH_LIBRARY_IMPL(quantized, CatchAll, m) {

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp
1 issue
syntax error
Error

Line: 324

  }
};

TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_legacy"), TORCH_FN(QLinearPackWeightInt8Legacy::run));
}

TORCH_LIBRARY_IMPL(quantized, CPU, m) {

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp
1 issue
syntax error
Error

Line: 464

#endif // USE_FBGEMM
};

TORCH_LIBRARY_IMPL(quantized, CPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_dynamic"), TORCH_FN(QLinearDynamicInt8<false>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_relu_dynamic"), TORCH_FN(QLinearDynamicInt8<true>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_dynamic_fp16"), TORCH_FN(QLinearDynamicFp16<false>::run));
}


Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qlinear.cpp
1 issue
syntax error
Error

Line: 442

  }
};

TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear"), TORCH_FN(QLinearInt8<false>::run));
  m.impl(TORCH_SELECTIVE_NAME("quantized::linear_relu"), TORCH_FN(QLinearInt8<true>::run));
}

TORCH_LIBRARY_IMPL(_quantized, QuantizedCPU, m) {

Reported by Cppcheck.

aten/src/ATen/native/quantized/cpu/qhardswish.cpp
1 issue
syntax error
Error

Line: 91

  return qy;
}

TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::hardswish"), TORCH_FN(quantized_hardswish));
}

}}  // namespace at::native

Reported by Cppcheck.