The following issues were found:

torch/ao/__init__.py
1 issue
Missing module docstring
Error

Line: 1 Column: 1

from torch.ao import nn
from torch.ao import sparsity

Reported by Pylint.

torch/csrc/jit/tensorexpr/types.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If AT_FORALL_SCALAR_TYPES_AND2 is a macro then please configure it.
Error

Line: 19

// NOLINTNEXTLINE
#define DTYPE_DEFINE(_1, n) TORCH_API Dtype k##n(ScalarType::n, 1);

AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, DTYPE_DEFINE)

#undef DTYPE_DEFINE

TORCH_API Dtype kHandle(ScalarType::Undefined, 1);

Reported by Cppcheck.
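
This report is a parsing/configuration issue rather than a code defect: Cppcheck cannot expand a function-like macro it has no definition for, so the AT_FORALL_SCALAR_TYPES_AND2 call site looks syntactically incomplete to it. A reduced sketch of the X-macro shape involved (not the real ATen definition, which covers the full scalar-type list) shows why the definition matters for parsing; supplying the macro to the tool (for example via -D) or suppressing this check resolves the report.

// Reduced illustrative sketch, not the real ATen macro: an X-macro that
// applies the callback macro `_` to a fixed list of (C++ type, name) pairs
// plus the two extra names passed in.
#include <cstdint>

#define FORALL_EXAMPLE_TYPES_AND2(EXTRA1, EXTRA2, _) \
  _(float, Float)                                    \
  _(double, Double)                                  \
  _(bool, EXTRA1)                                    \
  _(int16_t, EXTRA2)

// Mirrors the DTYPE_DEFINE pattern above: one constant per listed type.
#define DEFINE_SIZE_TAG(cpp_type, name) constexpr int k##name = sizeof(cpp_type);
FORALL_EXAMPLE_TYPES_AND2(Bool, Half, DEFINE_SIZE_TAG)
#undef DEFINE_SIZE_TAG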

torch/csrc/jit/codegen/cuda/compute_at.cpp
1 issue
mismatch - Function does not check the second iterator for over-read conditions
Security

Line: 81 Column: 7 CWE codes: 126
Suggestion: This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it

  auto mismatch = BestEffortReplay::findFirstMismatchedID(
      tv_ref_->domain(), original_domain_);
  TORCH_CHECK(
      mismatch >= (int)original_compute_at_position,
      "Invalid computeAt detected. This computeAt call would invalidate the set computeAt on ",
      tv_ref_,
      " as the previous set computeAt was on the domain ",
      original_domain_,
      " with a computeAt position of ",

            

Reported by FlawFinder.
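
The flagged token is the local variable mismatch, which is assigned from BestEffortReplay::findFirstMismatchedID rather than from std::mismatch, so this appears to be a name-based match. For reference, the bounds-checked form the suggestion refers to is the C++14 overload of std::mismatch that takes both end iterators; a minimal sketch with illustrative names:

// Minimal sketch of the C++14 std::mismatch overload that receives both end
// iterators: the comparison stops at whichever range ends first instead of
// reading past the end of the second range.
#include <algorithm>
#include <vector>

bool isPrefixOf(const std::vector<int>& prefix, const std::vector<int>& full) {
  auto result = std::mismatch(prefix.begin(), prefix.end(),
                              full.begin(), full.end());
  return result.first == prefix.end();
}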

torch/csrc/MemoryFormat.h
1 issue
char - Statically-sized arrays can be improperly restricted, leading to potential overflows or other issues
Security

Line: 15 Column: 3 CWE codes: 119 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length

  PyObject_HEAD
  at::MemoryFormat memory_format;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
  char name[MEMORY_FORMAT_NAME_LEN + 1];
};

extern PyTypeObject THPMemoryFormatType;

inline bool THPMemoryFormat_Check(PyObject *obj) {

Reported by FlawFinder.
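
The fixed-size name buffer is not a defect by itself; the warning is about code that writes into it. A minimal sketch of the bounds checking the suggestion asks for, using an illustrative struct and constant rather than the real THPMemoryFormat layout:

// Illustrative sketch: copy at most the buffer capacity and always write a
// terminator, so no write can run past the end of the array.
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <string>

constexpr std::size_t kNameLen = 64;  // stand-in for MEMORY_FORMAT_NAME_LEN

struct NamedObject {
  char name[kNameLen + 1];
};

void setName(NamedObject& obj, const std::string& value) {
  const std::size_t n = std::min(value.size(), kNameLen);  // clamp to capacity
  std::memcpy(obj.name, value.data(), n);
  obj.name[n] = '\0';  // terminated even when the input is truncated
}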

tools/autograd/templates/python_variable_methods.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 1055

static PyObject * THPVariable_bool_scalar(PyObject* self, PyObject* args) {
  if (check_has_torch_function(self)) {
    HANDLE_TH_ERRORS
    return handle_torch_function(self, "__bool__", args);
    END_HANDLE_TH_ERRORS
  }
  jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
  return THPVariable_is_nonzero(self, args);

Reported by Cppcheck.
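
As with the macro report above, this is a tool-configuration issue: HANDLE_TH_ERRORS and its END_* counterparts open and close a block, so without their definitions Cppcheck sees what look like unbalanced braces. A hedged sketch of the general shape of such begin/end macros (not the actual PyTorch definitions, which also translate the caught C++ exception into a Python error):

// Sketch only: begin/end macros that wrap a function body in try/catch.
// Supplying equivalent definitions to the analyzer lets it parse the file.
#define SKETCH_HANDLE_TH_ERRORS try {
#define SKETCH_END_HANDLE_TH_ERRORS \
  } catch (...) { /* set the Python error here */ return nullptr; }
#define SKETCH_END_HANDLE_TH_ERRORS_RET(retval) \
  } catch (...) { /* set the Python error here */ return retval; }

// Usage mirrors the excerpt: the macros supply the braces around the body.
long safeLength() {
  SKETCH_HANDLE_TH_ERRORS
  return 42;
  SKETCH_END_HANDLE_TH_ERRORS_RET(-1)
}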

third_party/miniz-2.0.8/examples/example1.c
1 issue
strlen - Does not handle strings that are not \0-terminated; if given one it may perform an over-read (it could cause a crash if unprotected)
Security

Line: 43 Column: 26 CWE codes: 126

  uint step = 0;
  int cmp_status;
  uLong src_len = (uLong)strlen(s_pStr);
  uLong cmp_len = compressBound(src_len);
  uLong uncomp_len = src_len;
  uint8 *pCmp, *pUncomp;
  uint total_succeeded = 0;
  (void)argc, (void)argv;

Reported by FlawFinder.
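
The measured string here appears to be a fixed test string in the example program, so it is terminated by construction; the general mitigation FlawFinder hints at is a length computation with an explicit bound. A minimal sketch using memchr, with illustrative names:

// Minimal sketch of a bounded length computation: memchr inspects at most
// maxLen bytes, so an unterminated buffer cannot be over-read the way strlen
// would over-read it.
#include <cstddef>
#include <cstring>

std::size_t boundedLength(const char* buf, std::size_t maxLen) {
  const void* terminator = std::memchr(buf, '\0', maxLen);
  return terminator != nullptr
      ? static_cast<std::size_t>(static_cast<const char*>(terminator) - buf)
      : maxLen;  // no terminator within the bound: report the bound itself
}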

torch/csrc/generic/Storage.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 140

static Py_ssize_t THPStorage_(length)(THPStorage *self)
{
  HANDLE_TH_ERRORS
  return self->cdata->nbytes() / sizeof(scalar_t);
  END_HANDLE_TH_ERRORS_RET(-1)
}

static PyObject * THPStorage_(get)(THPStorage *self, PyObject *index)

Reported by Cppcheck.

torch/csrc/utils/tensor_types.cpp
1 issue
mismatch - Function does not check the second iterator for over-read conditions
Security

Line: 58 Column: 12 CWE codes: 126
Suggestion: This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it

    return getDeprecatedTypeProperties(backend, scalar_type).options();
  }

  if (std::mismatch(cuda_prefix.begin(), cuda_prefix.end(), str.begin()).first == cuda_prefix.end()) {
    // torch.cuda. is prefix of str
    std::call_once(cuda_once, []() {
      for (auto type : autograd::VariableType::allCUDATypes()) {
        cuda_map.emplace(type_to_string(*type), type);
      }

Reported by FlawFinder.
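
Here std::mismatch is given only one end iterator, so if str were ever shorter than cuda_prefix the comparison could read past its end. One length-safe way to express the prefix test, sketched with illustrative names (the C++14 four-iterator std::mismatch overload shown earlier is another option):

// Sketch of a prefix check that validates the candidate's length up front, so
// the comparison can never read past the end of the shorter string.
#include <string>

bool hasPrefix(const std::string& str, const std::string& prefix) {
  return str.size() >= prefix.size() &&
         str.compare(0, prefix.size(), prefix) == 0;
}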

torch/csrc/distributed/rpc/tensorpipe_utils.cpp
1 issue
memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 290 Column: 5 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

      return 0;
    }
    size_t toCopy = std::min(picklePos + n, pickleLen) - picklePos;
    memcpy(buf, pickleData + picklePos, toCopy);
    picklePos += toCopy;
    return toCopy;
  };
  auto tensorReadFunc = [&](const std::string& ename) -> at::DataPtr {
    unsigned long index = std::stoul(ename);

Reported by FlawFinder.
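
The excerpt already clamps toCopy against the remaining pickle data, and buf is presumably provided by the caller with room for n bytes; FlawFinder flags memcpy unconditionally because it cannot see that contract. A generic sketch of the explicit destination-capacity check the suggestion describes, with illustrative names:

// Generic sketch: take the destination capacity explicitly and clamp the copy
// to it, so the call can never write more than the destination can hold.
#include <algorithm>
#include <cstddef>
#include <cstring>

std::size_t copyBounded(void* dst, std::size_t dstCapacity,
                        const void* src, std::size_t srcLen) {
  const std::size_t toCopy = std::min(dstCapacity, srcLen);
  std::memcpy(dst, src, toCopy);
  return toCopy;  // callers can detect truncation by comparing with srcLen
}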

torch/csrc/MemoryFormat.cpp
1 issue
strncpy - Easily used incorrectly; doesn't always \0-terminate or check for invalid pointers [MS-banned]
Security

Line: 20 Column: 8 CWE codes: 120

  if (!self) throw python_error();
  auto self_ = reinterpret_cast<THPMemoryFormat*>(self.get());
  self_->memory_format = memory_format;
  std::strncpy (self_->name, name.c_str(), MEMORY_FORMAT_NAME_LEN);
  self_->name[MEMORY_FORMAT_NAME_LEN] = '\0';
  return self.release();
}

PyObject *THPMemoryFormat_repr(THPMemoryFormat *self)

Reported by FlawFinder.
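
The excerpt already follows strncpy with an explicit terminator write, which addresses the classic pitfall behind this warning. If the pattern were replaced outright, one common alternative is snprintf, which both limits the write and always terminates; a small sketch with an illustrative buffer size:

// Sketch of an alternative to strncpy plus a manual terminator: snprintf never
// writes past the given size and always \0-terminates, truncating if needed.
#include <cstdio>
#include <string>

void copyName(char (&dst)[65], const std::string& src) {
  std::snprintf(dst, sizeof(dst), "%s", src.c_str());
}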