The following issues were found:

torch/csrc/Generator.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 163

}

static PyObject * THPGenerator_get_device(THPGenerator *self, void *unused) {
  HANDLE_TH_ERRORS
  return THPDevice_New(self->cdata.device());
  END_HANDLE_TH_ERRORS
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)

Reported by Cppcheck.
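
Cppcheck stops at HANDLE_TH_ERRORS because it never sees the macro's definition, so it cannot parse the function body. One way to unblock it is to supply a fallback the analyzer can parse, either via -D on the command line or a configuration header. A minimal sketch, assuming only that the macros wrap the body in a try/catch (the real definitions in torch/csrc/Exceptions.h do considerably more, translating C++ exceptions into Python errors):

// Simplified fallback so static analysis can parse past the macros.
// The expansion below is an assumption for analysis purposes only;
// it is NOT the real PyTorch definition.
#ifndef HANDLE_TH_ERRORS
#define HANDLE_TH_ERRORS try {
#define END_HANDLE_TH_ERRORS \
  } catch (...) { return nullptr; }
#endif

The same configuration resolves the identical findings reported below for torch/csrc/cuda/Stream.cpp, torch/csrc/cuda/Module.cpp, torch/csrc/autograd/python_function.cpp, and torch/csrc/autograd/init.cpp.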

torch/csrc/cuda/python_nccl.cpp
1 issue
memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 112 Column: 3 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

      id_len);

  ncclUniqueId commId;
  memcpy(&commId, id, NCCL_UNIQUE_ID_BYTES);
  ncclComm_t comm;
  {
    pybind11::gil_scoped_release no_gil;
    comm = comm_init_rank(nranks, commId, rank);
  }

Reported by FlawFinder.
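
The copy itself is fixed-size, so the guard FlawFinder asks for is a check that the source buffer really holds NCCL_UNIQUE_ID_BYTES bytes. A minimal sketch, assuming id_len is the length of the Python-supplied buffer as the surrounding snippet suggests:

  // Sketch: validate the source length before the fixed-size copy.
  TORCH_CHECK(
      id_len == NCCL_UNIQUE_ID_BYTES,
      "invalid NCCL unique id length: ", id_len,
      ", expected ", NCCL_UNIQUE_ID_BYTES);
  ncclUniqueId commId;
  memcpy(&commId, id, NCCL_UNIQUE_ID_BYTES);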

torch/csrc/cuda/nccl.h
1 issue
char - Statically-sized arrays can be improperly restricted, leading to potential overflows or other issues
Security

Line: 24 Column: 18 CWE codes: 119 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length

/** redefine nccl unique ID in torch scope. this should be identical to native nccl impl. */
#define NCCL_UNIQUE_ID_BYTES 128
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
typedef struct { char internal[NCCL_UNIQUE_ID_BYTES]; } ncclUniqueId;

/* Error type */
enum class ncclResult {
    Success                 =  0,
    UnhandledCudaError      =  1,

Reported by FlawFinder.
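
Here the 128-byte array is deliberate: the struct mirrors NCCL's own ncclUniqueId wire format, so the size cannot change. What can be checked is that the mirrored layout stays in sync and that every write into it is length-limited. A sketch under those assumptions (the helper below is hypothetical, not PyTorch API):

// The torch-scope struct must stay byte-identical to the native one.
static_assert(sizeof(ncclUniqueId) == NCCL_UNIQUE_ID_BYTES,
              "ncclUniqueId layout must match NCCL_UNIQUE_ID_BYTES");

// Hypothetical helper: writes into `internal` are bounds-checked.
inline void set_unique_id(ncclUniqueId& dst, const char* src, size_t len) {
  TORCH_CHECK(len <= sizeof(dst.internal), "unique id too large: ", len);
  memcpy(dst.internal, src, len);
}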

torch/csrc/cuda/Stream.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 66

}

static PyObject * THCPStream_get_device(THCPStream *self, void *unused) {
  HANDLE_TH_ERRORS
  return THPDevice_New(self->cuda_stream.device());
  END_HANDLE_TH_ERRORS
}

static PyObject * THCPStream_get_cuda_stream(THCPStream *self, void *unused) {

Reported by Cppcheck.

torch/csrc/cuda/Module.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 136

}

static PyObject * THCPModule_isInBadFork(PyObject *self, PyObject *noargs) {
  HANDLE_TH_ERRORS
  return PyBool_FromLong(in_bad_fork);
  END_HANDLE_TH_ERRORS
}

PyObject * THCPModule_getCurrentStream_wrap(

Reported by Cppcheck.

torch/csrc/Exceptions.h
1 issue
printf - If format strings can be influenced by an attacker, they can be exploited
Security

Line: 261 Column: 28 CWE codes: 134
Suggestion: Use a constant for the format specification

// The compiler can then warn on invalid format specifiers
#ifdef __GNUC__
#  define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \
    __attribute__((format (printf, FORMAT_INDEX, VA_ARGS_INDEX)))
#else
#  define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX)
#endif

// Translates to Python IndexError

Reported by FlawFinder.
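
The CWE-134 concern is a format string that an attacker can influence. The TORCH_FORMAT_FUNC attribute quoted above exists precisely so GCC can check format specifiers against arguments, which only works when the format is a compile-time constant. A minimal sketch of the safe pattern (the function name is illustrative, not from the source):

#include <cstdio>

// Hypothetical example: print untrusted input through a constant format.
void log_user_message(const char* user_input) {
  std::printf("%s\n", user_input);  // safe: constant format string
  // std::printf(user_input);       // unsafe: a '%' in the input would be
  //                                // interpreted as a format specifier
}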

torch/csrc/autograd/record_function_ops.cpp
1 issue
syntax error
Error

Line: 69

}

// Internal only, do not use directly, use Python's record_function()
TORCH_LIBRARY_FRAGMENT(profiler, m) {
    m.def("_record_function_enter", &record_function_enter);
    m.def("_record_function_exit", &record_function_exit);
}

// Needed to register JIT operator in operator registry below

Reported by Cppcheck.
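
As with the HANDLE_TH_ERRORS reports above, this "syntax error" most likely means Cppcheck cannot expand the TORCH_LIBRARY_FRAGMENT macro, not that the code is malformed. Besides configuring the macro, the finding can be acknowledged in place with an inline suppression, which Cppcheck honors when run with --inline-suppr; a sketch:

// cppcheck-suppress syntaxError
TORCH_LIBRARY_FRAGMENT(profiler, m) {
    m.def("_record_function_enter", &record_function_enter);
    m.def("_record_function_exit", &record_function_exit);
}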

torch/csrc/Dtype.h
1 issue
char - Statically-sized arrays can be improperly restricted, leading to potential overflows or other issues
Security

Line: 13 Column: 3 CWE codes: 119 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length

  PyObject_HEAD
  at::ScalarType scalar_type;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
  char name[DTYPE_NAME_LEN + 1];
};

TORCH_API extern PyTypeObject THPDtypeType;

inline bool THPDtype_Check(PyObject *obj) {

Reported by FlawFinder.
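
Because name has a fixed size of DTYPE_NAME_LEN + 1, every write into it needs to be length-limited so longer inputs truncate instead of overflowing. A minimal sketch using snprintf, which always NUL-terminates and never writes past the declared buffer (the helper is hypothetical, not PyTorch API):

#include <cstdio>

// Hypothetical helper: bounded, NUL-terminated copy into the buffer.
inline void copy_dtype_name(THPDtype* self, const char* src) {
  std::snprintf(self->name, sizeof(self->name), "%s", src);
}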

torch/csrc/autograd/python_function.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 741

PyObject *THPFunction_saved_tensors(THPFunction *self, void *_unused)
{
  HANDLE_TH_ERRORS
  return unpack_saved_variables(self, [](const Variable& var) {
    return THPVariable_Wrap(var);
  });
  END_HANDLE_TH_ERRORS
}

Reported by Cppcheck.

torch/csrc/autograd/init.cpp
1 issue
There is an unknown macro here somewhere. Configuration is required. If HANDLE_TH_ERRORS is a macro then please configure it.
Error

Line: 411

}

static PyObject * autocast_increment_nesting(PyObject* _unused, PyObject *arg) {
  HANDLE_TH_ERRORS
  return THPUtils_packInt64(at::autocast::increment_nesting());
  END_HANDLE_TH_ERRORS
}

static PyObject * autocast_decrement_nesting(PyObject* _unused, PyObject *arg) {

Reported by Cppcheck.