The following issues were found:
test/typing/fail/creation_ops.py
3 issues
Line: 2
Column: 1
# flake8: noqa
import torch
torch.tensor([3], dtype='int32') # E: expected "Optional[dtype]"
torch.ones(3, dtype='int32') # E: No overload variant of "ones" matches argument types "int", "str"
torch.zeros(3, dtype='int32') # E: No overload variant of "zeros" matches argument types "int", "str"
Reported by Pylint.
Line: 1
Column: 1
# flake8: noqa
import torch
torch.tensor([3], dtype='int32') # E: expected "Optional[dtype]"
torch.ones(3, dtype='int32') # E: No overload variant of "ones" matches argument types "int", "str"
torch.zeros(3, dtype='int32') # E: No overload variant of "zeros" matches argument types "int", "str"
Reported by Pylint.
Line: 6
Column: 1
torch.tensor([3], dtype='int32') # E: expected "Optional[dtype]"
torch.ones(3, dtype='int32') # E: No overload variant of "ones" matches argument types "int", "str"
torch.zeros(3, dtype='int32') # E: No overload variant of "zeros" matches argument types "int", "str"
Reported by Pylint.
test/typing/fail/bitwise_ops.py
3 issues
Line: 2
Column: 1
# flake8: noqa
import torch
# binary ops: <<, >>, |, &, ~, ^
a = torch.ones(3, dtype=torch.float64)
i = int()
i | a # E: Unsupported operand types
Reported by Pylint.
Line: 9
Column: 1
a = torch.ones(3, dtype=torch.float64)
i = int()
i | a # E: Unsupported operand types
Reported by Pylint.
Line: 1
Column: 1
# flake8: noqa
import torch
# binary ops: <<, >>, |, &, ~, ^
a = torch.ones(3, dtype=torch.float64)
i = int()
i | a # E: Unsupported operand types
Reported by Pylint.
torch/ao/nn/sparse/quantized/__init__.py
3 issues
Line: 3
Column: 1
from torch.ao.nn.sparse.quantized import dynamic
from .linear import Linear
from .linear import LinearPackedParams
__all__ = [
"dynamic",
"Linear",
"LinearPackedParams",
Reported by Pylint.
Line: 4
Column: 1
from torch.ao.nn.sparse.quantized import dynamic
from .linear import Linear
from .linear import LinearPackedParams
__all__ = [
"dynamic",
"Linear",
"LinearPackedParams",
Reported by Pylint.
Line: 1
Column: 1
from torch.ao.nn.sparse.quantized import dynamic
from .linear import Linear
from .linear import LinearPackedParams
__all__ = [
"dynamic",
"Linear",
"LinearPackedParams",
Reported by Pylint.
torch/ao/sparsity/_mappings.py
3 issues
Line: 1
Column: 1
import torch
def get_static_sparse_quantized_mapping():
_static_sparse_quantized_mapping = dict({
torch.nn.Linear: torch.ao.nn.sparse.quantized.Linear,
})
return _static_sparse_quantized_mapping
def get_dynamic_sparse_quantized_mapping():
Reported by Pylint.
Line: 3
Column: 1
import torch
def get_static_sparse_quantized_mapping():
_static_sparse_quantized_mapping = dict({
torch.nn.Linear: torch.ao.nn.sparse.quantized.Linear,
})
return _static_sparse_quantized_mapping
def get_dynamic_sparse_quantized_mapping():
Reported by Pylint.
Line: 9
Column: 1
})
return _static_sparse_quantized_mapping
def get_dynamic_sparse_quantized_mapping():
_dynamic_sparse_quantized_mapping = dict({
torch.nn.Linear: torch.ao.nn.sparse.quantized.dynamic.Linear,
})
return _dynamic_sparse_quantized_mapping
Reported by Pylint.
torch/backends/mkl/__init__.py
3 issues
Line: 6
Column: 12
def is_available():
r"""Returns whether PyTorch is built with MKL support."""
return torch._C.has_mkl
Reported by Pylint.
Line: 1
Column: 1
import torch
def is_available():
r"""Returns whether PyTorch is built with MKL support."""
return torch._C.has_mkl
Reported by Pylint.
Line: 6
Column: 12
def is_available():
r"""Returns whether PyTorch is built with MKL support."""
return torch._C.has_mkl
Reported by Pylint.
torch/backends/openmp/__init__.py
3 issues
Line: 6
Column: 12
def is_available():
r"""Returns whether PyTorch is built with OpenMP support."""
return torch._C.has_openmp
Reported by Pylint.
Line: 1
Column: 1
import torch
def is_available():
r"""Returns whether PyTorch is built with OpenMP support."""
return torch._C.has_openmp
Reported by Pylint.
Line: 6
Column: 12
def is_available():
r"""Returns whether PyTorch is built with OpenMP support."""
return torch._C.has_openmp
Reported by Pylint.
torch/csrc/Exceptions.cpp
3 issues
Line: 134
Column: 3
CWE codes:
134
Suggestion:
Use a constant for the format specification
static const size_t ERROR_BUF_SIZE = 1024;
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
char error_buf[ERROR_BUF_SIZE];
vsnprintf(error_buf, ERROR_BUF_SIZE, format, fmt_args);
// Ensure that the string is null terminated
error_buf[sizeof(error_buf) / sizeof(*error_buf) - 1] = 0;
return std::string(error_buf);
Reported by FlawFinder.
Line: 25
Column: 21
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
namespace torch {
static bool compute_cpp_stack_traces_enabled() {
auto envar = std::getenv("TORCH_SHOW_CPP_STACKTRACES");
if (envar) {
if (strcmp(envar, "0") == 0) {
return false;
}
if (strcmp(envar, "1") == 0) {
Reported by FlawFinder.
Line: 133
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
static std::string formatMessage(const char *format, va_list fmt_args) {
static const size_t ERROR_BUF_SIZE = 1024;
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
char error_buf[ERROR_BUF_SIZE];
vsnprintf(error_buf, ERROR_BUF_SIZE, format, fmt_args);
// Ensure that the string is null terminated
error_buf[sizeof(error_buf) / sizeof(*error_buf) - 1] = 0;
Reported by FlawFinder.
torch/csrc/api/include/torch/nn/functional/activation.h
3 issues
Line: 627
Column: 16
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor q, k, v;
if (!use_separate_proj_weight) {
if (torch::equal(query, key) && torch::equal(key, value)) {
// self-attention
const auto chunks =
F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1);
q = chunks[0];
k = chunks[1];
Reported by FlawFinder.
Line: 627
Column: 44
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor q, k, v;
if (!use_separate_proj_weight) {
if (torch::equal(query, key) && torch::equal(key, value)) {
// self-attention
const auto chunks =
F::linear(query, in_proj_weight, in_proj_bias).chunk(3, /*dim=*/-1);
q = chunks[0];
k = chunks[1];
Reported by FlawFinder.
Line: 634
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
q = chunks[0];
k = chunks[1];
v = chunks[2];
} else if (torch::equal(key, value)) {
// encoder-decoder attention
// This is inline in_proj function with in_proj_weight and in_proj_bias
auto _b = in_proj_bias;
auto _start = 0;
auto _end = embed_dim;
Reported by FlawFinder.
torch/csrc/api/src/data/datasets/mnist.cpp
3 issues
Line: 42
Column: 20
CWE codes:
120
20
static const bool is_little_endian = check_is_little_endian();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint32_t value;
AT_ASSERT(stream.read(reinterpret_cast<char*>(&value), sizeof value));
return is_little_endian ? flip_endianness(value) : value;
}
uint32_t expect_int32(std::ifstream& stream, uint32_t expected) {
const auto value = read_int32(stream);
Reported by FlawFinder.
Line: 79
Column: 10
CWE codes:
120
20
auto tensor =
torch::empty({count, 1, kImageRows, kImageColumns}, torch::kByte);
images.read(reinterpret_cast<char*>(tensor.data_ptr()), tensor.numel());
return tensor.to(torch::kFloat32).div_(255);
}
Tensor read_targets(const std::string& root, bool train) {
const auto path =
Reported by FlawFinder.
Line: 95
Column: 11
CWE codes:
120
20
expect_int32(targets, count);
auto tensor = torch::empty(count, torch::kByte);
targets.read(reinterpret_cast<char*>(tensor.data_ptr()), count);
return tensor.to(torch::kInt64);
}
} // namespace
MNIST::MNIST(const std::string& root, Mode mode)
Reported by FlawFinder.
torch/csrc/api/src/data/samplers/distributed.cpp
3 issues
Line: 84
Column: 11
CWE codes:
120
20
void DistributedRandomSampler::load(serialize::InputArchive& archive) {
auto tensor = torch::empty(1, torch::kInt64);
archive.read("epoch_", tensor, /*is_buffer=*/true);
epoch_ = tensor.item<int64_t>();
// call reset() after loading epoch_ to populate indices.
reset(size_);
tensor = torch::empty(1, torch::kInt64);
Reported by FlawFinder.
Line: 90
Column: 11
CWE codes:
120
20
reset(size_);
tensor = torch::empty(1, torch::kInt64);
archive.read("sample_index_", tensor, /*is_buffer=*/true);
sample_index_ = tensor.item<int64_t>();
}
size_t DistributedRandomSampler::index() const noexcept {
return sample_index_;
Reported by FlawFinder.
Line: 158
Column: 11
CWE codes:
120
20
void DistributedSequentialSampler::load(serialize::InputArchive& archive) {
auto tensor = torch::empty(1, torch::kInt64);
archive.read("sample_index_", tensor, /*is_buffer=*/true);
sample_index_ = tensor.item<int64_t>();
}
size_t DistributedSequentialSampler::index() const noexcept {
return sample_index_;
Reported by FlawFinder.