The following issues were found
torch/jit/_async.py
5 issues
Line: 84
Column: 12
mod = Mod()
assert mod(input) == torch.jit.script(mod).forward(input)
"""
return torch._C.fork(func, *args, **kwargs)
def wait(future):
r"""
Forces completion of a `torch.jit.Future[T]` asynchronous task, returning the
Reported by Pylint.
Line: 96
Column: 12
Returns:
    `T`: the return value of the completed task
"""
return torch._C.wait(future)
_register_builtin(wait, "aten::wait")
Reported by Pylint.
Line: 25
Column: 1
of the result of this execution. `fork` will return immediately,
so the return value of `func` may not have been computed yet. To force completion
of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
nested, and may be invoked with positional and keyword arguments.
Asynchronous execution will only occur when run in TorchScript. If run in pure python,
`fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
while tracing, however the `fork` and `wait` calls will be captured in the exported IR Graph.
Reported by Pylint.
Line: 84
Column: 12
mod = Mod()
assert mod(input) == torch.jit.script(mod).forward(input)
"""
return torch._C.fork(func, *args, **kwargs)
def wait(future):
r"""
Forces completion of a `torch.jit.Future[T]` asynchronous task, returning the
Reported by Pylint.
Line: 96
Column: 12
Returns:
    `T`: the return value of the completed task
"""
return torch._C.wait(future)
_register_builtin(wait, "aten::wait")
Reported by Pylint.
torch/utils/data/__init__.py
5 issues
Line: 1
Column: 3
# TODO(VitalyFedyunin): Rearranging these imports leads to a crash,
# need to cleanup dependencies and fix it
from torch.utils.data.sampler import (
BatchSampler,
RandomSampler,
Sampler,
SequentialSampler,
SubsetRandomSampler,
WeightedRandomSampler,
Reported by Pylint.
Line: 11
Column: 1
SubsetRandomSampler,
WeightedRandomSampler,
)
from torch.utils.data.dataset import (
ChainDataset,
ConcatDataset,
Dataset,
Dataset as MapDataPipe,
DataChunk,
Reported by Pylint.
Line: 11
Column: 1
SubsetRandomSampler,
WeightedRandomSampler,
)
from torch.utils.data.dataset import (
ChainDataset,
ConcatDataset,
Dataset,
Dataset as MapDataPipe,
DataChunk,
Reported by Pylint.
Line: 1
Column: 1
# TODO(VitalyFedyunin): Rearranging these imports leads to a crash,
# need to cleanup dependencies and fix it
from torch.utils.data.sampler import (
BatchSampler,
RandomSampler,
Sampler,
SequentialSampler,
SubsetRandomSampler,
WeightedRandomSampler,
Reported by Pylint.
Line: 65
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
'runtime_validation_disabled']
# Please keep this list sorted
assert __all__ == sorted(__all__)
################################################################################
# import subpackage
################################################################################
Reported by Bandit.
torch/nn/intrinsic/qat/modules/linear_relu.py
5 issues
Line: 6
Column: 32
import torch.nn.intrinsic as nni
import torch.nn.functional as F
class LinearReLU(nnqat.Linear, nni._FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
quantization aware training.
Reported by Pylint.
Line: 6
Column: 32
import torch.nn.intrinsic as nni
import torch.nn.functional as F
class LinearReLU(nnqat.Linear, nni._FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
quantization aware training.
Reported by Pylint.
Line: 34
Column: 23
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(cls, mod):
return super(LinearReLU, cls).from_float(mod)
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn.qat as nnqat
import torch.nn.intrinsic as nni
import torch.nn.functional as F
class LinearReLU(nnqat.Linear, nni._FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
Reported by Pylint.
Line: 32
Column: 9
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
Reported by Pylint.
torch/package/analyze/trace_dependencies.py
5 issues
Line: 6
Column: 5
def trace_dependencies(
callable: Callable[[Any], Any], inputs: Iterable[Tuple[Any, ...]]
) -> List[str]:
"""Trace the execution of a callable in order to determine which modules it uses.
Args:
callable: The callable to execute and trace.
Reported by Pylint.
Line: 19
Column: 43
"""
modules_used = set()
def record_used_modules(frame, event, arg):
# If the event being profiled is not a Python function
# call, there is nothing to do.
if event != "call":
return
Reported by Pylint.
Line: 1
Column: 1
import sys
from typing import Any, Callable, Iterable, List, Tuple
def trace_dependencies(
callable: Callable[[Any], Any], inputs: Iterable[Tuple[Any, ...]]
) -> List[str]:
"""Trace the execution of a callable in order to determine which modules it uses.
Reported by Pylint.
Line: 12
Column: 1
Args:
callable: The callable to execute and trace.
inputs: The input to use during tracing. The modules used by 'callable' when invoked by each set of inputs
are union-ed to determine all modules used by the callable for the purposes of packaging.
Returns: A list of the names of all modules used during callable execution.
"""
modules_used = set()
Reported by Pylint.
Line: 13
Column: 1
Args:
callable: The callable to execute and trace.
inputs: The input to use during tracing. The modules used by 'callable' when invoked by each set of inputs
are union-ed to determine all modules used by the callable for the purposes of packaging.
Returns: A list of the names of all modules used during callable execution.
"""
modules_used = set()
Reported by Pylint.
torch/package/glob_group.py
5 issues
Line: 1
Column: 1
import re
from typing import Iterable, Union
GlobPattern = Union[str, Iterable[str]]
class GlobGroup:
"""A set of patterns that candidate strings will be matched against.
Reported by Pylint.
Line: 19
Column: 1
- A double wildcard ("**"). This matches against zero or more complete segments.
Examples:
``torch.**``: matches ``torch`` and all its submodules, e.g. ``torch.nn`` and ``torch.nn.functional``.
``torch.*``: matches ``torch.nn`` or ``torch.functional``, but not ``torch.nn.functional``.
``torch*.**``: matches ``torch``, ``torchvision``, and all their submodules.
A candidate will match the ``GlobGroup`` if it matches any of the ``include`` patterns and
none of the ``exclude`` patterns.
Reported by Pylint.
Line: 53
Column: 5
def __repr__(self):
return self._dbg
def matches(self, candidate: str) -> bool:
candidate = self.separator + candidate
return any(p.fullmatch(candidate) for p in self.include) and all(
not p.fullmatch(candidate) for p in self.exclude
)
Reported by Pylint.
Line: 61
Column: 9
@staticmethod
def _glob_list(elems: GlobPattern, separator: str = "."):
if isinstance(elems, str):
return [GlobGroup._glob_to_re(elems, separator)]
else:
return [GlobGroup._glob_to_re(e, separator) for e in elems]
@staticmethod
Reported by Pylint.
Line: 72
Column: 17
# with '.' so `import torch` will regex against `.torch`, assuming '.' is the separator
def component_to_re(component):
if "**" in component:
if component == "**":
return "(" + re.escape(separator) + "[^" + separator + "]+)*"
else:
raise ValueError("** can only appear as an entire path segment")
else:
return re.escape(separator) + ("[^" + separator + "]*").join(
Reported by Pylint.
torch/utils/__init__.py
5 issues
Line: 4
Column: 1
import os.path as _osp
import sys
from .throughput_benchmark import ThroughputBenchmark
from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions
# Set the module for a given object for nicer printing
def set_module(obj, mod):
if not isinstance(mod, str):
Reported by Pylint.
Line: 5
Column: 1
import sys
from .throughput_benchmark import ThroughputBenchmark
from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions
# Set the module for a given object for nicer printing
def set_module(obj, mod):
if not isinstance(mod, str):
raise TypeError("The mod argument should be a string")
Reported by Pylint.
Line: 1
Column: 1
import os.path as _osp
import sys
from .throughput_benchmark import ThroughputBenchmark
from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions
# Set the module for a given object for nicer printing
def set_module(obj, mod):
if not isinstance(mod, str):
Reported by Pylint.
Line: 8
Column: 1
from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions
# Set the module for a given object for nicer printing
def set_module(obj, mod):
if not isinstance(mod, str):
raise TypeError("The mod argument should be a string")
obj.__module__ = mod
if sys.executable == "torch_deploy":
Reported by Pylint.
Line: 15
Column: 5
if sys.executable == "torch_deploy":
# not valid inside torch_deploy interpreter, no paths exists for frozen modules
cmake_prefix_path = None
else:
cmake_prefix_path = _osp.join(_osp.dirname(_osp.dirname(__file__)), 'share', 'cmake')
Reported by Pylint.
caffe2/python/onnx/tests/helper_test.py
5 issues
Line: 17
Column: 13
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
g = C.DummyName()
g.reset()
names_1 = [g.new_dummy_name() for _ in range(3)]
g.reset()
names_2 = [g.new_dummy_name() for _ in range(3)]
self.assertEqual(names_1, names_2)
Reported by Pylint.
Line: 1
Column: 1
## @package onnx
# Module caffe2.python.onnx.tests.helper_test
import unittest
Reported by Pylint.
Line: 15
Column: 1
import caffe2.python._import_c_extension as C
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
g = C.DummyName()
g.reset()
names_1 = [g.new_dummy_name() for _ in range(3)]
g.reset()
Reported by Pylint.
Line: 16
Column: 5
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
g = C.DummyName()
g.reset()
names_1 = [g.new_dummy_name() for _ in range(3)]
g.reset()
names_2 = [g.new_dummy_name() for _ in range(3)]
Reported by Pylint.
Line: 17
Column: 9
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
g = C.DummyName()
g.reset()
names_1 = [g.new_dummy_name() for _ in range(3)]
g.reset()
names_2 = [g.new_dummy_name() for _ in range(3)]
self.assertEqual(names_1, names_2)
Reported by Pylint.
test/cpp/jit/test_module_api.cpp
5 issues
Line: 48
si.loadType(QualifiedName(class_name));
}
TEST(ModuleAPITest, MethodRunAsync) {
// Module m("m");
// m.define(R"(
// def forward(self):
// r1 = torch.jit.fork(torch.mm, torch.rand(100,100),torch.rand(100,100))
// r2 = torch.jit.fork(torch.mm, torch.rand(100,100),torch.rand(100,100))
Reported by Cppcheck.
Line: 236
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
at::Tensor t3 =
m3.attr(tensor_attr).toTensor(); // copy will not copy the Tensor
// check copy works
ASSERT_TRUE(t1.equal(t2));
ASSERT_TRUE(t1.equal(t3));
// zero out t1
t1.zero_();
// check that t2 is not affected because it is a deep copy
Reported by FlawFinder.
Line: 237
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
m3.attr(tensor_attr).toTensor(); // copy will not copy the Tensor
// check copy works
ASSERT_TRUE(t1.equal(t2));
ASSERT_TRUE(t1.equal(t3));
// zero out t1
t1.zero_();
// check that t2 is not affected because it is a deep copy
ASSERT_TRUE(!t1.equal(t2));
Reported by FlawFinder.
Line: 242
Column: 19
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// zero out t1
t1.zero_();
// check that t2 is not affected because it is a deep copy
ASSERT_TRUE(!t1.equal(t2));
// check that t3 is the same as t1 since it is a shallow copy
ASSERT_TRUE(t1.equal(t3));
}
TEST(ModuleAPITest, DeepCopyString) {
Reported by FlawFinder.
Line: 244
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// check that t2 is not affected because it is a deep copy
ASSERT_TRUE(!t1.equal(t2));
// check that t3 is the same as t1 since it is a shallow copy
ASSERT_TRUE(t1.equal(t3));
}
TEST(ModuleAPITest, DeepCopyString) {
auto cu = std::make_shared<CompilationUnit>();
auto cls = ClassType::create("foo.bar", cu, true);
Reported by FlawFinder.
test/cpp/jit/torch_python_test.cpp
5 issues
Line: 20
Column: 13
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
bool isSandcastle() {
return (
(std::getenv("SANDCASTLE")) ||
(std::getenv("TW_JOB_USER") &&
std::string(std::getenv("TW_JOB_USER")) == "sandcastle"));
}
void testEvalModeForLoadedModule() {
Reported by FlawFinder.
Line: 21
Column: 13
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
bool isSandcastle() {
return (
(std::getenv("SANDCASTLE")) ||
(std::getenv("TW_JOB_USER") &&
std::string(std::getenv("TW_JOB_USER")) == "sandcastle"));
}
void testEvalModeForLoadedModule() {
if (isSandcastle())
Reported by FlawFinder.
Line: 22
Column: 25
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
return (
(std::getenv("SANDCASTLE")) ||
(std::getenv("TW_JOB_USER") &&
std::string(std::getenv("TW_JOB_USER")) == "sandcastle"));
}
void testEvalModeForLoadedModule() {
if (isSandcastle())
return; // The module file to load is not generated in Sandcastle
Reported by FlawFinder.
Line: 54
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto elements = ivalue.toTuple()->elements();
auto ones = torch::ones({2, 2});
AT_ASSERT(ones.equal(elements.at(0).toTensor()));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto twos = torch::ones({3, 5}) * 2;
AT_ASSERT(twos.equal(elements.at(1).toTensor()));
}
Reported by FlawFinder.
Line: 58
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto twos = torch::ones({3, 5}) * 2;
AT_ASSERT(twos.equal(elements.at(1).toTensor()));
}
void testTorchSaveError() {
if (isSandcastle()) {
// The file to load is not generated in Sandcastle
Reported by FlawFinder.
test/cpp_extensions/setup.py
5 issues
Line: 2
Column: 1
import sys
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
Reported by Pylint.
Line: 5
Column: 1
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
if vc_version.startswith('14.16.'):
Reported by Pylint.
Line: 6
Column: 1
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
if vc_version.startswith('14.16.'):
CXX_FLAGS = ['/sdl']
Reported by Pylint.
Line: 1
Column: 1
import sys
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
Reported by Pylint.
Line: 3
Column: 1
import sys
import torch.cuda
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
if sys.platform == 'win32':
vc_version = os.getenv('VCToolsVersion', '')
Reported by Pylint.