The following issues were found:
test/typing/pass/math_ops.py
8 issues
Line: 2
Column: 1
# flake8: noqa
import torch
import math
a = torch.randn(4)
b = torch.randn(4)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
# abs/absolute
Reported by Pylint.
Line: 31
Column: 1
torch.addcmul(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
# angle
torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
# asin/arcsin
torch.asin(a)
torch.arcsin(a)
Reported by Pylint.
Line: 152
Column: 1
torch.frac(torch.tensor([1, 2.5, -3.2]))
# imag
torch.randn(4, dtype=torch.cfloat).imag
# ldexp
torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
Reported by Pylint.
Line: 251
Column: 1
# nextafter
eps = torch.finfo(torch.float32).eps
torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps])
# polygamma
torch.polygamma(1, torch.tensor([1, 0.5]))
torch.polygamma(2, torch.tensor([1, 0.5]))
torch.polygamma(3, torch.tensor([1, 0.5]))
Reported by Pylint.
Line: 267
Column: 1
torch.rad2deg(torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]))
# real
torch.randn(4, dtype=torch.cfloat).real
# reciprocal
torch.reciprocal(a)
# remainder
Reported by Pylint.
Line: 1
Column: 1
# flake8: noqa
import torch
import math
a = torch.randn(4)
b = torch.randn(4)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
# abs/absolute
Reported by Pylint.
Line: 3
Column: 1
# flake8: noqa
import torch
import math
a = torch.randn(4)
b = torch.randn(4)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
# abs/absolute
Reported by Pylint.
Line: 202
Column: 1
torch.logical_not(torch.tensor([True, False]))
torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
# logical_or
torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
torch.logical_or(r, s)
torch.logical_or(r.double(), s.double())
Reported by Pylint.
test/package/package_a/fake_script_class.py
8 issues
Line: 1
Column: 1
import torch
@torch.jit.script
class MyScriptClass:
"""Intended to be scripted."""
def __init__(self, x):
self.foo = x
Reported by Pylint.
Line: 1
Column: 1
import torch
@torch.jit.script
class MyScriptClass:
"""Intended to be scripted."""
def __init__(self, x):
self.foo = x
Reported by Pylint.
Line: 5
Column: 1
@torch.jit.script
class MyScriptClass:
"""Intended to be scripted."""
def __init__(self, x):
self.foo = x
Reported by Pylint.
Line: 9
Column: 9
"""Intended to be scripted."""
def __init__(self, x):
self.foo = x
def set_foo(self, x):
self.foo = x
Reported by Pylint.
Line: 11
Column: 5
def __init__(self, x):
self.foo = x
def set_foo(self, x):
self.foo = x
@torch.jit.script
def uses_script_class(x):
Reported by Pylint.
Line: 11
Column: 5
def __init__(self, x):
self.foo = x
def set_foo(self, x):
self.foo = x
@torch.jit.script
def uses_script_class(x):
Reported by Pylint.
Line: 16
Column: 1
@torch.jit.script
def uses_script_class(x):
"""Intended to be scripted."""
foo = MyScriptClass(x)
return foo.foo
Reported by Pylint.
Line: 18
Column: 5
@torch.jit.script
def uses_script_class(x):
"""Intended to be scripted."""
foo = MyScriptClass(x)
return foo.foo
Reported by Pylint.
torch/backends/xnnpack/__init__.py
8 issues
Line: 1
Column: 1
import sys
import torch
import types
class _XNNPACKEnabled(object):
def __get__(self, obj, objtype):
return torch._C._is_xnnpack_enabled()
def __set__(self, obj, val):
Reported by Pylint.
Line: 3
Column: 1
import sys
import torch
import types
class _XNNPACKEnabled(object):
def __get__(self, obj, objtype):
return torch._C._is_xnnpack_enabled()
def __set__(self, obj, val):
Reported by Pylint.
Line: 5
Column: 1
import torch
import types
class _XNNPACKEnabled(object):
def __get__(self, obj, objtype):
return torch._C._is_xnnpack_enabled()
def __set__(self, obj, val):
raise RuntimeError("Assignment not supported")
Reported by Pylint.
Line: 12
Column: 1
def __set__(self, obj, val):
raise RuntimeError("Assignment not supported")
class XNNPACKEngine(types.ModuleType):
def __init__(self, m, name):
super(XNNPACKEngine, self).__init__(name)
self.m = m
def __getattr__(self, attr):
Reported by Pylint.
Line: 12
Column: 1
def __set__(self, obj, val):
raise RuntimeError("Assignment not supported")
class XNNPACKEngine(types.ModuleType):
def __init__(self, m, name):
super(XNNPACKEngine, self).__init__(name)
self.m = m
def __getattr__(self, attr):
Reported by Pylint.
Line: 14
Column: 9
class XNNPACKEngine(types.ModuleType):
def __init__(self, m, name):
super(XNNPACKEngine, self).__init__(name)
self.m = m
def __getattr__(self, attr):
return self.m.__getattribute__(attr)
Reported by Pylint.
Line: 15
Column: 9
class XNNPACKEngine(types.ModuleType):
def __init__(self, m, name):
super(XNNPACKEngine, self).__init__(name)
self.m = m
def __getattr__(self, attr):
return self.m.__getattribute__(attr)
enabled = _XNNPACKEnabled()
Reported by Pylint.
Line: 7
Column: 16
class _XNNPACKEnabled(object):
def __get__(self, obj, objtype):
return torch._C._is_xnnpack_enabled()
def __set__(self, obj, val):
raise RuntimeError("Assignment not supported")
class XNNPACKEngine(types.ModuleType):
Reported by Pylint.
test/test_public_bindings.py
8 issues
Line: 1
Column: 1
from torch.testing._internal.common_utils import run_tests
import torch
import unittest
class TestPublicBindings(unittest.TestCase):
def test_no_new_bindings(self):
"""
This test aims to stop the introduction of new JIT bindings into torch._C
Reported by Pylint.
Line: 3
Column: 1
from torch.testing._internal.common_utils import run_tests
import torch
import unittest
class TestPublicBindings(unittest.TestCase):
def test_no_new_bindings(self):
"""
This test aims to stop the introduction of new JIT bindings into torch._C
Reported by Pylint.
Line: 260
Column: 50
"wait",
}
torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith("_")}
# Check that the torch._C bindings are all in the allowlist. Since
# bindings can change based on how PyTorch was compiled (e.g. with/without
# CUDA), the two may not be an exact match but the bindings should be
# a subset of the allowlist.
Reported by Pylint.
Line: 1
Column: 1
from torch.testing._internal.common_utils import run_tests
import torch
import unittest
class TestPublicBindings(unittest.TestCase):
def test_no_new_bindings(self):
"""
This test aims to stop the introduction of new JIT bindings into torch._C
Reported by Pylint.
Line: 4
Column: 1
from torch.testing._internal.common_utils import run_tests
import torch
import unittest
class TestPublicBindings(unittest.TestCase):
def test_no_new_bindings(self):
"""
This test aims to stop the introduction of new JIT bindings into torch._C
Reported by Pylint.
Line: 6
Column: 1
import torch
import unittest
class TestPublicBindings(unittest.TestCase):
def test_no_new_bindings(self):
"""
This test aims to stop the introduction of new JIT bindings into torch._C
whose names do not start with _. Such bindings are made available as
torch.XXX, which may not be desirable.
Reported by Pylint.
Line: 25
Column: 9
#
# {elem for elem in dir(torch._C) if not elem.startswith("_")}
#
torch_C_allowlist_superset = {
"AggregationType",
"AliasDb",
"AnyType",
"Argument",
"ArgumentSpec",
Reported by Pylint.
Line: 260
Column: 9
"wait",
}
torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith("_")}
# Check that the torch._C bindings are all in the allowlist. Since
# bindings can change based on how PyTorch was compiled (e.g. with/without
# CUDA), the two may not be an exact match but the bindings should be
# a subset of the allowlist.
Reported by Pylint.
torch/csrc/deploy/example/fx/examples.py
8 issues
Line: 1
Column: 1
import torch.fx
try:
from .some_dependency import a_non_torch_leaf
except ImportError:
from some_dependency import a_non_torch_leaf
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
Reported by Pylint.
Line: 14
Column: 23
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + a_non_torch_leaf(1, input)
return output
Reported by Pylint.
Line: 1
Column: 1
import torch.fx
try:
from .some_dependency import a_non_torch_leaf
except ImportError:
from some_dependency import a_non_torch_leaf
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
Reported by Pylint.
Line: 9
Column: 1
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
Reported by Pylint.
Line: 9
Column: 1
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
Reported by Pylint.
Line: 10
Column: 5
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + a_non_torch_leaf(1, input)
Reported by Pylint.
Line: 10
Column: 5
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + a_non_torch_leaf(1, input)
Reported by Pylint.
Line: 14
Column: 5
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + a_non_torch_leaf(1, input)
return output
Reported by Pylint.
torch/distributions/chi2.py
8 issues
Line: 5
Column: 1
from torch.distributions.gamma import Gamma
class Chi2(Gamma):
r"""
Creates a Chi2 distribution parameterized by shape parameter :attr:`df`.
This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
Example::
Reported by Pylint.
Line: 5
Column: 1
from torch.distributions.gamma import Gamma
class Chi2(Gamma):
r"""
Creates a Chi2 distribution parameterized by shape parameter :attr:`df`.
This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
Example::
Reported by Pylint.
Line: 5
Column: 1
from torch.distributions.gamma import Gamma
class Chi2(Gamma):
r"""
Creates a Chi2 distribution parameterized by shape parameter :attr:`df`.
This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
Example::
Reported by Pylint.
Line: 1
Column: 1
from torch.distributions import constraints
from torch.distributions.gamma import Gamma
class Chi2(Gamma):
r"""
Creates a Chi2 distribution parameterized by shape parameter :attr:`df`.
This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
Reported by Pylint.
Line: 22
Column: 9
arg_constraints = {'df': constraints.positive}
def __init__(self, df, validate_args=None):
super(Chi2, self).__init__(0.5 * df, 0.5, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Chi2, _instance)
return super(Chi2, self).expand(batch_shape, new)
Reported by Pylint.
Line: 26
Column: 16
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Chi2, _instance)
return super(Chi2, self).expand(batch_shape, new)
@property
def df(self):
return self.concentration * 2
Reported by Pylint.
Line: 29
Column: 5
return super(Chi2, self).expand(batch_shape, new)
@property
def df(self):
return self.concentration * 2
Reported by Pylint.
Line: 29
Column: 5
return super(Chi2, self).expand(batch_shape, new)
@property
def df(self):
return self.concentration * 2
Reported by Pylint.
torch/fx/experimental/fx2trt/converters/linear.py
8 issues
Line: 2
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, to_numpy, get_dyn_range
def common_linear(network, mod, input_val, layer_name, is_quantized):
"""
TensorRT fully connected layer implicitly flatten last three dimensions at
Reported by Pylint.
Line: 5
Column: 1
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, to_numpy, get_dyn_range
def common_linear(network, mod, input_val, layer_name, is_quantized):
"""
TensorRT fully connected layer implicitly flatten last three dimensions at
the start and implicitly reshape the result to (K, 1, 1) at the end.
Reported by Pylint.
Line: 39
Column: 62
layer.name = f"{layer_name}_linear"
if is_quantized:
dyn_range = get_dyn_range(mod.scale, mod.zero_point, torch.quint8)
mark_as_int8_layer(layer, dyn_range)
# reshape the output from (*, K, 1, 1) to (*, K)
layer = network.add_shuffle(layer.get_output(0))
layer.reshape_dims = tuple(input_val.shape[:-1]) + (mod.out_features,)
Reported by Pylint.
Line: 67
Column: 45
@tensorrt_converter(torch.nn.quantized.modules.linear.Linear)
def quantized_linear(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f"Quantized Linear received input {input_val} that is not part "
"of the TensorRT region!")
Reported by Pylint.
Line: 1
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, to_numpy, get_dyn_range
def common_linear(network, mod, input_val, layer_name, is_quantized):
"""
TensorRT fully connected layer implicitly flatten last three dimensions at
Reported by Pylint.
Line: 54
Column: 1
@tensorrt_converter(torch.nn.modules.linear.Linear)
def linear(network, submod, args, kwargs, layer_name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
Reported by Pylint.
Line: 56
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
@tensorrt_converter(torch.nn.modules.linear.Linear)
def linear(network, submod, args, kwargs, layer_name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f"Linear received input {input_val} that is not part "
"of the TensorRT region!")
Reported by Bandit.
Line: 67
Column: 1
@tensorrt_converter(torch.nn.quantized.modules.linear.Linear)
def quantized_linear(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f"Quantized Linear received input {input_val} that is not part "
"of the TensorRT region!")
Reported by Pylint.
torch/_utils_internal.py
8 issues
Line: 37
Column: 41
def prepare_multiprocessing_environment(path: str) -> None:
pass
def resolve_library_path(path: str) -> str:
return os.path.realpath(path)
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import tempfile
# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.
Reported by Pylint.
Line: 15
Column: 5
# __file__ is meaningless in the context of frozen torch used in torch deploy.
# setting empty torch_parent should allow below functions to operate without crashing,
# but it's unclear if there is a valid use case for them in the context of deploy.
torch_parent = ""
else:
if os.path.basename(os.path.dirname(__file__)) == 'shared':
torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
else:
torch_parent = os.path.dirname(os.path.dirname(__file__))
Reported by Pylint.
Line: 22
Column: 1
else:
torch_parent = os.path.dirname(os.path.dirname(__file__))
def get_file_path(*path_components: str) -> str:
return os.path.join(torch_parent, *path_components)
def get_file_path_2(*path_components: str) -> str:
return os.path.join(*path_components)
Reported by Pylint.
Line: 26
Column: 1
return os.path.join(torch_parent, *path_components)
def get_file_path_2(*path_components: str) -> str:
return os.path.join(*path_components)
def get_writable_path(path: str) -> str:
if os.access(path, os.W_OK):
Reported by Pylint.
Line: 30
Column: 1
return os.path.join(*path_components)
def get_writable_path(path: str) -> str:
if os.access(path, os.W_OK):
return path
return tempfile.mkdtemp(suffix=os.path.basename(path))
Reported by Pylint.
Line: 37
Column: 1
def prepare_multiprocessing_environment(path: str) -> None:
pass
def resolve_library_path(path: str) -> str:
return os.path.realpath(path)
Reported by Pylint.
Line: 41
Column: 1
pass
def resolve_library_path(path: str) -> str:
return os.path.realpath(path)
TEST_MASTER_ADDR = '127.0.0.1'
TEST_MASTER_PORT = 29500
Reported by Pylint.
test/package/test_analyze.py
8 issues
Line: 1
Column: 1
import torch
from torch.package import analyze
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.package import analyze
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 3
Column: 1
import torch
from torch.package import analyze
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 16
Column: 9
"""Dependency analysis API tests."""
def test_trace_dependencies(self):
import test_trace_dep
obj = test_trace_dep.SumMod()
used_modules = analyze.trace_dependencies(obj, [(torch.randn(4),)])
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch.package import analyze
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 12
Column: 1
from common import PackageTestCase
class TestAnalyze(PackageTestCase):
"""Dependency analysis API tests."""
def test_trace_dependencies(self):
import test_trace_dep
Reported by Pylint.
Line: 15
Column: 5
class TestAnalyze(PackageTestCase):
"""Dependency analysis API tests."""
def test_trace_dependencies(self):
import test_trace_dep
obj = test_trace_dep.SumMod()
used_modules = analyze.trace_dependencies(obj, [(torch.randn(4),)])
Reported by Pylint.
Line: 16
Column: 9
"""Dependency analysis API tests."""
def test_trace_dependencies(self):
import test_trace_dep
obj = test_trace_dep.SumMod()
used_modules = analyze.trace_dependencies(obj, [(torch.randn(4),)])
Reported by Pylint.
torch/_six.py
8 issues
Line: 36
Column: 32
# the actual metaclass.
class metaclass(meta): # type: ignore[misc, valid-type]
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
Reported by Pylint.
Line: 40
Column: 36
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
Reported by Pylint.
Line: 24
Column: 1
import math
import sys
inf = math.inf
nan = math.nan
string_classes = (str, bytes)
PY37 = sys.version_info[0] == 3 and sys.version_info[1] >= 7
def with_metaclass(meta: type, *bases) -> type:
Reported by Pylint.
Line: 25
Column: 1
import sys
inf = math.inf
nan = math.nan
string_classes = (str, bytes)
PY37 = sys.version_info[0] == 3 and sys.version_info[1] >= 7
def with_metaclass(meta: type, *bases) -> type:
"""Create a base class with a metaclass."""
Reported by Pylint.
Line: 34
Column: 5
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta): # type: ignore[misc, valid-type]
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
Reported by Pylint.
Line: 34
Column: 5
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta): # type: ignore[misc, valid-type]
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
Reported by Pylint.
Line: 36
Column: 9
# the actual metaclass.
class metaclass(meta): # type: ignore[misc, valid-type]
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
Reported by Pylint.