The following issues were found:

torch/testing/_internal/jit_metaprogramming_utils.py
195 issues
Module 'torch' has no 'double' member
Error

Line: 22 Column: 25

              from torch._six import inf

# TODO: include files like this should not set the default dtype
torch.set_default_dtype(torch.double)

L = 20
M = 10
S = 5


Reported by Pylint.
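
This "no member" report, and the similar ones below for torch.tensor, torch.zeros and torch.rand, is a well-known Pylint false positive: these attributes live in torch's C extension and are only visible at runtime. Assuming the code itself is correct, the usual remedies are a per-line suppression (sketched below) or telling Pylint to trust the package in its configuration, for example via extension-pkg-allow-list=torch or generated-members=torch.*.

import torch

# A scoped suppression acknowledges the false positive on this line without
# hiding genuine no-member findings elsewhere in the module.
torch.set_default_dtype(torch.double)  # pylint: disable=no-member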

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 60 Column: 22

                  ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 60 Column: 50

                  ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 61 Column: 22

                  ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),
    ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 61 Column: 52

                  ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),
    ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 62 Column: 22

                  ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),
    ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
    ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 62 Column: 54

                  ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),
    ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
    ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),

Reported by Pylint.

Module 'torch' has no 'double' member
Error

Line: 103 Column: 35

                  ('softplus', (S, S, S), (),),
    ('softmin', (S, S, S), (0,),),
    ('softmax', (S, S, S), (0,), '', (True,)),
    ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
    ('tanh', (S, S, S), (), '', (True,)),
    ('sigmoid', (S, S, S), (), '', (True,)),
    ('log_softmax', (S, S, S), (0,), '', (True,)),
    ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
    ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 109 Column: 41

                  ('log_softmax', (S, S, S), (0,), '', (True,)),
    ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
    ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
    ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
    ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
    ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), ),
        '', (False, 'aten::_batch_norm_impl_index')),
    ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 110 Column: 64

                  ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
    ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
    ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
    ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
    ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), ),
        '', (False, 'aten::_batch_norm_impl_index')),
    ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
    ('layer_norm', (S, S, S, S), ([5],), '',

Reported by Pylint.

test/test_mobile_optimizer.py
194 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import unittest
import torch
import torch.nn as nn
import torch.backends.xnnpack
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,

Reported by Pylint.
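
The "Unable to import" reports in this file (and in the other test modules below) usually indicate an environment problem rather than a code problem: Pylint was run with an interpreter that cannot import torch. A hedged sketch of the two common remedies, assuming torch is in fact installed for the test environment:

# Remedy 1: invoke Pylint from the interpreter that can import torch, e.g.
#     python -m pylint test/test_mobile_optimizer.py
# Remedy 2: acknowledge the unresolved import in place.
import torch  # pylint: disable=import-error
import torch.nn as nn  # pylint: disable=import-error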

Unable to import 'torch.nn'
Error

Line: 3 Column: 1

              import unittest
import torch
import torch.nn as nn
import torch.backends.xnnpack
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,

Reported by Pylint.

Unable to import 'torch.backends.xnnpack'
Error

Line: 4 Column: 1

              import unittest
import torch
import torch.nn as nn
import torch.backends.xnnpack
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,

Reported by Pylint.

Unable to import 'torch.utils.bundled_inputs'
Error

Line: 5 Column: 1

              import torch
import torch.nn as nn
import torch.backends.xnnpack
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,
                                          optimize_for_mobile)

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 6 Column: 1

              import torch.nn as nn
import torch.backends.xnnpack
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,
                                          optimize_for_mobile)
from torch.nn import functional as F

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 7 Column: 1

              import torch.backends.xnnpack
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,
                                          optimize_for_mobile)
from torch.nn import functional as F
from torch._C import MobileOptimizerType

Reported by Pylint.

Unable to import 'torch.utils.mobile_optimizer'
Error

Line: 8 Column: 1

              import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,
                                          optimize_for_mobile)
from torch.nn import functional as F
from torch._C import MobileOptimizerType
from torch.testing._internal.common_quantized import override_quantized_engine

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 11 Column: 1

              from torch.utils.mobile_optimizer import (LintCode,
                                          generate_mobile_module_lints,
                                          optimize_for_mobile)
from torch.nn import functional as F
from torch._C import MobileOptimizerType
from torch.testing._internal.common_quantized import override_quantized_engine

try:
    import torchvision

Reported by Pylint.

Unable to import 'torch._C'
Error

Line: 12 Column: 1

                                                        generate_mobile_module_lints,
                                          optimize_for_mobile)
from torch.nn import functional as F
from torch._C import MobileOptimizerType
from torch.testing._internal.common_quantized import override_quantized_engine

try:
    import torchvision
    HAS_TORCHVISION = True

Reported by Pylint.

Unable to import 'torch.testing._internal.common_quantized'
Error

Line: 13 Column: 1

                                                        optimize_for_mobile)
from torch.nn import functional as F
from torch._C import MobileOptimizerType
from torch.testing._internal.common_quantized import override_quantized_engine

try:
    import torchvision
    HAS_TORCHVISION = True
except ImportError:

Reported by Pylint.

caffe2/python/data_parallel_model_test.py
193 issues
Unable to import 'mock'
Error

Line: 13 Column: 1

              import tempfile
import unittest
import time
from mock import Mock
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core, cnn, data_parallel_model, dyndep, \

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 14 Column: 1

              import unittest
import time
from mock import Mock
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core, cnn, data_parallel_model, dyndep, \
    model_helper, optimizer, rnn_cell, workspace

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 15 Column: 1

              import time
from mock import Mock
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core, cnn, data_parallel_model, dyndep, \
    model_helper, optimizer, rnn_cell, workspace
from caffe2.python.test_util import TestCase

Reported by Pylint.

Attribute 'tmpdir' defined outside __init__
Error

Line: 28 Column: 9

              
class TemporaryDirectory:
    def __enter__(self):
        self.tmpdir = tempfile.mkdtemp()
        return self.tmpdir

    def __exit__(self, type, value, traceback):
        shutil.rmtree(self.tmpdir)


Reported by Pylint.

Redefining built-in 'type'
Error

Line: 31 Column: 24

                      self.tmpdir = tempfile.mkdtemp()
        return self.tmpdir

    def __exit__(self, type, value, traceback):
        shutil.rmtree(self.tmpdir)

# Note(jiayq): we are yet to find out why Travis gives out an error in gloo
# like:
# RuntimeError: [enforce fail at /home/travis/build/caffe2/caffe2/third_party/gloo/gloo/transport/tcp/device.cc:113] ifa != nullptr. Unable to find interface for: [127.0.1.1]

Reported by Pylint.
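
The two findings above both concern the same TemporaryDirectory helper. A minimal sketch of a version that satisfies both checks: the attribute is declared in __init__, and the __exit__ parameters use the conventional exc_* names instead of shadowing the built-in type. (On Python 3, tempfile.TemporaryDirectory already provides this behaviour out of the box.)

import shutil
import tempfile

class TemporaryDirectory:
    def __init__(self):
        # declaring the attribute here satisfies attribute-defined-outside-init
        self.tmpdir = None

    def __enter__(self):
        self.tmpdir = tempfile.mkdtemp()
        return self.tmpdir

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # exc_type no longer shadows the built-in `type`
        shutil.rmtree(self.tmpdir)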

Unused argument 'model'
Error

Line: 46 Column: 31

                      '''
        Helper function for test_equiv
        '''
        def input_builder_fun(model):
            return None

        def model_build_fun(model, loss_scale):
            fc = model.FC("data", "fc", 16, 1,
                          ("ConstantFill", {}), ("ConstantFill", {}))

Reported by Pylint.
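
Here, and in the later unused-argument findings in test_foreach.py and rnn_cell.py, the parameter is part of a callback or override signature and cannot simply be dropped. A minimal sketch of the two conventional remedies, using stand-in builder functions rather than the real ones:

def input_builder_fun(_model):
    # a leading underscore marks the argument as intentionally unused
    return None

def model_build_fun(model, loss_scale):  # pylint: disable=unused-argument
    # alternatively, a scoped disable keeps the original signature intact
    return loss_scale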

Redefining name 'st' from outer scope (line 15)
Error

Line: 102 Column: 17

                          batch_per_device = batch_size // len(devices)

            for (j, g) in enumerate(devices):
                st = j * batch_per_device
                en = st + batch_per_device
                data = full_data[st:en, :].astype(np.float32)
                labels = full_labels[st:en].astype(np.float32)
                with core.DeviceScope(core.DeviceOption(model._device_type, g)):
                    workspace.FeedBlob(

Reported by Pylint.
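
hypothesis.strategies is imported as st at module level, so reusing st for the slice index inside the loop shadows that alias. Renaming the locals is the usual fix; a self-contained sketch with illustrative array sizes (the real code slices workspace batches per device):

import numpy as np

full_data = np.zeros((8, 3))
full_labels = np.zeros(8)
batch_per_device = 4

for j in range(2):
    # `start`/`end` avoid shadowing the module-level `st` alias
    start = j * batch_per_device
    end = start + batch_per_device
    data = full_data[start:end, :].astype(np.float32)
    labels = full_labels[start:end].astype(np.float32)
    print(data.shape, labels.shape)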

Access to a protected member _device_type of a client class
Error

Line: 106 Column: 57

                              en = st + batch_per_device
                data = full_data[st:en, :].astype(np.float32)
                labels = full_labels[st:en].astype(np.float32)
                with core.DeviceScope(core.DeviceOption(model._device_type, g)):
                    workspace.FeedBlob(
                        "{}_{}/data".format(model._device_prefix, g), data
                    )
                    workspace.FeedBlob(
                        "{}_{}/label".format(model._device_prefix, g), labels

Reported by Pylint.

Access to a protected member _device_prefix of a client class
Error

Line: 108 Column: 45

                              labels = full_labels[st:en].astype(np.float32)
                with core.DeviceScope(core.DeviceOption(model._device_type, g)):
                    workspace.FeedBlob(
                        "{}_{}/data".format(model._device_prefix, g), data
                    )
                    workspace.FeedBlob(
                        "{}_{}/label".format(model._device_prefix, g), labels
                    )


Reported by Pylint.

Access to a protected member _device_prefix of a client class
Error

Line: 111 Column: 46

                                      "{}_{}/data".format(model._device_prefix, g), data
                    )
                    workspace.FeedBlob(
                        "{}_{}/label".format(model._device_prefix, g), labels
                    )

            if i == 0:
                workspace.RunNetOnce(model.param_init_net)
                workspace.CreateNet(model.net)

Reported by Pylint.
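
The three protected-access findings above (and the similar ones in torch/jit/_recursive.py further down) flag deliberate reads of underscore-prefixed attributes. When there is no public accessor and the access is intentional, a scoped disable is the usual remedy; a toy sketch with a stand-in Model class:

class Model:
    def __init__(self):
        self._device_prefix = "gpu"

model = Model()

# the private attribute is read on purpose, so acknowledge it in place
prefix = model._device_prefix  # pylint: disable=protected-access
print("{}_{}/data".format(prefix, 0))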

test/test_spectral_ops.py
193 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import unittest
import math
from contextlib import contextmanager
from itertools import product
import itertools
import doctest
import inspect


Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 10 Column: 1

              import doctest
import inspect

from torch.testing._internal.common_utils import \
    (TestCase, run_tests, TEST_NUMPY, TEST_LIBROSA, TEST_MKL)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, ops, dtypes, onlyOnCPUAndCUDA,
     skipCPUIfNoFFT, deviceCountAtLeast, onlyCUDA, OpDTypes, skipIf)
from torch.testing._internal.common_methods_invocations import spectral_funcs, SpectralFuncInfo

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 12 Column: 1

              
from torch.testing._internal.common_utils import \
    (TestCase, run_tests, TEST_NUMPY, TEST_LIBROSA, TEST_MKL)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, ops, dtypes, onlyOnCPUAndCUDA,
     skipCPUIfNoFFT, deviceCountAtLeast, onlyCUDA, OpDTypes, skipIf)
from torch.testing._internal.common_methods_invocations import spectral_funcs, SpectralFuncInfo

from setuptools import distutils

Reported by Pylint.

Unable to import 'torch.testing._internal.common_methods_invocations'
Error

Line: 15 Column: 1

              from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, ops, dtypes, onlyOnCPUAndCUDA,
     skipCPUIfNoFFT, deviceCountAtLeast, onlyCUDA, OpDTypes, skipIf)
from torch.testing._internal.common_methods_invocations import spectral_funcs, SpectralFuncInfo

from setuptools import distutils
from typing import Optional, List



Reported by Pylint.

Unable to import 'librosa'
Error

Line: 26 Column: 5

              

if TEST_LIBROSA:
    import librosa


def _complex_stft(x, *args, **kwargs):
    # Transform real and imaginary components separably
    stft_real = torch.stft(x.real, *args, **kwargs, return_complex=True, onesided=False)

Reported by Pylint.

Undefined variable 'op'
Error

Line: 217 Column: 46

                          return (input, s, dim, norm)

    @onlyOnCPUAndCUDA
    @ops([op for op in spectral_funcs if not op.ndimensional])
    def test_reference_1d(self, device, dtype, op):
        norm_modes = ((None, "forward", "backward", "ortho")
                      if distutils.version.LooseVersion(np.__version__) >= '1.20.0'
                      else (None, "ortho"))
        test_args = [

Reported by Pylint.

Undefined variable 'op'
Error

Line: 370 Column: 42

                  # nd-fft tests
    @onlyOnCPUAndCUDA
    @unittest.skipIf(not TEST_NUMPY, 'NumPy not found')
    @ops([op for op in spectral_funcs if op.ndimensional])
    def test_reference_nd(self, device, dtype, op):
        norm_modes = ((None, "forward", "backward", "ortho")
                      if distutils.version.LooseVersion(np.__version__) >= '1.20.0'
                      else (None, "ortho"))


Reported by Pylint.

Undefined variable 'op'
Error

Line: 442 Column: 42

                                  forward != torch.fft.fftn or x.is_complex()))

    @onlyOnCPUAndCUDA
    @ops([op for op in spectral_funcs if op.ndimensional],
         allowed_dtypes=[torch.float, torch.cfloat])
    def test_fftn_invalid(self, device, dtype, op):
        a = torch.rand(10, 10, 10, device=device, dtype=dtype)

        with self.assertRaisesRegex(RuntimeError, "dims must be unique"):

Reported by Pylint.
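
These three reports (and the matching one in test/test_ops.py) point at the comprehension variable op inside an @ops(...) decorator, which is valid Python; Pylint appears to mishandle comprehension scoping in class-level decorators here. If suppressing the false positive is undesirable, hoisting the filter into a helper sidesteps it. A toy sketch in which spectral_funcs is a stand-in list, not the real OpInfo registry:

from types import SimpleNamespace

spectral_funcs = [SimpleNamespace(name="fft", ndimensional=False),
                  SimpleNamespace(name="fftn", ndimensional=True)]

def one_dimensional(funcs):
    # same filter as the in-decorator comprehension, but in a scope Pylint
    # resolves cleanly; it would be used as @ops(one_dimensional(spectral_funcs))
    return [info for info in funcs if not info.ndimensional]

print([info.name for info in one_dimensional(spectral_funcs)])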

Redefining built-in 'input'
Error

Line: 105 Column: 44

                  # (i.e. it cannot be a set of random numbers)
    # So for ROCm, call np.fft.rfftn and use its output as the input
    # for testing ops that call hipfftExecC2R
    def _generate_valid_rocfft_input(self, input, op, s, dim, norm):
        def get_op_name(op):
            if type(op) == SpectralFuncInfo:
                return op.name
            else:
                return op.__name__

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 247 Column: 13

              
        for iargs in test_args:
            args = list(iargs)
            input = args[0]
            args = args[1:]

            if torch.version.hip is not None and input.device.type == 'cuda':
                input, args[0], args[1], args[2] = self._generate_valid_rocfft_input(
                    input, op, args[0], args[1], args[2])

Reported by Pylint.
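
input is a Python built-in, so using it as a parameter or local name (here and again in test/jit/test_autodiff_subgraph_slicing.py) shadows it. Renaming is the simplest fix; a minimal sketch with a hypothetical helper, where sample plays the role the shadowing input variable played:

import torch

def double_sample(sample: torch.Tensor) -> torch.Tensor:
    # `sample` carries the same meaning without shadowing the built-in `input`
    return sample * 2

print(double_sample(torch.ones(3)))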

torch/jit/_recursive.py
192 issues
Module 'torch' has no 'device' member
Error

Line: 99 Column: 55

              # in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (bool, float, int, str, type(None), torch.device, torch.layout, torch.dtype)

def _get_valid_constant(attr, v, owner_type):
    if isinstance(v, _constant_types):
        return v
    elif isinstance(v, tuple) or isinstance(v, list):

Reported by Pylint.

Module 'torch' has no 'dtype' member
Error

Line: 99 Column: 83

              # in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (bool, float, int, str, type(None), torch.device, torch.layout, torch.dtype)

def _get_valid_constant(attr, v, owner_type):
    if isinstance(v, _constant_types):
        return v
    elif isinstance(v, tuple) or isinstance(v, list):

Reported by Pylint.

Module 'torch' has no 'layout' member
Error

Line: 99 Column: 69

              # in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (bool, float, int, str, type(None), torch.device, torch.layout, torch.dtype)

def _get_valid_constant(attr, v, owner_type):
    if isinstance(v, _constant_types):
        return v
    elif isinstance(v, tuple) or isinstance(v, list):

Reported by Pylint.

method is not callable
Error

Line: 896 Column: 16

              
        script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
        method = types.MethodType(unbound_method, script_module)
        return method(*args)

    # make the lazy binding method "look like" the original method
    lazy_binding_method.original_fn = unbound_method  # type: ignore[attr-defined]
    lazy_binding_method.__name__ = unbound_method.__name__
    torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)

Reported by Pylint.

TODO: there should be a more principled way of doing this.
Error

Line: 23 Column: 3

              PropertyStub = collections.namedtuple('PropertyStub', ('resolution_callback', 'def_'))


# TODO: there should be a more principled way of doing this.
ignored_attributes = [
    "_version",
    "_parameters",
    "_buffers",
    "_modules",

Reported by Pylint.

Access to a protected member _jit_script_class_compile of a client class
Error

Line: 44 Column: 24

                  if not script_class:
        ast = get_jit_class_def(obj, obj.__name__)
        defaults = torch.jit.frontend.get_default_args_for_class(obj)
        script_class = torch._C._jit_script_class_compile(qualified_name, ast, defaults, rcb)
        _add_script_class(obj, script_class)

    return script_class

def make_stub(func, name):

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 44 Column: 24

                  if not script_class:
        ast = get_jit_class_def(obj, obj.__name__)
        defaults = torch.jit.frontend.get_default_args_for_class(obj)
        script_class = torch._C._jit_script_class_compile(qualified_name, ast, defaults, rcb)
        _add_script_class(obj, script_class)

    return script_class

def make_stub(func, name):

Reported by Pylint.

Access to a protected member _jit_tree_views of a client class
Error

Line: 116 Column: 21

                      """.format(torch.typename(type(v)), owner_type, attr, constants)))


class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
    def __init__(self, source, filename, file_lineno, leading_whitespace_len):
        super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)


def infer_concrete_type_builder(nn_module, share_types=True):

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 116 Column: 21

                      """.format(torch.typename(type(v)), owner_type, attr, constants)))


class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
    def __init__(self, source, filename, file_lineno, leading_whitespace_len):
        super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)


def infer_concrete_type_builder(nn_module, share_types=True):

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 117 Column: 5

              

class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
    def __init__(self, source, filename, file_lineno, leading_whitespace_len):
        super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)


def infer_concrete_type_builder(nn_module, share_types=True):
    """

Reported by Pylint.
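
An __init__ that only forwards its arguments to super() adds nothing, which is exactly what useless-super-delegation flags; deleting it lets the inherited initializer run directly. A self-contained sketch in which SourceRangeFactory is a stand-in for the real torch._C._jit_tree_views class:

class SourceRangeFactory:
    def __init__(self, source, filename, file_lineno, leading_whitespace_len):
        self.source = source
        self.filename = filename
        self.file_lineno = file_lineno
        self.leading_whitespace_len = leading_whitespace_len

class SourceContext(SourceRangeFactory):
    # no __init__ needed: the inherited one already does the right thing
    pass

ctx = SourceContext("x = 1", "example.py", 1, 0)
print(ctx.filename)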

test/test_multiprocessing.py
189 issues
Unable to import 'torch'
Error

Line: 10 Column: 1

              import copy
from sys import platform

import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,

Reported by Pylint.

Unable to import 'torch.cuda'
Error

Line: 11 Column: 1

              from sys import platform

import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
                                                  load_tests, slowTest, TEST_WITH_TSAN)

Reported by Pylint.

Unable to import 'torch.multiprocessing'
Error

Line: 12 Column: 1

              
import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
                                                  load_tests, slowTest, TEST_WITH_TSAN)


Reported by Pylint.

Unable to import 'torch.utils.hooks'
Error

Line: 13 Column: 1

              import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
                                                  load_tests, slowTest, TEST_WITH_TSAN)

# load_tests from common_utils is used to automatically filter tests for

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 14 Column: 1

              import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
                                                  load_tests, slowTest, TEST_WITH_TSAN)

# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 15 Column: 1

              import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
                                                  load_tests, slowTest, TEST_WITH_TSAN)

# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

Reported by Pylint.

Assigning the same variable 'load_tests' to itself
Error

Line: 20 Column: 1

              
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir('/dev/shm')
TEST_CUDA_IPC = torch.cuda.is_available() and \
    sys.platform != 'darwin' and \

Reported by Pylint.
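
The load_tests self-assignment is deliberate: as the comment in the file says, it marks the imported name as used so that flake8 stays quiet. Since the pattern is intentional, the matching Pylint check can be disabled on that single line; a hedged sketch:

from torch.testing._internal.common_utils import load_tests

# intentional self-assignment: it marks the import as used for flake8
load_tests = load_tests  # pylint: disable=self-assigning-variable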

Probable insecure usage of temp file/directory.
Security

Line: 23
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html

              load_tests = load_tests

TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir('/dev/shm')
TEST_CUDA_IPC = torch.cuda.is_available() and \
    sys.platform != 'darwin' and \
    sys.platform != 'win32'
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1


Reported by Bandit.
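
Bandit's B108 check flags the hardcoded /dev/shm path. In this test the probe is intentional (it detects whether POSIX shared memory is available), so the usual remedies are an explicit # nosec annotation on that line or, when a scratch path is genuinely needed, letting tempfile choose one. A hedged sketch of both:

import os
import tempfile

# the probe is deliberate, so acknowledge the finding in place
HAS_SHM_FILES = os.path.isdir('/dev/shm')  # nosec

# when a writable scratch location is actually required, avoid hardcoding it
with tempfile.TemporaryDirectory() as scratch_dir:
    print(os.path.join(scratch_dir, "data.bin"))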

Unused variable 'i'
Error

Line: 41 Column: 9

              

def _test_cuda_ipc_deadlock_actor(queue, iterations):
    for i in range(iterations):
        if not queue.empty():
            queue.get()
        time.sleep(.01)



Reported by Pylint.

Unused variable 'i'
Error

Line: 49 Column: 9

              
def _test_cuda_ipc_deadlock_learner(queue, iterations):
    net = torch.nn.LSTM(1, 1).cuda()
    for i in range(iterations):
        if not queue.full():
            queue.put(copy.deepcopy(net.state_dict()))
        time.sleep(.01)



Reported by Pylint.
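
In both loops above the counter is never read, so the conventional underscore name documents that and silences unused-variable. A minimal sketch of the first helper with the rename applied:

import time

def _test_cuda_ipc_deadlock_actor(queue, iterations):
    for _ in range(iterations):  # `_` marks the counter as intentionally unused
        if not queue.empty():
            queue.get()
        time.sleep(.01)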

test/test_ops.py
188 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              from functools import partial, wraps
import warnings

import torch

from torch.testing import \
    (FileCheck, floating_and_complex_types_and, get_all_dtypes)
from torch.testing._internal.common_utils import \
    (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor,

Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 7 Column: 1

              
import torch

from torch.testing import \
    (FileCheck, floating_and_complex_types_and, get_all_dtypes)
from torch.testing._internal.common_utils import \
    (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor,
     gradcheck, gradgradcheck, IS_IN_CI, suppress_warnings)
from torch.testing._internal.common_methods_invocations import \

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 9 Column: 1

              
from torch.testing import \
    (FileCheck, floating_and_complex_types_and, get_all_dtypes)
from torch.testing._internal.common_utils import \
    (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor,
     gradcheck, gradgradcheck, IS_IN_CI, suppress_warnings)
from torch.testing._internal.common_methods_invocations import \
    (op_db, _NOTHING, UnaryUfuncInfo, SpectralFuncInfo)
from torch.testing._internal.common_device_type import \

Reported by Pylint.

Unable to import 'torch.testing._internal.common_methods_invocations'
Error

Line: 12 Column: 1

              from torch.testing._internal.common_utils import \
    (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor,
     gradcheck, gradgradcheck, IS_IN_CI, suppress_warnings)
from torch.testing._internal.common_methods_invocations import \
    (op_db, _NOTHING, UnaryUfuncInfo, SpectralFuncInfo)
from torch.testing._internal.common_device_type import \
    (deviceCountAtLeast, instantiate_device_type_tests, ops, onlyCUDA, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 14 Column: 1

                   gradcheck, gradgradcheck, IS_IN_CI, suppress_warnings)
from torch.testing._internal.common_methods_invocations import \
    (op_db, _NOTHING, UnaryUfuncInfo, SpectralFuncInfo)
from torch.testing._internal.common_device_type import \
    (deviceCountAtLeast, instantiate_device_type_tests, ops, onlyCUDA, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining

Reported by Pylint.

Unable to import 'torch.testing._internal.common_jit'
Error

Line: 16 Column: 1

                  (op_db, _NOTHING, UnaryUfuncInfo, SpectralFuncInfo)
from torch.testing._internal.common_device_type import \
    (deviceCountAtLeast, instantiate_device_type_tests, ops, onlyCUDA, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining
import torch.testing._internal.opinfo_helper as opinfo_helper


Reported by Pylint.

Unable to import 'torch.testing._internal.jit_metaprogramming_utils'
Error

Line: 17 Column: 1

              from torch.testing._internal.common_device_type import \
    (deviceCountAtLeast, instantiate_device_type_tests, ops, onlyCUDA, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining
import torch.testing._internal.opinfo_helper as opinfo_helper

# variant testing is only done with torch.float and torch.cfloat to avoid

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 19 Column: 1

              from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining
import torch.testing._internal.opinfo_helper as opinfo_helper

# variant testing is only done with torch.float and torch.cfloat to avoid
#   excessive test times and maximize signal to noise ratio
_variant_ops = partial(ops, dtypes=OpDTypes.supported,

Reported by Pylint.

Unable to import 'torch.testing._internal.opinfo_helper'
Error

Line: 20 Column: 1

              from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
    check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining
import torch.testing._internal.opinfo_helper as opinfo_helper

# variant testing is only done with torch.float and torch.cfloat to avoid
#   excessive test times and maximize signal to noise ratio
_variant_ops = partial(ops, dtypes=OpDTypes.supported,
                       allowed_dtypes=(torch.float, torch.cfloat))

Reported by Pylint.

Undefined variable 'op'
Error

Line: 805 Column: 40

                  _alias_ops = partial(ops, dtypes=OpDTypes.supported,
                         allowed_dtypes=(torch.float,))

    @_alias_ops((op for op in op_db if op.aliases))
    def test_jit_alias_remapping(self, device, dtype, op):
        # Required to avoid undefined value: tensor error in JIT compilation of the function template
        tensor = torch.tensor

        samples = op.sample_inputs(device, dtype, requires_grad=True)

Reported by Pylint.

test/test_foreach.py
186 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              from numbers import Number
import random
import re
import torch
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, TEST_WITH_SLOW
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCUDA, skipCUDAIfRocm, skipMeta, ops)
from torch.testing._internal.common_methods_invocations import \

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 7 Column: 1

              import re
import torch
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, TEST_WITH_SLOW
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCUDA, skipCUDAIfRocm, skipMeta, ops)
from torch.testing._internal.common_methods_invocations import \
    (foreach_unary_op_db, foreach_binary_op_db, foreach_pointwise_op_db, foreach_minmax_op_db, make_tensor)


Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 8 Column: 1

              import torch
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, TEST_WITH_SLOW
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCUDA, skipCUDAIfRocm, skipMeta, ops)
from torch.testing._internal.common_methods_invocations import \
    (foreach_unary_op_db, foreach_binary_op_db, foreach_pointwise_op_db, foreach_minmax_op_db, make_tensor)

# Includes some values such that N * N won't be a multiple of 4,

Reported by Pylint.

Unable to import 'torch.testing._internal.common_methods_invocations'
Error

Line: 10 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, TEST_WITH_SLOW
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCUDA, skipCUDAIfRocm, skipMeta, ops)
from torch.testing._internal.common_methods_invocations import \
    (foreach_unary_op_db, foreach_binary_op_db, foreach_pointwise_op_db, foreach_minmax_op_db, make_tensor)

# Includes some values such that N * N won't be a multiple of 4,
# which should ensure we test the vectorized and non-vectorized
# kernel code paths.

Reported by Pylint.

Unused argument 'dtype'
Error

Line: 251 Column: 31

                          for _, scalarlist in getScalarLists(N):
                self._test_binary_op_scalarlist(device, dtype, op, N, scalarlist, False, False)

    def _pointwise_test(self, dtype, op, ref, inputs, is_fastpath, is_inplace, *, values=None):
        ref_inputs = [[t.clone().detach() for t in inputs[0]], inputs[1], inputs[2]] if is_inplace else inputs
        try:
            actual = op(inputs, self.is_cuda, is_fastpath)
        except RuntimeError as e:
            with self.assertRaisesRegex(type(e), re.escape(str(e))):

Reported by Pylint.

Unused argument 'dtype'
Error

Line: 323 Column: 35

                  # note(mkozuki): fastpath test uses dtypes which fastpath implementation supports.
    # To confirm the dtypes of `OpInfo` cover the dtypes that the function support,
    # this test does not use `try-except` for fastpath.
    def _regular_unary_test(self, dtype, op, ref, inputs, is_fastpath):
        if is_fastpath:
            self.assertEqual(ref(inputs), op(inputs, self.is_cuda, is_fastpath))
            return
        try:
            actual = op(inputs, self.is_cuda, is_fastpath)

Reported by Pylint.

Unused argument 'dtype'
Error

Line: 342 Column: 35

                  # - for integer inputs, trigonometric functions and exponential function returns float outputs,
    #   which causes "result type Float can't be case to the desired type" error.
    # Thus, `try-except` is used even if `is_fastpath` is `True`.
    def _inplace_unary_test(self, dtype, inplace, inplace_ref, inputs, is_fastpath):
        copied_inputs = [[t.clone().detach() for t in tensors] for tensors in inputs]
        try:
            inplace(inputs, self.is_cuda, is_fastpath)
        except RuntimeError as e:
            with self.assertRaisesRegex(type(e), re.escape(str(e))):

Reported by Pylint.

Expression "(inplace_ref(copied_inputs), )" is assigned to nothing
Error

Line: 350 Column: 13

                          with self.assertRaisesRegex(type(e), re.escape(str(e))):
                inplace_ref(copied_inputs)
        else:
            inplace_ref(copied_inputs),
            self.assertEqual(copied_inputs, inputs)

    def _test_unary(self, device, dtype, opinfo, N, is_fastpath):
        op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, 1)
        inputs = opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),

Reported by Pylint.
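
The trailing comma after inplace_ref(copied_inputs) turns the statement into a one-element tuple that is built and immediately discarded, which is what the checker reports; dropping the comma is the whole fix. A self-contained sketch with a stand-in in-place function:

def inplace_double(values):
    for i, v in enumerate(values):
        values[i] = v * 2

data = [1, 2, 3]
# with a trailing comma this call would build and discard the tuple (None,):
#     inplace_double(data),
inplace_double(data)
print(data)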

Unused argument 'dtype'
Error

Line: 415 Column: 72

                      self._minmax_test(op, inputs, True, 1)

    @dtypes(*torch.testing.get_all_dtypes())
    def test_add_scalar_with_empty_list_and_empty_tensor(self, device, dtype):
        # TODO: enable empty list case
        for tensors in [[torch.randn([0])]]:
            res = torch._foreach_add(tensors, 1)
            self.assertEqual(res, tensors)


Reported by Pylint.

Unused argument 'device'
Error

Line: 415 Column: 64

                      self._minmax_test(op, inputs, True, 1)

    @dtypes(*torch.testing.get_all_dtypes())
    def test_add_scalar_with_empty_list_and_empty_tensor(self, device, dtype):
        # TODO: enable empty list case
        for tensors in [[torch.randn([0])]]:
            res = torch._foreach_add(tensors, 1)
            self.assertEqual(res, tensors)


Reported by Pylint.

caffe2/python/rnn_cell.py
186 issues
No value for argument 'encoder_lengths' in method call
Error

Line: 1457 Column: 9

                          forward_only=False,
            drop_states=False,
        )
        super(MILSTMWithAttentionCell, self).__init__(
            encoder_output_dim=encoder_output_dim,
            encoder_outputs=encoder_outputs,
            decoder_cell=decoder_cell,
            decoder_state_dim=decoder_state_dim,
            name=name,

Reported by Pylint.

Too many arguments for logging format string
Error

Line: 1660 Column: 9

                          outputs_with_grads)
        for i in outputs_without_grad:
            model.net.ZeroGradient(outputs[i], [])
        logging.debug("Added 0 gradients for blobs:",
                      [outputs[i] for i in outputs_without_grad])

        final_output = self.cell._prepare_output_sequence(model, outputs)

        return final_output, outputs

Reported by Pylint.
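
The logging.debug call passes the blob list as a second positional argument, but its format string contains no placeholder for it, so the list is never interpolated into the message (and the record can fail to format once debug logging is enabled). Adding a %s placeholder keeps the arguments and the format string in sync; a minimal sketch with illustrative blob names:

import logging

logging.basicConfig(level=logging.DEBUG)
blob_names = ["gpu_0/loss_grad", "gpu_1/loss_grad"]

# one %s per extra argument; logging performs the formatting lazily
logging.debug("Added 0 gradients for blobs: %s", blob_names)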

Using deprecated method getargspec()
Error

Line: 180 Column: 21

                          extra_inputs = _RectifyNames(extra_input_names)
            extra_inputs = zip(extra_input_names, extra_input_sizes)

        arg_names = inspect.getargspec(self.apply_override).args
        rectified = [input_t, seq_lengths, states, timestep]
        if 'extra_inputs' in arg_names:
            rectified.append(extra_inputs)
        return rectified


Reported by Pylint.
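
inspect.getargspec() has long been deprecated and was removed in Python 3.11; inspect.getfullargspec() (or inspect.signature()) is the replacement for reading argument names. A minimal sketch with a stand-in apply_override function rather than the real cell method:

import inspect

def apply_override(self, model, input_t, seq_lengths, states, timestep, extra_inputs=None):
    return None

# getfullargspec also understands keyword-only arguments and annotations
arg_names = inspect.getfullargspec(apply_override).args
print(arg_names)
print(list(inspect.signature(apply_override).parameters))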

Unused argument 'model'
Error

Line: 214 Column: 29

                      '''
        raise NotImplementedError('Abstract method')

    def prepare_input(self, model, input_blob):
        '''
        If some operations in _apply method depend only on the input,
        not on recurrent states, they could be computed in advance.

        model: ModelHelper object new operators would be added to

Reported by Pylint.

Unused argument 'model'
Error

Line: 255 Column: 31

                      '''
        raise NotImplementedError('Abstract method')

    def _prepare_output(self, model, states):
        '''
        Allows arbitrary post-processing of primary output.
        '''
        return states[self.get_output_state_index()]


Reported by Pylint.

Unused argument 'model'
Error

Line: 261 Column: 40

                      '''
        return states[self.get_output_state_index()]

    def _prepare_output_sequence(self, model, state_outputs):
        '''
        Allows arbitrary post-processing of primary sequence output.

        (Note that state_outputs alternates between full-sequence and final
        output for each state, thus the index multiplier 2.)

Reported by Pylint.

Method 'get_state_names_override' is abstract in class 'RNNCell' but is not overridden
Error

Line: 294 Column: 1

              

# based on https://pytorch.org/docs/master/nn.html#torch.nn.RNNCell
class BasicRNNCell(RNNCell):
    def __init__(
        self,
        input_size,
        hidden_size,
        forget_bias,

Reported by Pylint.

Unused argument 'forget_bias'
Error

Line: 299 Column: 9

                      self,
        input_size,
        hidden_size,
        forget_bias,
        memory_optimization,
        drop_states=False,
        initializer=None,
        activation=None,
        **kwargs

Reported by Pylint.

Unused argument 'memory_optimization'
Error

Line: 300 Column: 9

                      input_size,
        hidden_size,
        forget_bias,
        memory_optimization,
        drop_states=False,
        initializer=None,
        activation=None,
        **kwargs
    ):

Reported by Pylint.

Parameters differ from overridden 'apply_override' method
Error

Line: 317 Column: 5

                              'BasicRNNCell with unknown activation function (%s)'
                % self.activation)

    def apply_override(
        self,
        model,
        input_t,
        seq_lengths,
        states,

Reported by Pylint.

test/jit/test_autodiff_subgraph_slicing.py
185 issues
Unable to import 'torch.testing._internal.common_utils'
Error

Line: 4 Column: 1

              import os
import sys
import unittest
from torch.testing._internal.common_utils import GRAPH_EXECUTOR, ProfilingMode, \
    num_profiled_runs, enable_profiling_mode_for_profiling_tests
from torch.testing._internal.common_jit import check_against_reference
import torch

# Make the helper files in test/ importable

Reported by Pylint.

Unable to import 'torch.testing._internal.common_jit'
Error

Line: 6 Column: 1

              import unittest
from torch.testing._internal.common_utils import GRAPH_EXECUTOR, ProfilingMode, \
    num_profiled_runs, enable_profiling_mode_for_profiling_tests
from torch.testing._internal.common_jit import check_against_reference
import torch

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

Reported by Pylint.

Unable to import 'torch'
Error

Line: 7 Column: 1

              from torch.testing._internal.common_utils import GRAPH_EXECUTOR, ProfilingMode, \
    num_profiled_runs, enable_profiling_mode_for_profiling_tests
from torch.testing._internal.common_jit import check_against_reference
import torch

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, disable_autodiff_subgraph_inlining

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 12 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, disable_autodiff_subgraph_inlining
from torch.testing import FileCheck

from typing import List, Tuple, Optional

if __name__ == '__main__':

Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 13 Column: 1

              pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, disable_autodiff_subgraph_inlining
from torch.testing import FileCheck

from typing import List, Tuple, Optional

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"

Reported by Pylint.

Unexpected keyword argument 'profile_and_replay' in function call
Error

Line: 52 Column: 26

                      input = torch.rand(6, 10).requires_grad_()
        with disable_autodiff_subgraph_inlining():
            with enable_profiling_mode_for_profiling_tests():
                output = func(input, profile_and_replay=True)
                self.assertAutodiffNode(func.graph_for(input), True, ['prim::ConstantChunk'], [])


    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "This threshold is only valid for Profiling Executor")
    def test_diff_graph_inline_threshold(self):

Reported by Pylint.

TODO: It is better if we can test directly on graphs instead of the current
Error

Line: 25 Column: 3

              
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't support gradients")
class TestAutodiffSubgraphSlicing(JitTestCase):
    # TODO: It is better if we can test directly on graphs instead of the current
    # end-to-end fashion.
    def _perform_ad_subgraph_slicing(self, fn, *input_sizes):
        with disable_autodiff_subgraph_inlining():
            with enable_profiling_mode_for_profiling_tests():
                ge = torch.jit.script(fn)

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 49 Column: 9

                          x1, x2 = torch.chunk(x, 2)
            return (x1, x2)

        input = torch.rand(6, 10).requires_grad_()
        with disable_autodiff_subgraph_inlining():
            with enable_profiling_mode_for_profiling_tests():
                output = func(input, profile_and_replay=True)
                self.assertAutodiffNode(func.graph_for(input), True, ['prim::ConstantChunk'], [])


Reported by Pylint.

Unused variable 'output'
Error

Line: 52 Column: 17

                      input = torch.rand(6, 10).requires_grad_()
        with disable_autodiff_subgraph_inlining():
            with enable_profiling_mode_for_profiling_tests():
                output = func(input, profile_and_replay=True)
                self.assertAutodiffNode(func.graph_for(input), True, ['prim::ConstantChunk'], [])


    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "This threshold is only valid for Profiling Executor")
    def test_diff_graph_inline_threshold(self):

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 73 Column: 17

                                  #  two nodes should NOT be fused
                    return torch.sigmoid(x)

                input = torch.rand([4, 4], requires_grad=True)
                foo(input)
                foo(input)

                bar(input)
                bar(input)

Reported by Pylint.