The following issues were found:

torch/quantization/fx/prepare.py
151 issues
Attempted relative import beyond top-level package
Error

Line: 18 Column: 1

              )
from torch.fx.node import Argument

from ..qconfig import QConfigAny
from .qconfig_utils import (
    convert_dict_to_ordered_dict,
    generate_qconfig_map,
    get_flattened_qconfig_dict,
)

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

              from torch.fx.node import Argument

from ..qconfig import QConfigAny
from .qconfig_utils import (
    convert_dict_to_ordered_dict,
    generate_qconfig_map,
    get_flattened_qconfig_dict,
)


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 25 Column: 1

                  get_flattened_qconfig_dict,
)

from .quantization_patterns import (
    QuantizeHandler,
    CustomModuleQuantizeHandler,
    StandaloneModuleQuantizeHandler,
)


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 31 Column: 1

                  StandaloneModuleQuantizeHandler,
)

from .quantization_types import Pattern

from ._equalize import (
    is_equalization_observer,
    node_supports_equalization,
)

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 33 Column: 1

              
from .quantization_types import Pattern

from ._equalize import (
    is_equalization_observer,
    node_supports_equalization,
)

from .graph_module import (

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 38 Column: 1

                  node_supports_equalization,
)

from .graph_module import (
    ObservedGraphModule,
    ObservedStandaloneGraphModule,
)

from .pattern_utils import (

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 43 Column: 1

                  ObservedStandaloneGraphModule,
)

from .pattern_utils import (
    MatchResult,
    get_default_quant_patterns,
    get_default_output_activation_post_process_map,
)


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 49 Column: 1

                  get_default_output_activation_post_process_map,
)

from .match_utils import (
    find_matches,
)

from .utils import (
    _parent_name,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 53 Column: 1

                  find_matches,
)

from .utils import (
    _parent_name,
    get_custom_module_class_keys,
    all_node_args_have_no_tensors,
    assert_and_get_unique_device,
    node_bool_tensor_arg_indexes,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 65 Column: 1

                  FUNCTIONAL_OPS_WITH_BIAS,
)

from ..fuser_method_mappings import DEFAULT_OP_LIST_TO_FUSER_METHOD

from ..quantization_mappings import (
    get_default_qat_module_mappings,
)


            

Reported by Pylint.

test/jit/test_isinstance.py
150 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              import os
import sys

import torch
import warnings
from typing import List, Any, Dict, Tuple, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 11 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"
        "\tpython test/test_jit.py TESTNAME\n\n"

            

Reported by Pylint.

TODO: above line in eager will evaluate to True while in
Error

Line: 107 Column: 3

                      def optional_test_none(x: Any):
            assert torch.jit.isinstance(x, Optional[torch.Tensor])
            # assert torch.jit.isinstance(x, Optional[str])
            # TODO: above line in eager will evaluate to True while in
            #       the TS interpreter will evaluate to False as the
            #       first torch.jit.isinstance refines the 'None' type

        x = None
        self.checkScript(optional_test_none, (x,))

            

Reported by Pylint.

Unused variable 'y'
Error

Line: 204 Column: 21

                              hit = not hit
                for el in obj:
                    # perform some tensor operation
                    y = el.clamp(0, 0.5)
            if torch.jit.isinstance(obj, Dict[str, str]):
                hit = not hit
                str_cat = ""
                for val in obj.values():
                    str_cat = str_cat + val

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import os
import sys

import torch
import warnings
from typing import List, Any, Dict, Tuple, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

            

Reported by Pylint.

standard import "import warnings" should be placed before "import torch"
Error

Line: 5 Column: 1

              import sys

import torch
import warnings
from typing import List, Any, Dict, Tuple, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

standard import "from typing import List, Any, Dict, Tuple, Optional" should be placed before "import torch"
Error

Line: 6 Column: 1

              
import torch
import warnings
from typing import List, Any, Dict, Tuple, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

            

Reported by Pylint.

Import "from torch.testing._internal.jit_utils import JitTestCase" should be placed at the top of the module
Error

Line: 11 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"
        "\tpython test/test_jit.py TESTNAME\n\n"

            

Reported by Pylint.

Missing class docstring
Error

Line: 21 Column: 1

                  )

# Tests for torch.jit.isinstance
class TestIsinstance(JitTestCase):
    def test_int(self):
        def int_test(x: Any):
            assert torch.jit.isinstance(x, int)
            assert not torch.jit.isinstance(x, float)


            

Reported by Pylint.

Too many public methods (28/20)
Error

Line: 21 Column: 1

                  )

# Tests for torch.jit.isinstance
class TestIsinstance(JitTestCase):
    def test_int(self):
        def int_test(x: Any):
            assert torch.jit.isinstance(x, int)
            assert not torch.jit.isinstance(x, float)


            

Reported by Pylint.

torch/fx/experimental/graph_gradual_typechecker.py
150 issues
Module 'torch' has no 'add' member
Error

Line: 90 Column: 26

                  return register


@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def add_inference_rule(n: Node):
    assert isinstance(n.args[0], Node)
    assert isinstance(n.args[1], Node)
    t1 = n.args[0].type

            

Reported by Pylint.

Module 'torch' has no 'transpose' member
Error

Line: 148 Column: 26

                  # TODO. We leave it like this till we add a type to represent tensor sizes
    return n.type

@register_inference_rule(torch.transpose)
def transpose_inference_rule(n: Node):
    if n.target == torch.transpose:
        assert isinstance(n.args[0], Node)
        t = n.args[0].type


            

Reported by Pylint.

Module 'torch' has no 'transpose' member
Error

Line: 150 Column: 20

              
@register_inference_rule(torch.transpose)
def transpose_inference_rule(n: Node):
    if n.target == torch.transpose:
        assert isinstance(n.args[0], Node)
        t = n.args[0].type

        assert isinstance(n.args[1], int)
        assert isinstance(n.args[2], int)

            

Reported by Pylint.

Module 'torch' has no 'reshape' member
Error

Line: 176 Column: 26

                          raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')


@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node):
    assert isinstance(n.args[0], Node)
    t1 = n.args[0].type

    assert isinstance(n.args[1], list)

            

Reported by Pylint.

Module 'torch' has no 'flatten' member
Error

Line: 458 Column: 26

                  else:
        raise TypeError(f'Incompatable dimentions {start_dim}, {end_dim - 1} in type {tensor_type}')

@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node):
    assert isinstance(n.args[0], Node)

    # set the default start and end dims
    start_dim = 1

            

Reported by Pylint.

Module 'torch' has no 'add' member
Error

Line: 580 Column: 27

                      res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
    return res

@register_refinement_rule(torch.add)
@register_refinement_rule(operator.add)
def add_eq(n: Node):
    res = []
    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
        arg_type1 = n.args[0].type

            

Reported by Pylint.

Module 'torch' has no 'flatten' member
Error

Line: 611 Column: 27

                      res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
    return res

@register_refinement_rule(torch.flatten)
def flatten_refinement_rule(n: Node):
    assert isinstance(n.args[0], Node)

    eq_const = []


            

Reported by Pylint.

Todo: maybe figure out that broadcasting definitely did not happen?
Error

Line: 114 Column: 3

                      n.meta[str(n.args[0])] = new_t1
        n.meta[str(n.args[1])] = new_t2

    # Todo: maybe figure out that broadcasting definitely did not happen?
    else:
        n.meta['broadcast'] = False

    new_t1 = t1 if not n.meta['broadcast'] else new_t1
    new_t2 = t2 if not n.meta['broadcast'] else new_t2

            

Reported by Pylint.

Unused argument 'traced'
Error

Line: 136 Column: 38

                                      f' Types should match ')

@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, traced):
    attr_node = n.args[0]
    attr_name = n.args[1]

    if attr_name == "shape":
        n.type = Dyn

            

Reported by Pylint.

Unused variable 'attr_node'
Error

Line: 137 Column: 5

              
@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, traced):
    attr_node = n.args[0]
    attr_name = n.args[1]

    if attr_name == "shape":
        n.type = Dyn
    else:

            

Reported by Pylint.

test/distributed/_sharded_tensor/test_sharded_tensor.py
150 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              from functools import wraps
import io
import sys
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import _sharded_tensor
from torch.distributed._sharded_tensor import (
    load_with_process_group,

            

Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 5 Column: 1

              import io
import sys
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import _sharded_tensor
from torch.distributed._sharded_tensor import (
    load_with_process_group,
    state_dict_hook,

            

Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 6 Column: 1

              import sys
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import _sharded_tensor
from torch.distributed._sharded_tensor import (
    load_with_process_group,
    state_dict_hook,
    pre_load_state_dict_hook,

            

Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 7 Column: 1

              import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import _sharded_tensor
from torch.distributed._sharded_tensor import (
    load_with_process_group,
    state_dict_hook,
    pre_load_state_dict_hook,
)

            

Reported by Pylint.

Unable to import 'torch.distributed._sharded_tensor'
Error

Line: 8 Column: 1

              import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import _sharded_tensor
from torch.distributed._sharded_tensor import (
    load_with_process_group,
    state_dict_hook,
    pre_load_state_dict_hook,
)
from torch.distributed._sharding_spec import (

            

Reported by Pylint.

Unable to import 'torch.distributed._sharding_spec'
Error

Line: 13 Column: 1

                  state_dict_hook,
    pre_load_state_dict_hook,
)
from torch.distributed._sharding_spec import (
    ChunkShardingSpec,
    EnumerableShardingSpec,
    ShardMetadata
)
from torch.testing._internal.common_distributed import (

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_distributed'
Error

Line: 18 Column: 1

                  EnumerableShardingSpec,
    ShardMetadata
)
from torch.testing._internal.common_distributed import (
    MultiProcessTestCase,
    requires_nccl,
    skip_if_lt_x_gpu,
    TEST_SKIPS,
)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 24 Column: 1

                  skip_if_lt_x_gpu,
    TEST_SKIPS,
)
from torch.testing._internal.common_utils import (
    TEST_WITH_DEV_DBG_ASAN,
    run_tests,
)

if TEST_WITH_DEV_DBG_ASAN:

            

Reported by Pylint.

Instance of 'ShardedTensorTestBase' has no 'rank' member
Error

Line: 63 Column: 18

                      dist.init_process_group(
            backend="nccl",
            world_size=self.world_size,
            rank=self.rank,
            init_method=f"file://{self.file_name}",
        )

    def init_rpc(self):
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions()

            

Reported by Pylint.

Instance of 'ShardedTensorTestBase' has no 'file_name' member
Error

Line: 64 Column: 35

                          backend="nccl",
            world_size=self.world_size,
            rank=self.rank,
            init_method=f"file://{self.file_name}",
        )

    def init_rpc(self):
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions()
        rpc_backend_options.init_method = f"file://{self.file_name}"

            

Reported by Pylint.

torch/utils/cpp_extension.py
149 issues
subprocess call with shell=True identified, security issue.
Security injection

Line: 90
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html

                      # Guess #2
        try:
            pipe_hipcc = subprocess.Popen(
                ["which hipcc | xargs readlink -f"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            hipcc, _ = pipe_hipcc.communicate()
            # this will be either <ROCM_HOME>/hip/bin/hipcc or <ROCM_HOME>/bin/hipcc
            rocm_home = os.path.dirname(os.path.dirname(hipcc.decode().rstrip('\r\n')))
            if os.path.basename(rocm_home) == 'hip':
                rocm_home = os.path.dirname(rocm_home)

            

Reported by Bandit.

Attempted relative import beyond top-level package
Error

Line: 16 Column: 1

              
import torch
import torch._appdirs
from .file_baton import FileBaton
from ._cpp_extension_versioner import ExtensionVersioner
from .hipify import hipify_python
from .hipify.hipify_python import get_hip_file_path, GeneratedFileCleaner
from typing import List, Optional, Union


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 17 Column: 1

              import torch
import torch._appdirs
from .file_baton import FileBaton
from ._cpp_extension_versioner import ExtensionVersioner
from .hipify import hipify_python
from .hipify.hipify_python import get_hip_file_path, GeneratedFileCleaner
from typing import List, Optional, Union

from setuptools.command.build_ext import build_ext

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 18 Column: 1

              import torch._appdirs
from .file_baton import FileBaton
from ._cpp_extension_versioner import ExtensionVersioner
from .hipify import hipify_python
from .hipify.hipify_python import get_hip_file_path, GeneratedFileCleaner
from typing import List, Optional, Union

from setuptools.command.build_ext import build_ext
from pkg_resources import packaging  # type: ignore[attr-defined]

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

              from .file_baton import FileBaton
from ._cpp_extension_versioner import ExtensionVersioner
from .hipify import hipify_python
from .hipify.hipify_python import get_hip_file_path, GeneratedFileCleaner
from typing import List, Optional, Union

from setuptools.command.build_ext import build_ext
from pkg_resources import packaging  # type: ignore[attr-defined]


            

Reported by Pylint.

Module 'torch' has no 'version' member
Error

Line: 101 Column: 22

                          rocm_home = '/opt/rocm'
            if not os.path.exists(rocm_home):
                rocm_home = None
    if rocm_home and torch.version.hip is None:
        print(f"No ROCm runtime is found, using ROCM_HOME='{rocm_home}'")
    return rocm_home


def _join_rocm_home(*paths) -> str:

            

Reported by Pylint.

Module 'torch' has no 'version' member
Error

Line: 167 Column: 58

              '''
ROCM_HOME = _find_rocm_home()
MIOPEN_HOME = _join_rocm_home('miopen') if ROCM_HOME else None
IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False
ROCM_VERSION = None
if torch.version.hip is not None:
    ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2])

CUDA_HOME = _find_cuda_home()

            

Reported by Pylint.

Module 'torch' has no 'version' member
Error

Line: 169 Column: 4

              MIOPEN_HOME = _join_rocm_home('miopen') if ROCM_HOME else None
IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False
ROCM_VERSION = None
if torch.version.hip is not None:
    ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2])

CUDA_HOME = _find_cuda_home()
CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH')
# PyTorch releases have the version pattern major.minor.patch, whereas when

            

Reported by Pylint.

Module 'torch' has no 'version' member
Error

Line: 170 Column: 42

              IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False
ROCM_VERSION = None
if torch.version.hip is not None:
    ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2])

CUDA_HOME = _find_cuda_home()
CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH')
# PyTorch releases have the version pattern major.minor.patch, whereas when
# PyTorch is built from source, we append the git commit hash, which gives

            

Reported by Pylint.

Module 'torch' has no 'version' member
Error

Line: 216 Column: 56

              

def _is_binary_build() -> bool:
    return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__)


def _accepted_compilers_for_platform() -> List[str]:
    # gnu-c++ and gnu-cc are the conda gcc compilers
    return ['clang++', 'clang'] if sys.platform.startswith('darwin') else ['g++', 'gcc', 'gnu-c++', 'gnu-cc']

            

Reported by Pylint.

benchmarks/fastrnns/factory.py
149 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch

from collections import namedtuple
from typing import List, Tuple
from torch import Tensor

from .cells import lstm_cell, premul_lstm_cell, premul_lstm_cell_no_bias, flat_lstm_cell



            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 5 Column: 1

              
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor

from .cells import lstm_cell, premul_lstm_cell, premul_lstm_cell_no_bias, flat_lstm_cell


# list[list[T]] -> list[T]

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

              from typing import List, Tuple
from torch import Tensor

from .cells import lstm_cell, premul_lstm_cell, premul_lstm_cell_no_bias, flat_lstm_cell


# list[list[T]] -> list[T]
def flatten_list(lst):
    result = []

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 80 Column: 5

              
def lnlstm_creator(script=True, decompose_layernorm=False, **kwargs):
    assert script is True
    from .custom_lstms import script_lnlstm
    input_size = kwargs['inputSize']
    hidden_size = kwargs['hiddenSize']
    seq_len = kwargs['seqLength']
    batch_size = kwargs['miniBatch']
    ge = script_lnlstm(input_size, hidden_size, 1,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 102 Column: 5

              
def dropoutlstm_creator(script=True, **kwargs):
    assert script is True
    from .custom_lstms import script_lstm, LSTMState
    input_size = kwargs['inputSize']
    hidden_size = kwargs['hiddenSize']
    seq_len = kwargs['seqLength']
    batch_size = kwargs['miniBatch']
    num_layers = kwargs['numLayers']

            

Reported by Pylint.

String statement has no effect
Error

Line: 18 Column: 1

                  return result


'''
Define a creator as a function:
(options) -> (inputs, params, forward, backward_setup, backward)
inputs: the inputs to the returned 'forward'. One can call
    forward(*inputs) directly.
params: List[Tensor] all requires_grad=True parameters.

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 58 Column: 5

              

def pytorch_lstm_creator(**kwargs):
    input, hidden, _, module = lstm_inputs(return_module=True, **kwargs)
    return ModelDef(
        inputs=[input, hidden],
        params=flatten_list(module.all_weights),
        forward=module,
        backward_setup=lstm_backward_setup,

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 68 Column: 5

              

def lstm_creator(script=True, **kwargs):
    input, hidden, params, _ = lstm_inputs(return_module=False, **kwargs)
    inputs = [input, hidden] + params[0]
    return ModelDef(
        inputs=inputs,
        params=flatten_list(params),
        forward=lstm_factory(lstm_cell, script),

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 88 Column: 5

                  ge = script_lnlstm(input_size, hidden_size, 1,
                       decompose_layernorm=decompose_layernorm).cuda()

    input = torch.randn(seq_len, batch_size, input_size, device='cuda')
    states = [(torch.randn(batch_size, hidden_size, device='cuda'),
               torch.randn(batch_size, hidden_size, device='cuda'))]

    return ModelDef(
        inputs=[input, states],

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 110 Column: 5

                  num_layers = kwargs['numLayers']
    ge = script_lstm(input_size, hidden_size, num_layers, dropout=True).cuda()

    input = torch.randn(seq_len, batch_size, input_size, device='cuda')
    states = [LSTMState(torch.randn(batch_size, hidden_size, device='cuda'),
                        torch.randn(batch_size, hidden_size, device='cuda'))
              for _ in range(num_layers)]
    return ModelDef(
        inputs=[input, states],

            

Reported by Pylint.

test/test_sparse_csr.py
147 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import warnings
import unittest
import random
import itertools
from torch.testing._internal.common_utils import \
    (IS_MACOS, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, make_tensor)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCPU, onlyCUDA)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 6 Column: 1

              import unittest
import random
import itertools
from torch.testing._internal.common_utils import \
    (IS_MACOS, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, make_tensor)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCPU, onlyCUDA)

# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 8 Column: 1

              import itertools
from torch.testing._internal.common_utils import \
    (IS_MACOS, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, make_tensor)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, dtypes, onlyCPU, onlyCUDA)

# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

            

Reported by Pylint.

Access to member 'maxDiff' before its definition line 246
Error

Line: 245 Column: 24

                                  t.col_indices().device == t.values().device

    def test_sparse_csr_print(self, device):
        orig_maxDiff = self.maxDiff
        self.maxDiff = None
        shape_nnz = [
            ((10, 10), 10),
            ((100, 10), 10),
            ((1000, 10), 10)

            

Reported by Pylint.

Assigning the same variable 'load_tests' to itself
Error

Line: 13 Column: 1

              
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests


class TestSparseCSRSampler(TestCase):

    def test_make_crow_indices(self):

            

Reported by Pylint.

Access to a protected member _sparse_csr_tensor_unsafe of a client class
Error

Line: 90 Column: 60

                      self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())

        # with size
        for sparse_csr_tensor in [torch.sparse_csr_tensor, torch._sparse_csr_tensor_unsafe]:
            sparse = sparse_csr_tensor([0, 2, 4],
                                       [0, 1, 0, 1],
                                       [1, 2, 3, 4],
                                       size=(2, 10),
                                       dtype=dtype,

            

Reported by Pylint.

Expression "t.crow_indices().dtype == index_dtype" is assigned to nothing
Error

Line: 238 Column: 21

                                                              device=device)
                    should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
                    self.assertEqual(should_be_cuda, t.is_cuda)
                    t.crow_indices().dtype == index_dtype
                    t.col_indices().dtype == index_dtype
                    t.values().dtype == dtype
                    t.crow_indices().device == t.values().device
                    t.col_indices().device == t.values().device


            

Reported by Pylint.

Expression "t.col_indices().dtype == index_dtype" is assigned to nothing
Error

Line: 239 Column: 21

                                  should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
                    self.assertEqual(should_be_cuda, t.is_cuda)
                    t.crow_indices().dtype == index_dtype
                    t.col_indices().dtype == index_dtype
                    t.values().dtype == dtype
                    t.crow_indices().device == t.values().device
                    t.col_indices().device == t.values().device

    def test_sparse_csr_print(self, device):

            

Reported by Pylint.

Expression "t.values().dtype == dtype" is assigned to nothing
Error

Line: 240 Column: 21

                                  self.assertEqual(should_be_cuda, t.is_cuda)
                    t.crow_indices().dtype == index_dtype
                    t.col_indices().dtype == index_dtype
                    t.values().dtype == dtype
                    t.crow_indices().device == t.values().device
                    t.col_indices().device == t.values().device

    def test_sparse_csr_print(self, device):
        orig_maxDiff = self.maxDiff

            

Reported by Pylint.

Expression "t.crow_indices().device == t.values().device" is assigned to nothing
Error

Line: 241 Column: 21

                                  t.crow_indices().dtype == index_dtype
                    t.col_indices().dtype == index_dtype
                    t.values().dtype == dtype
                    t.crow_indices().device == t.values().device
                    t.col_indices().device == t.values().device

    def test_sparse_csr_print(self, device):
        orig_maxDiff = self.maxDiff
        self.maxDiff = None

            

Reported by Pylint.

torch/distributions/transforms.py
146 issues
Module 'torch' has no 'zeros_like' member
Error

Line: 347 Column: 20

              
    def log_abs_det_jacobian(self, x, y):
        if not self.parts:
            return torch.zeros_like(x)

        # Compute intermediates. This will be free if parts[:-1] are all cached.
        xs = [x]
        for part in self.parts[:-1]:
            xs.append(part(xs[-1]))

            

Reported by Pylint.

Instance of 'ComposeTransform' has no 'domain' member
Error

Line: 356 Column: 21

                      xs.append(y)

        terms = []
        event_dim = self.domain.event_dim
        for part, x, y in zip(self.parts, xs[:-1], xs[1:]):
            terms.append(_sum_rightmost(part.log_abs_det_jacobian(x, y),
                                        event_dim - part.domain.event_dim))
            event_dim += part.codomain.event_dim - part.domain.event_dim
        return functools.reduce(operator.add, terms)

            

Reported by Pylint.

Instance of 'IndependentTransform' has no 'domain' member
Error

Line: 427 Column: 22

                      return self.base_transform.sign

    def _call(self, x):
        if x.dim() < self.domain.event_dim:
            raise ValueError("Too few dimensions on input")
        return self.base_transform(x)

    def _inverse(self, y):
        if y.dim() < self.codomain.event_dim:

            

Reported by Pylint.

Instance of 'IndependentTransform' has no 'codomain' member
Error

Line: 432 Column: 22

                      return self.base_transform(x)

    def _inverse(self, y):
        if y.dim() < self.codomain.event_dim:
            raise ValueError("Too few dimensions on input")
        return self.base_transform.inv(y)

    def log_abs_det_jacobian(self, x, y):
        result = self.base_transform.log_abs_det_jacobian(x, y)

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 465 Column: 25

                  bijective = True

    def __init__(self, in_shape, out_shape, cache_size=0):
        self.in_shape = torch.Size(in_shape)
        self.out_shape = torch.Size(out_shape)
        if self.in_shape.numel() != self.out_shape.numel():
            raise ValueError("in_shape, out_shape have different numbers of elements")
        super().__init__(cache_size=cache_size)


            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 466 Column: 26

              
    def __init__(self, in_shape, out_shape, cache_size=0):
        self.in_shape = torch.Size(in_shape)
        self.out_shape = torch.Size(out_shape)
        if self.in_shape.numel() != self.out_shape.numel():
            raise ValueError("in_shape, out_shape have different numbers of elements")
        super().__init__(cache_size=cache_size)

    @constraints.dependent_property

            

Reported by Pylint.

Module 'torch' has no 'finfo' member
Error

Line: 575 Column: 13

              

def _clipped_sigmoid(x):
    finfo = torch.finfo(x.dtype)
    return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1. - finfo.eps)


class SigmoidTransform(Transform):
    r"""

            

Reported by Pylint.

Module 'torch' has no 'sigmoid' member
Error

Line: 576 Column: 24

              
def _clipped_sigmoid(x):
    finfo = torch.finfo(x.dtype)
    return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1. - finfo.eps)


class SigmoidTransform(Transform):
    r"""
    Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`.

            

Reported by Pylint.

Module 'torch' has no 'clamp' member
Error

Line: 576 Column: 12

              
def _clipped_sigmoid(x):
    finfo = torch.finfo(x.dtype)
    return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1. - finfo.eps)


class SigmoidTransform(Transform):
    r"""
    Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`.

            

Reported by Pylint.

Module 'torch' has no 'finfo' member
Error

Line: 595 Column: 17

                      return _clipped_sigmoid(x)

    def _inverse(self, y):
        finfo = torch.finfo(y.dtype)
        y = y.clamp(min=finfo.tiny, max=1. - finfo.eps)
        return y.log() - (-y).log1p()

    def log_abs_det_jacobian(self, x, y):
        return -F.softplus(-x) - F.softplus(x)

            

Reported by Pylint.

android/pytorch_android/generate_test_torchscripts.py
146 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch

OUTPUT_DIR = "src/androidTest/assets/"

def scriptAndSave(module, fileName):
    print('-' * 80)
    script_module = torch.jit.script(module)
    print(script_module.graph)
    outputFileName = OUTPUT_DIR + fileName

            

Reported by Pylint.

Undefined variable 'Tensor'
Error

Line: 39 Column: 42

                      return input

    @torch.jit.script_method
    def eqTensor(self, input: Tensor) -> Tensor:
        return input

    @torch.jit.script_method
    def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
        return input

            

Reported by Pylint.

Undefined variable 'Tensor'
Error

Line: 39 Column: 31

                      return input

    @torch.jit.script_method
    def eqTensor(self, input: Tensor) -> Tensor:
        return input

    @torch.jit.script_method
    def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
        return input

            

Reported by Pylint.

Undefined variable 'Dict'
Error

Line: 43 Column: 62

                      return input

    @torch.jit.script_method
    def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
        return input

    @torch.jit.script_method
    def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
        return input

            

Reported by Pylint.

Undefined variable 'Dict'
Error

Line: 43 Column: 43

                      return input

    @torch.jit.script_method
    def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
        return input

    @torch.jit.script_method
    def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
        return input

            

Reported by Pylint.

Undefined variable 'Dict'
Error

Line: 47 Column: 62

                      return input

    @torch.jit.script_method
    def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
        return input

    @torch.jit.script_method
    def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
        return input

            

Reported by Pylint.

Undefined variable 'Dict'
Error

Line: 47 Column: 43

                      return input

    @torch.jit.script_method
    def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
        return input

    @torch.jit.script_method
    def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
        return input

            

Reported by Pylint.

Undefined variable 'Dict'
Error

Line: 51 Column: 66

                      return input

    @torch.jit.script_method
    def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
        return input

    @torch.jit.script_method
    def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
        sum = 0

            

Reported by Pylint.

Undefined variable 'Dict'
Error

Line: 51 Column: 45

                      return input

    @torch.jit.script_method
    def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
        return input

    @torch.jit.script_method
    def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
        sum = 0

            

Reported by Pylint.

Undefined variable 'Tuple'
Error

Line: 55 Column: 58

                      return input

    @torch.jit.script_method
    def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
        sum = 0
        for x in input:
            sum += x
        return (input, sum)


            

Reported by Pylint.

torch/nn/modules/loss.py
145 issues
Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

              import warnings

from .distance import PairwiseDistance
from .module import Module
from .. import functional as F
from .. import _reduction as _Reduction

from torch import Tensor
from typing import Callable, Optional

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

              import warnings

from .distance import PairwiseDistance
from .module import Module
from .. import functional as F
from .. import _reduction as _Reduction

from torch import Tensor
from typing import Callable, Optional

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 5 Column: 1

              
from .distance import PairwiseDistance
from .module import Module
from .. import functional as F
from .. import _reduction as _Reduction

from torch import Tensor
from typing import Callable, Optional


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              from .distance import PairwiseDistance
from .module import Module
from .. import functional as F
from .. import _reduction as _Reduction

from torch import Tensor
from typing import Callable, Optional



            

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 92 Column: 5

                  """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super(L1Loss, self).__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.l1_loss(input, target, reduction=self.reduction)


            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 95 Column: 23

                  def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super(L1Loss, self).__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.l1_loss(input, target, reduction=self.reduction)


class NLLLoss(_WeightedLoss):
    r"""The negative log likelihood loss. It is useful to train a classification

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 209 Column: 23

                      super(NLLLoss, self).__init__(weight, size_average, reduce, reduction)
        self.ignore_index = ignore_index

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)


class NLLLoss2d(NLLLoss):
    def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 370 Column: 23

                      self.full = full
        self.eps = eps

    def forward(self, input: Tensor, target: Tensor, var: Tensor) -> Tensor:
        return F.gaussian_nll_loss(input, target, var, full=self.full, eps=self.eps, reduction=self.reduction)


class KLDivLoss(_Loss):
    r"""The Kullback-Leibler divergence loss measure

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 453 Column: 23

                      super(KLDivLoss, self).__init__(size_average, reduce, reduction)
        self.log_target = log_target

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.kl_div(input, target, reduction=self.reduction, log_target=self.log_target)


class MSELoss(_Loss):
    r"""Creates a criterion that measures the mean squared error (squared L2 norm) between

            

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 515 Column: 5

                  """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super(MSELoss, self).__init__(size_average, reduce, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return F.mse_loss(input, target, reduction=self.reduction)


            

Reported by Pylint.