The following issues were found by Pylint and Bandit:

test/jit/test_complexity.py
29 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              import sys
import unittest

import torch

# as with test_jit tests, requires global dtype set
torch.set_default_dtype(torch.double)

# Make the helper files in test/ importable

            

Reported by Pylint.
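
A hedged note on these import-error findings: Pylint simply cannot resolve torch in the environment it was run in, so the usual fix is to lint inside an environment where torch is installed, or to suppress the check for the affected imports. A minimal sketch of the per-line suppression (the pragma placement is an assumption about what this test suite would accept):

    import sys
    import unittest

    import torch  # pylint: disable=import-error

    # as with test_jit tests, requires global dtype set
    torch.set_default_dtype(torch.double)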

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 13 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE



            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_metaprogramming_utils'
Error

Line: 14 Column: 1

              pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE


def num_ifs_loops(graph):

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 16 Column: 1

              from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE


def num_ifs_loops(graph):
    graph_str = str(graph)
    # only look at body of graph

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import os
import sys
import unittest

import torch

# as with test_jit tests, requires global dtype set
torch.set_default_dtype(torch.double)


            

Reported by Pylint.
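
The missing-docstring findings throughout this report are satisfied by a short docstring before the imports; the wording below is illustrative, not taken from the repository:

    """Tests that TorchScript compilation of nn modules and functionals stays within
    the expected if/loop complexity."""
    import os
    import sys
    import unittest

    import torch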

Import "from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode" should be placed at the top of the module
Error

Line: 13 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE



            

Reported by Pylint.

Import "from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs" should be placed at the top of the module
Error

Line: 14 Column: 1

              pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE


def num_ifs_loops(graph):

            

Reported by Pylint.

Line too long (106/100)
Error

Line: 14 Column: 1

              pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE


def num_ifs_loops(graph):

            

Reported by Pylint.

Line too long (112/100)
Error

Line: 15 Column: 1

              sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE


def num_ifs_loops(graph):
    graph_str = str(graph)

            

Reported by Pylint.

Import "from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE" should be placed at the top of the module
Error

Line: 16 Column: 1

              from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
    get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE


def num_ifs_loops(graph):
    graph_str = str(graph)
    # only look at body of graph

            

Reported by Pylint.
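
The import-placement and line-length findings in this file all trace back to the sys.path mutation that has to run before the helper imports. If the late imports must stay, a scoped pragma pair documents the intent and parenthesizing the long from-import keeps it under 100 characters; a sketch, assuming this pragma style is acceptable here (os and sys are imported at the top of the file, as in the snippets above):

    # Make the helper files in test/ importable
    pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    sys.path.append(pytorch_test_dir)
    # pylint: disable=wrong-import-position
    from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
    from torch.testing._internal.jit_metaprogramming_utils import (
        try_get_nn_module_compiled_mod_and_inputs,
        get_nn_mod_test_name,
        get_all_nn_module_tests,
        nn_functional_tests,
        get_nn_functional_compiled_fn_and_inputs,
    )
    from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
    # pylint: enable=wrong-import-position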

tools/setup_helpers/generate_code.py
29 issues
TODO: This is a little inaccurate, because it will also pick
Error

Line: 18 Column: 3

              DECLARATIONS_PATH = 'torch/share/ATen/Declarations.yaml'
NATIVE_FUNCTIONS_PATH = 'aten/src/ATen/native/native_functions.yaml'

# TODO: This is a little inaccurate, because it will also pick
# up setup_helper scripts which don't affect code generation
def all_generator_source() -> List[str]:
    r = []
    for directory, _, filenames in os.walk('tools'):
        for f in filenames:

            

Reported by Pylint.

Unused argument 'ninja_global'
Error

Line: 30 Column: 19

                  return sorted(r)


def generate_code(ninja_global: Optional[str] = None,
                  declarations_path: Optional[str] = None,
                  nn_path: Optional[str] = None,
                  native_functions_path: Optional[str] = None,
                  install_dir: Optional[str] = None,
                  subset: Optional[str] = None,

            

Reported by Pylint.
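
For unused-argument findings like this one there are two conventional fixes: prefix the parameter with an underscore (accepted by Pylint's default ignored-argument-names regex, but that changes the keyword name callers must use) or keep the signature and add a targeted pragma. A minimal, illustrative sketch of the pragma form on a shortened signature, not the repository's actual code:

    from typing import Any, Optional

    def generate_code(ninja_global: Optional[str] = None,  # pylint: disable=unused-argument
                      declarations_path: Optional[str] = None,
                      operator_selector: Any = None) -> None:
        # ninja_global is accepted only for call-site compatibility.
        print(declarations_path, operator_selector)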

Unused argument 'nn_path'
Error

Line: 32 Column: 19

              
def generate_code(ninja_global: Optional[str] = None,
                  declarations_path: Optional[str] = None,
                  nn_path: Optional[str] = None,
                  native_functions_path: Optional[str] = None,
                  install_dir: Optional[str] = None,
                  subset: Optional[str] = None,
                  disable_autograd: bool = False,
                  force_schema_registration: bool = False,

            

Reported by Pylint.

Unused argument 'force_schema_registration'
Error

Line: 37 Column: 19

                                install_dir: Optional[str] = None,
                  subset: Optional[str] = None,
                  disable_autograd: bool = False,
                  force_schema_registration: bool = False,
                  operator_selector: Any = None) -> None:
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.autograd.gen_annotated_fn_args import gen_annotated
    from tools.codegen.selective_build.selector import SelectiveBuilder


            

Reported by Pylint.

Unused variable 'tools_jit_templates'
Error

Line: 58 Column: 5

                  runfiles_dir = os.environ.get("RUNFILES_DIR", None)
    data_dir = os.path.join(runfiles_dir, 'pytorch') if runfiles_dir else ''
    autograd_dir = os.path.join(data_dir, 'tools', 'autograd')
    tools_jit_templates = os.path.join(data_dir, 'tools', 'jit', 'templates')

    if subset == "pybindings" or not subset:
        gen_autograd_python(
            declarations_path or DECLARATIONS_PATH,
            native_functions_path or NATIVE_FUNCTIONS_PATH,

            

Reported by Pylint.

Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load().
Security (cryptography)

Line: 95
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b506_yaml_load.html

                      # strip out the overload part
        # It's only for legacy config - do NOT copy this code!
        selected_op_list = {
            opname.split('.', 1)[0] for opname in yaml.load(f, Loader=YamlLoader)
        }

    # Internal build doesn't use this flag any more. Only used by OSS
    # build now. Every operator should be considered a root operator
    # (hence generating unboxing code for it, which is consistent with

            

Reported by Bandit.
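
If the aliased loader in this file is already CSafeLoader (as the import snippet above suggests), the call is safe and Bandit is reacting to an alias it cannot resolve. Making the safety explicit with yaml.safe_load both satisfies the scanner and documents the intent, at the cost of the C-loader speedup; a sketch, where selected_op_list_path is a stand-in name for whatever path the original code opens:

    import yaml

    # selected_op_list_path is a placeholder for the path used in generate_code.py
    with open(selected_op_list_path) as f:
        # strip out the overload part; plain safe_load is enough because the file
        # contains only operator-name strings, no custom YAML tags
        selected_op_list = {
            opname.split('.', 1)[0] for opname in yaml.safe_load(f)
        }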

Missing module docstring
Error

Line: 1 Column: 1

              import argparse
import os
import sys
import yaml
from typing import Any, List, Optional, cast

try:
    # use faster C loader if available
    from yaml import CSafeLoader as YamlLoader

            

Reported by Pylint.

standard import "from typing import Any, List, Optional, cast" should be placed before "import yaml"
Error

Line: 5 Column: 1

              import os
import sys
import yaml
from typing import Any, List, Optional, cast

try:
    # use faster C loader if available
    from yaml import CSafeLoader as YamlLoader
except ImportError:

            

Reported by Pylint.
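
The ordering finding only asks for the standard-library typing import to precede the third-party yaml import; regrouping the header as standard library first, then third party, resolves it without behaviour change. A sketch (the SafeLoader fallback branch is an assumption, since the report truncates it):

    import argparse
    import os
    import sys
    from typing import Any, List, Optional, cast

    import yaml

    try:
        # use faster C loader if available
        from yaml import CSafeLoader as YamlLoader
    except ImportError:
        from yaml import SafeLoader as YamlLoader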

Missing function or method docstring
Error

Line: 20 Column: 1

              
# TODO: This is a little inaccurate, because it will also pick
# up setup_helper scripts which don't affect code generation
def all_generator_source() -> List[str]:
    r = []
    for directory, _, filenames in os.walk('tools'):
        for f in filenames:
            if os.path.splitext(f)[1] in source_files:
                full = os.path.join(directory, f)

            

Reported by Pylint.

Variable name "r" doesn't conform to snake_case naming style
Error

Line: 21 Column: 5

              # TODO: This is a little inaccurate, because it will also pick
# up setup_helper scripts which don't affect code generation
def all_generator_source() -> List[str]:
    r = []
    for directory, _, filenames in os.walk('tools'):
        for f in filenames:
            if os.path.splitext(f)[1] in source_files:
                full = os.path.join(directory, f)
                r.append(full)

            

Reported by Pylint.
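
Pylint's default snake_case regex rejects one-letter variable names, so the simplest fix is a descriptive name for the accumulator, which also reads better. A sketch that keeps the original logic; source_files is the extension set already defined at module level in this file, and the docstring wording is illustrative:

    import os
    from typing import List

    def all_generator_source() -> List[str]:
        """Return the sorted list of files under tools/ that feed code generation."""
        sources = []
        for directory, _, filenames in os.walk('tools'):
            for filename in filenames:
                # source_files is the module-level extension set in generate_code.py
                if os.path.splitext(filename)[1] in source_files:
                    sources.append(os.path.join(directory, filename))
        return sorted(sources)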

caffe2/python/operator_test/learning_rate_adaption_op_test.py
29 issues
Unable to import 'hypothesis'
Error

Line: 10 Column: 1

              import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial

from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np


class TestLearningRateAdaption(serial.SerializedTestCase):

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 11 Column: 1

              import caffe2.python.serialized_test.serialized_test_util as serial

from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np


class TestLearningRateAdaption(serial.SerializedTestCase):
    @given(inputs=hu.tensors(n=2),

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 24 Column: 58

                         **hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_learning_rate_adaption_op_normalization(self, inputs, lr, lr_alpha,
                                                     gc, dc):
        grad, effgrad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            'LearningRateAdaption',

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 59 Column: 76

                                         allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_learning_rate_adaption_op_without_normalization(self, inputs, lr,
                                                             lr_alpha, gc, dc):
        grad, effgrad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            'LearningRateAdaption',

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial


            

Reported by Pylint.

Missing class docstring
Error

Line: 15 Column: 1

              import numpy as np


class TestLearningRateAdaption(serial.SerializedTestCase):
    @given(inputs=hu.tensors(n=2),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           lr_alpha=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 23 Column: 5

                                         allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_learning_rate_adaption_op_normalization(self, inputs, lr, lr_alpha,
                                                     gc, dc):
        grad, effgrad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(

            

Reported by Pylint.
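
The extra parameters here come from hypothesis: gc and dc are injected by **hu.gcs_cpu_only, so the signature cannot easily shrink below Pylint's default max-args of 5. The usual remedies are raising max-args for test code in the Pylint config or a per-method pragma; a sketch of the pragma form (imports and the enclosing class are as shown in the snippets above):

    @given(inputs=hu.tensors(n=2),
           lr=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           lr_alpha=st.floats(min_value=0.01, max_value=0.99,
                              allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_learning_rate_adaption_op_normalization(  # pylint: disable=too-many-arguments
            self, inputs, lr, lr_alpha, gc, dc):
        ...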

Argument name "lr" doesn't conform to snake_case naming style
Error

Line: 23 Column: 5

                                         allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_learning_rate_adaption_op_normalization(self, inputs, lr, lr_alpha,
                                                     gc, dc):
        grad, effgrad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 23 Column: 5

                                         allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_learning_rate_adaption_op_normalization(self, inputs, lr, lr_alpha,
                                                     gc, dc):
        grad, effgrad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(

            

Reported by Pylint.

Argument name "dc" doesn't conform to snake_case naming style
Error

Line: 23 Column: 5

                                         allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_learning_rate_adaption_op_normalization(self, inputs, lr, lr_alpha,
                                                     gc, dc):
        grad, effgrad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(

            

Reported by Pylint.
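
lr, gc, and dc are the argument names the caffe2 hypothesis test harness passes everywhere, so renaming them per test is impractical. The cleaner project-level fix is to whitelist them with Pylint's good-names option; the inline equivalent is sketched below for a single method:

    # Project-level: in the [BASIC] section of the Pylint configuration,
    # something like
    #     good-names=i,j,k,f,lr,gc,dc
    # whitelists the harness names once for the whole test tree.
    #
    # Inline equivalent for one test method:
    def test_learning_rate_adaption_op_normalization(  # pylint: disable=invalid-name
            self, inputs, lr, lr_alpha, gc, dc):
        ...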

caffe2/quantization/server/pool_dnnlowp_op_test.py
29 issues
Unable to import 'hypothesis.strategies'
Error

Line: 6 Column: 1

              import collections

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import assume, given


            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 10 Column: 1

              import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import assume, given


dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 40 Column: 9

                      order,
        in_quantized,
        gc,
        dc,
    ):
        assume(kernel <= size)
        assume(pad < kernel)

        C = input_channels

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 128 Column: 9

                      order,
        in_quantized,
        gc,
        dc,
    ):
        kernel = 2  # Only kernel size 2 is supported
        assume(kernel <= size)
        assume(pad < kernel)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              

import collections

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close

            

Reported by Pylint.

Missing class docstring
Error

Line: 17 Column: 1

              workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


class DNNLowPOpPoolTest(hu.HypothesisTestCase):
    @given(
        stride=st.integers(1, 3),
        pad=st.integers(0, 3),
        kernel=st.integers(1, 5),
        size=st.integers(1, 20),

            

Reported by Pylint.

Argument name "dc" doesn't conform to snake_case naming style
Error

Line: 28 Column: 5

                      order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_max_pool(
        self,
        stride,
        pad,
        kernel,

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 28 Column: 5

                      order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_max_pool(
        self,
        stride,
        pad,
        kernel,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 28 Column: 5

                      order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_max_pool(
        self,
        stride,
        pad,
        kernel,

            

Reported by Pylint.

Too many arguments (11/5)
Error

Line: 28 Column: 5

                      order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_max_pool(
        self,
        stride,
        pad,
        kernel,

            

Reported by Pylint.

torch/nn/modules/instancenorm.py
29 issues
Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

              from torch import Tensor

from .batchnorm import _LazyNormBase, _NormBase
from .. import functional as F


class _InstanceNorm(_NormBase):
    def __init__(
        self,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

              from torch import Tensor

from .batchnorm import _LazyNormBase, _NormBase
from .. import functional as F


class _InstanceNorm(_NormBase):
    def __init__(
        self,

            

Reported by Pylint.
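
These relative-import findings typically mean Pylint analysed the file outside its package (torch.nn.modules), so `from .. import functional` looks as if it escapes the top level; linting with the repository root on the path usually clears them. The alternative is absolute imports, sketched below, with the caveat that absolute imports inside torch itself can reintroduce circular-import issues, so this is illustrative rather than a recommendation:

    from torch import Tensor
    from torch.nn import functional as F
    from torch.nn.modules.batchnorm import _LazyNormBase, _NormBase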

Redefining built-in 'input'
Error

Line: 22 Column: 32

                      super(_InstanceNorm, self).__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)

    def _check_input_dim(self, input):
        raise NotImplementedError

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)

            

Reported by Pylint.
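
`input` is the established keyword argument name across torch.nn forward methods, so renaming it would change the public interface; the conventional response is a targeted pragma rather than a rename. A sketch on the first occurrence:

    def _check_input_dim(self, input):  # pylint: disable=redefined-builtin
        raise NotImplementedError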

Redefining built-in 'input'
Error

Line: 55 Column: 23

                          state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        return F.instance_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats, self.momentum, self.eps)


            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 130 Column: 32

                      >>> output = m(input)
    """

    def _check_input_dim(self, input):
        if input.dim() == 2:
            raise ValueError(
                'InstanceNorm1d returns 0-filled tensor to 2D tensor.'
                'This is because InstanceNorm1d reshapes inputs to'
                '(1, N * C, ...) from (N, C,...) and this makes'

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 169 Column: 32

              
    cls_to_become = InstanceNorm1d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        if input.dim() == 2:
            raise ValueError(
                'InstanceNorm1d returns 0-filled tensor to 2D tensor.'
                'This is because InstanceNorm1d reshapes inputs to'
                '(1, N * C, ...) from (N, C,...) and this makes'

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 250 Column: 32

                      >>> output = m(input)
    """

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))



            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 282 Column: 32

              
    cls_to_become = InstanceNorm2d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError("expected 4D input (got {}D input)".format(input.dim()))


class InstanceNorm3d(_InstanceNorm):

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 355 Column: 32

                      >>> output = m(input)
    """

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))



            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 387 Column: 32

              
    cls_to_become = InstanceNorm3d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError("expected 5D input (got {}D input)".format(input.dim()))

            

Reported by Pylint.

tools/autograd/gen_autograd_functions.py
29 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

              #  Functions.h/cpp: subclasses of autograd::Node
#  python_functions.h/cpp: Python bindings for the above classes
#
from .gen_inplace_or_view_type import VIEW_FUNCTIONS

from typing import List, Sequence, Tuple

from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
                                        SavedAttribute, uses_retain_variables,

            

Reported by Pylint.

TODO: This is probably not exhaustive, but it's a start
Error

Line: 292 Column: 3

              # VIEW_FUNCTIONS are not traceable because they use as_strided, which
# has an untraceable backwards, see
# https://github.com/pytorch/pytorch/issues/4250
# TODO: This is probably not exhaustive, but it's a start
UNTRACEABLE_FUNCTIONS = VIEW_FUNCTIONS

def gen_autograd_functions_lib(
    out: str,
    differentiability_infos: Sequence[DifferentiabilityInfo],

            

Reported by Pylint.

Cell variable fname defined in loop
Error

Line: 316 Column: 79

                  for suffix in ['.h', '.cpp']:
        fname = file_basename + suffix
        fm.write_with_template(fname, fname, lambda: {
            'generated_comment': '@' + f'generated from {fm.template_dir}/' + fname,
            'autograd_function_declarations': declarations,
            'autograd_function_definitions': definitions,
        })

def gen_autograd_functions_python(

            

Reported by Pylint.
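
The cell-variable warning is about the lambda's late binding: it captures the loop variable fname by reference, so if the callable were stored and invoked after the loop it would see only the last suffix. Binding the current value through a default argument removes the hazard and the warning; a one-line sketch of that change (fm, file_basename, declarations and definitions are the locals shown in the snippet above):

    for suffix in ['.h', '.cpp']:
        fname = file_basename + suffix
        fm.write_with_template(fname, fname, lambda fname=fname: {
            'generated_comment': '@' + f'generated from {fm.template_dir}/' + fname,
            'autograd_function_declarations': declarations,
            'autograd_function_definitions': definitions,
        })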

Redefining built-in 'type'
Error

Line: 377 Column: 9

              
    def save_var(var: SavedAttribute, is_output: bool) -> None:
        name = var.nctype.name
        type = var.nctype.type
        should_append_getsetdef = True
        should_append_raw_getsetdef = False

        if type == BaseCType(tensorT) or type == OptionalCType(BaseCType(tensorT)) or \
                type == MutRefCType(OptionalCType(BaseCType(tensorT))) or \

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Generates C++ autograd functions for the derivatives of ATen operations
#
# This writes two files:
#  Functions.h/cpp: subclasses of autograd::Node
#  python_functions.h/cpp: Python bindings for the above classes
#
from .gen_inplace_or_view_type import VIEW_FUNCTIONS

from typing import List, Sequence, Tuple

            

Reported by Pylint.

standard import "from typing import List, Sequence, Tuple" should be placed before "from .gen_inplace_or_view_type import VIEW_FUNCTIONS"
Error

Line: 9 Column: 1

              #
from .gen_inplace_or_view_type import VIEW_FUNCTIONS

from typing import List, Sequence, Tuple

from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
                                        SavedAttribute, uses_retain_variables,
                                        uses_single_grad)
from tools.codegen.api.types import (Binding, BaseCType, OptionalCType, tensorT, intT,

            

Reported by Pylint.

third party import "from tools.codegen.api.autograd import Derivative, DifferentiabilityInfo, SavedAttribute, uses_retain_variables, uses_single_grad" should be placed before "from .gen_inplace_or_view_type import VIEW_FUNCTIONS"
Error

Line: 11 Column: 1

              
from typing import List, Sequence, Tuple

from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
                                        SavedAttribute, uses_retain_variables,
                                        uses_single_grad)
from tools.codegen.api.types import (Binding, BaseCType, OptionalCType, tensorT, intT,
                                     doubleT, scalarT, stringT, boolT, intArrayRefT,
                                     tensorListT, MutRefCType, ListCType, ArrayRefCType)

            

Reported by Pylint.

third party import "from tools.codegen.api.types import Binding, BaseCType, OptionalCType, tensorT, intT, doubleT, scalarT, stringT, boolT, intArrayRefT, tensorListT, MutRefCType, ListCType, ArrayRefCType" should be placed before "from .gen_inplace_or_view_type import VIEW_FUNCTIONS"
Error

Line: 14 Column: 1

              from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
                                        SavedAttribute, uses_retain_variables,
                                        uses_single_grad)
from tools.codegen.api.types import (Binding, BaseCType, OptionalCType, tensorT, intT,
                                     doubleT, scalarT, stringT, boolT, intArrayRefT,
                                     tensorListT, MutRefCType, ListCType, ArrayRefCType)
from tools.codegen.code_template import CodeTemplate
from tools.codegen.gen import FileManager
from tools.codegen.model import Argument

            

Reported by Pylint.

third party import "from tools.codegen.code_template import CodeTemplate" should be placed before "from .gen_inplace_or_view_type import VIEW_FUNCTIONS"
Error

Line: 17 Column: 1

              from tools.codegen.api.types import (Binding, BaseCType, OptionalCType, tensorT, intT,
                                     doubleT, scalarT, stringT, boolT, intArrayRefT,
                                     tensorListT, MutRefCType, ListCType, ArrayRefCType)
from tools.codegen.code_template import CodeTemplate
from tools.codegen.gen import FileManager
from tools.codegen.model import Argument

FUNCTION_DECLARATION = CodeTemplate("""\
struct TORCH_API ${op} : public ${superclass} {

            

Reported by Pylint.

third party import "from tools.codegen.gen import FileManager" should be placed before "from .gen_inplace_or_view_type import VIEW_FUNCTIONS"
Error

Line: 18 Column: 1

                                                   doubleT, scalarT, stringT, boolT, intArrayRefT,
                                     tensorListT, MutRefCType, ListCType, ArrayRefCType)
from tools.codegen.code_template import CodeTemplate
from tools.codegen.gen import FileManager
from tools.codegen.model import Argument

FUNCTION_DECLARATION = CodeTemplate("""\
struct TORCH_API ${op} : public ${superclass} {
  using ${superclass}::${superclass};

            

Reported by Pylint.

torch/jit/__init__.py
29 issues
Access to a protected member _c of a client class
Error

Line: 67 Column: 37

                      have a LiteScriptModule and want to get the currently present
        list of ops call _export_operator_list instead.
    """
    return torch._C._export_opnames(m._c)


# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")

            

Reported by Pylint.
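
The protected-access findings in torch/jit/__init__.py are deliberate: the module is part of the same project as the private torch._C bindings it calls. Recording that intent with a per-line pragma keeps the check active elsewhere; a sketch on the first occurrence:

    return torch._C._export_opnames(m._c)  # pylint: disable=protected-access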

Access to a protected member _export_opnames of a client class
Error

Line: 67 Column: 12

                      have a LiteScriptModule and want to get the currently present
        list of ops call _export_operator_list instead.
    """
    return torch._C._export_opnames(m._c)


# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 67 Column: 12

                      have a LiteScriptModule and want to get the currently present
        list of ops call _export_operator_list instead.
    """
    return torch._C._export_opnames(m._c)


# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 71 Column: 9

              

# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
Error.__name__ = "Error"
Error.__qualname__ = "Error"


            

Reported by Pylint.

Unused argument 'the_type'
Error

Line: 78 Column: 14

              Error.__qualname__ = "Error"

# for use in python if using annotate
def annotate(the_type, the_value):
    """
    This method is a pass-through function that returns `the_value`, used to hint TorchScript
    compiler the type of `the_value`. It is a no-op when running outside of TorchScript.

    Though TorchScript can infer correct type for most Python expressions, there are some cases where

            

Reported by Pylint.

Redefining built-in 'isinstance'
Error

Line: 147 Column: 1

              

# for torch.jit.isinstance
def isinstance(obj, target_type):
    """
    This function provides for conatiner type refinement in TorchScript. It can refine
    parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
    ``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int,str,int]]``. It can also
    refine basic types such as bools and ints that are available in TorchScript.

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 194 Column: 32

              # Graph class, so mypy checks need to be skipped.
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
    old_enable_source_ranges = torch._C.Graph.global_print_source_ranges  # type: ignore[attr-defined]
    try:
        torch._C.Graph.set_global_print_source_ranges(False)  # type: ignore[attr-defined]
        yield
    finally:
        torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 196 Column: 9

              def _hide_source_ranges() -> Iterator[None]:
    old_enable_source_ranges = torch._C.Graph.global_print_source_ranges  # type: ignore[attr-defined]
    try:
        torch._C.Graph.set_global_print_source_ranges(False)  # type: ignore[attr-defined]
        yield
    finally:
        torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]



            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 199 Column: 9

                      torch._C.Graph.set_global_print_source_ranges(False)  # type: ignore[attr-defined]
        yield
    finally:
        torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]


if not torch._C._jit_init():
    raise RuntimeError("JIT initialization failed")

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 202 Column: 8

                      torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]


if not torch._C._jit_init():
    raise RuntimeError("JIT initialization failed")

            

Reported by Pylint.

test/jit/test_backend_nnapi.py
29 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              import sys
import unittest

import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN


            

Reported by Pylint.

Unable to import 'torch._C'
Error

Line: 6 Column: 1

              import unittest

import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN

# Make the helper files in test/ importable

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 9 Column: 1

              import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)


            

Reported by Pylint.

String statement has no effect
Error

Line: 22 Column: 1

                      "instead."
    )

"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.

            

Reported by Pylint.
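
The flagged triple-quoted block sits after executable code, so Python treats it as a no-op expression statement rather than the module docstring. Moving the text to the top of the file, or rewriting it as comments where it stands, resolves the finding; a sketch of the comment form, keeping the original wording:

    # Unit Tests for Nnapi backend with delegate.
    # Inherits most tests from TestNNAPI, which loads Android NNAPI models
    # without the delegate API.

    # First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.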

Access to a protected member _jit_to_backend of a client class
Error

Line: 53 Column: 16

                  # Override
    def call_lowering_to_nnapi(self, traced_module, args):
        compile_spec = {"forward": {"inputs": args}}
        return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)

    def test_tensor_input(self):
        # Lower a simple module
        args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
        module = torch.nn.PReLU()

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 53 Column: 16

                  # Override
    def call_lowering_to_nnapi(self, traced_module, args):
        compile_spec = {"forward": {"inputs": args}}
        return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)

    def test_tensor_input(self):
        # Lower a simple module
        args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
        module = torch.nn.PReLU()

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 81 Column: 13

                      # No forward key
        compile_spec = {"backward": {"inputs": args}}
        with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)

        # No dictionary under the forward key
        compile_spec = {"forward": 1}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "

            

Reported by Pylint.

Access to a protected member _jit_to_backend of a client class
Error

Line: 81 Column: 13

                      # No forward key
        compile_spec = {"backward": {"inputs": args}}
        with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)

        # No dictionary under the forward key
        compile_spec = {"forward": 1}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "

            

Reported by Pylint.

Access to a protected member _jit_to_backend of a client class
Error

Line: 89 Column: 13

                                                  "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
                                    "under it's \"forward\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)

        # No inputs key (in the dictionary under the forward key)
        compile_spec = {"forward": {"not inputs": args}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 89 Column: 13

                                                  "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
                                    "under it's \"forward\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)

        # No inputs key (in the dictionary under the forward key)
        compile_spec = {"forward": {"not inputs": args}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "

            

Reported by Pylint.

torch/fx/passes/split_module.py
29 issues
Unused argument 'root_m'
Error

Line: 28 Column: 5

              # Creates subgraphs out of main graph
def split_module(
    m: GraphModule,
    root_m: torch.nn.Module,
    split_callback: Callable[[torch.fx.node.Node], int],
):
    partitions: Dict[str, Partition] = {}
    orig_nodes: Dict[str, torch.fx.node.Node] = {}


            

Reported by Pylint.

TODO currently placeholders/parameters aren't put into random partitions,
Error

Line: 54 Column: 3

                  for node in m.graph.nodes:
        orig_nodes[node.name] = node

        # TODO currently placeholders/parameters aren't put into random partitions,
        # rather they're added to the graphs where they are used down below
        if node.op in ["placeholder", "get_attr"]:
            continue
        if node.op == 'output':
            torch.fx.graph.map_arg(node.args[0], lambda n: record_cross_partition_use(n, None))

            

Reported by Pylint.

Access to a protected member _fx_partition of a client class
Error

Line: 69 Column: 9

                          partitions[partition_name] = partition = Partition(partition_name)

        partition.node_names.append(node.name)
        node._fx_partition = partition_name

        torch.fx.graph.map_arg(node.args, lambda def_node: record_cross_partition_use(def_node, node))
        torch.fx.graph.map_arg(node.kwargs, lambda def_node: record_cross_partition_use(def_node, node))  # noqa: B950

    # find partitions with no dependencies

            

Reported by Pylint.

Cell variable node defined in loop
Error

Line: 71 Column: 97

                      partition.node_names.append(node.name)
        node._fx_partition = partition_name

        torch.fx.graph.map_arg(node.args, lambda def_node: record_cross_partition_use(def_node, node))
        torch.fx.graph.map_arg(node.kwargs, lambda def_node: record_cross_partition_use(def_node, node))  # noqa: B950

    # find partitions with no dependencies
    root_partitions : List[str] = []
    for partition_name, partition in partitions.items():

            

Reported by Pylint.

Cell variable node defined in loop
Error

Line: 72 Column: 99

                      node._fx_partition = partition_name

        torch.fx.graph.map_arg(node.args, lambda def_node: record_cross_partition_use(def_node, node))
        torch.fx.graph.map_arg(node.kwargs, lambda def_node: record_cross_partition_use(def_node, node))  # noqa: B950

    # find partitions with no dependencies
    root_partitions : List[str] = []
    for partition_name, partition in partitions.items():
        if not len(partition.partitions_dependent_on):

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 95 Column: 13

                  # add placeholders to parititons
    for partition_name in sorted_partitions:
        partition = partitions[partition_name]
        for input in partition.inputs:
            placeholder = partition.graph.placeholder(input)
            placeholder.meta = orig_nodes[input].meta.copy()
            partition.environment[orig_nodes[input]] = placeholder

    # Transform nodes and collect targets for partition's submodule

            

Reported by Pylint.

Access to a protected member _fx_partition of a client class
Error

Line: 103 Column: 36

                  # Transform nodes and collect targets for partition's submodule
    for node in m.graph.nodes:
        if hasattr(node, '_fx_partition'):
            partition = partitions[node._fx_partition]

            # swap out old graph nodes in kw/args with references to new nodes in this submodule
            environment = partition.environment
            gathered_args = torch.fx.graph.map_arg(node.args, lambda n : environment[n])
            gathered_kwargs = torch.fx.graph.map_arg(node.kwargs, lambda n : environment[n])

            

Reported by Pylint.

Cell variable environment defined in loop
Error

Line: 107 Column: 74

              
            # swap out old graph nodes in kw/args with references to new nodes in this submodule
            environment = partition.environment
            gathered_args = torch.fx.graph.map_arg(node.args, lambda n : environment[n])
            gathered_kwargs = torch.fx.graph.map_arg(node.kwargs, lambda n : environment[n])

            if node.op not in ['call_module', 'get_attr']:
                target = node.target
            else:

            

Reported by Pylint.

Cell variable environment defined in loop
Error

Line: 108 Column: 78

                          # swap out old graph nodes in kw/args with references to new nodes in this submodule
            environment = partition.environment
            gathered_args = torch.fx.graph.map_arg(node.args, lambda n : environment[n])
            gathered_kwargs = torch.fx.graph.map_arg(node.kwargs, lambda n : environment[n])

            if node.op not in ['call_module', 'get_attr']:
                target = node.target
            else:
                target_atoms = node.target.split('.')

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
from torch.fx.graph_module import GraphModule
from typing import Callable, List, Dict, Any, Optional

class Partition:
    def __init__(self, name: str):
        self.name: str = name
        self.node_names: List[str] = []
        self.inputs: Dict[str, None] = {}

            

Reported by Pylint.

test/ao/sparsity/test_scheduler.py
29 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              # -*- coding: utf-8 -*-
from torch import nn
from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity import BaseScheduler, LambdaSL

from torch.testing._internal.common_utils import TestCase

import warnings


            

Reported by Pylint.

Unable to import 'torch.ao.sparsity'
Error

Line: 3 Column: 1

              # -*- coding: utf-8 -*-
from torch import nn
from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity import BaseScheduler, LambdaSL

from torch.testing._internal.common_utils import TestCase

import warnings


            

Reported by Pylint.

Unable to import 'torch.ao.sparsity'
Error

Line: 4 Column: 1

              # -*- coding: utf-8 -*-
from torch import nn
from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity import BaseScheduler, LambdaSL

from torch.testing._internal.common_utils import TestCase

import warnings


            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 6 Column: 1

              from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity import BaseScheduler, LambdaSL

from torch.testing._internal.common_utils import TestCase

import warnings

class ImplementedScheduler(BaseScheduler):
    def get_sl(self):

            

Reported by Pylint.

Access to a protected member _step_count of a client class
Error

Line: 29 Column: 16

                      scheduler = ImplementedScheduler(sparsifier)

        assert scheduler.sparsifier is sparsifier
        assert scheduler._step_count == 1
        assert scheduler.base_sl == [sparsifier.module_groups[0]['sparsity_level']]

    def test_order_of_steps(self):
        """Checks if the warning is thrown if the scheduler step is called
        before the sparsifier step"""

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # -*- coding: utf-8 -*-
from torch import nn
from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity import BaseScheduler, LambdaSL

from torch.testing._internal.common_utils import TestCase

import warnings


            

Reported by Pylint.

standard import "import warnings" should be placed before "from torch import nn"
Error

Line: 8 Column: 1

              
from torch.testing._internal.common_utils import TestCase

import warnings

class ImplementedScheduler(BaseScheduler):
    def get_sl(self):
        if self.last_epoch > 0:
            return [group['sparsity_level'] * 0.5

            

Reported by Pylint.

Missing class docstring
Error

Line: 10 Column: 1

              
import warnings

class ImplementedScheduler(BaseScheduler):
    def get_sl(self):
        if self.last_epoch > 0:
            return [group['sparsity_level'] * 0.5
                    for group in self.sparsifier.module_groups]
        else:

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 10 Column: 1

              
import warnings

class ImplementedScheduler(BaseScheduler):
    def get_sl(self):
        if self.last_epoch > 0:
            return [group['sparsity_level'] * 0.5
                    for group in self.sparsifier.module_groups]
        else:

            

Reported by Pylint.
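
ImplementedScheduler is a small test double, so the default min-public-methods threshold of 2 is not meaningful here; a class-level pragma (or a higher threshold for test code in the Pylint config) is the usual answer. A sketch with illustrative docstrings that also address the missing-docstring findings for this class:

    class ImplementedScheduler(BaseScheduler):  # pylint: disable=too-few-public-methods
        """Toy scheduler used only to exercise BaseScheduler in these tests."""

        def get_sl(self):
            """Halve each group's sparsity level after the first epoch."""
            if self.last_epoch > 0:
                return [group['sparsity_level'] * 0.5
                        for group in self.sparsifier.module_groups]
            return list(self.base_sl)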

Missing function or method docstring
Error

Line: 11 Column: 5

              import warnings

class ImplementedScheduler(BaseScheduler):
    def get_sl(self):
        if self.last_epoch > 0:
            return [group['sparsity_level'] * 0.5
                    for group in self.sparsifier.module_groups]
        else:
            return list(self.base_sl)

            

Reported by Pylint.