The following issues were found:

torch/quantization/fx/convert.py
114 issues
Attempted relative import beyond top-level package
Error

Line: 14 Column: 1

                  Node,
)
from torch.fx.node import Argument
from .quantization_types import Pattern
from ..qconfig import QConfigAny
from .match_utils import (
    find_matches,
)
from .graph_module import (

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 15 Column: 1

              )
from torch.fx.node import Argument
from .quantization_types import Pattern
from ..qconfig import QConfigAny
from .match_utils import (
    find_matches,
)
from .graph_module import (
    is_observed_module,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 16 Column: 1

              from torch.fx.node import Argument
from .quantization_types import Pattern
from ..qconfig import QConfigAny
from .match_utils import (
    find_matches,
)
from .graph_module import (
    is_observed_module,
    is_observed_standalone_module,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

              from .match_utils import (
    find_matches,
)
from .graph_module import (
    is_observed_module,
    is_observed_standalone_module,
    QuantizedGraphModule,
)
from .quantization_patterns import (

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 24 Column: 1

                  is_observed_standalone_module,
    QuantizedGraphModule,
)
from .quantization_patterns import (
    QuantizeHandler,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
    is_get_tensor_info_node,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 27 Column: 1

              from .quantization_patterns import (
    QuantizeHandler,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
    is_get_tensor_info_node,
    node_return_type_is_int,
    quantize_node,
    get_new_attr_name_with_prefix,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 28 Column: 1

                  QuantizeHandler,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
    is_get_tensor_info_node,
    node_return_type_is_int,
    quantize_node,
    get_new_attr_name_with_prefix,
    collect_producer_nodes,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 39 Column: 1

                  WEIGHT_INDEX_DICT,
)

from ..quantize import (
    _remove_qconfig,
    is_activation_post_process,
)
from ..utils import (
    activation_is_statically_quantized,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 43 Column: 1

                  _remove_qconfig,
    is_activation_post_process,
)
from ..utils import (
    activation_is_statically_quantized,
    activation_dtype,
)

# weight prepacking ops

            

Reported by Pylint.
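
Note on the relative-import findings above: Pylint reports "attempted relative import beyond top-level package" when it analyzes convert.py as a standalone module instead of as part of the torch.quantization.fx package, so an import such as "from ..qconfig import QConfigAny" appears to climb past the top level. Running Pylint against the package root usually clears these; alternatively the imports can be made absolute. A minimal sketch of the absolute form, mapping the relative paths mechanically (illustrative, not verified against the source tree):

    # Assumed absolute equivalents for a file at torch/quantization/fx/convert.py:
    from torch.quantization.fx.quantization_types import Pattern
    from torch.quantization.fx.match_utils import find_matches
    from torch.quantization.qconfig import QConfigAny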

Module 'torch' has no 'dtype' member
Error

Line: 199 Column: 34

                  run_weight_observers(model)

    quantized_graph = Graph()
    env: Dict[str, Dict[Optional[torch.dtype], Node]] = defaultdict(lambda: defaultdict(Node))  # type: ignore[arg-type]

    graph_inputs: List[str] = []
    for node in model.graph.nodes:
        if node.op == 'placeholder':
            graph_inputs.append(node.name)

            

Reported by Pylint.

torch/autograd/profiler_util.py
113 issues
Unused variable 'thread_id'
Error

Line: 93 Column: 13

                      #
        # Algorithm has O(N * log(N)) complexity where N is number of
        # intervals
        for thread_id, thread_events in threads:
            thread_events_ = sorted(
                thread_events,
                key=lambda event: [event.time_range.start, -event.time_range.end],
            )
            current_events: List[FunctionEvent] = []

            

Reported by Pylint.

Unused variable 'cur_end'
Error

Line: 99 Column: 13

                              key=lambda event: [event.time_range.start, -event.time_range.end],
            )
            current_events: List[FunctionEvent] = []
            cur_end = 0
            for event in thread_events_:
                while len(current_events) > 0:
                    parent = current_events[-1]
                    if event.time_range.start >= parent.time_range.end or \
                            event.time_range.end > parent.time_range.end:

            

Reported by Pylint.
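
Note on the two unused-variable findings above: loop targets that are needed only for tuple unpacking can be renamed with a leading underscore, and a dead initializer such as cur_end = 0 can simply be deleted if nothing reads it. A minimal sketch of the underscore convention, using toy data rather than the profiler's real event structures:

    threads = {0: [3, 1, 2], 1: [5, 4]}.items()
    for _thread_id, thread_events in threads:   # leading underscore marks it intentionally unused
        ordered = sorted(thread_events)
        print(ordered)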

Unused variable 'chrome_events'
Error

Line: 187 Column: 13

                      """
        import os
        with open(path, 'w') as f:
            chrome_events = []
            next_id = 0
            # Use file IO over using json.dump since JSON dumping is very slow and
            # this technique is proven to give a 4x speedup.
            f.write("[")
            for evt in self:

            

Reported by Pylint.

Unused variable 'k'
Error

Line: 212 Column: 21

                                      else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "',
                    )
                )
                for k in evt.kernels:
                    # 's' and 'f' draw Flow arrows from
                    # the CPU launch to the GPU kernel
                    f.write('{"name": "%s", '
                            '"ph": "s", '
                            '"ts": %s, '

            

Reported by Pylint.

Unused variable '<lambda>'
Error

Line: 336 Column: 12

                      return str(nbytes) + ' b'

def _attr_formatter(name):
    return property(lambda self: _format_time(getattr(self, name)))


class FormattedTimesMixin(object):
    """Helpers for FunctionEvent and FunctionEventAvg.


            

Reported by Pylint.

Redefining built-in 'id'
Error

Line: 375 Column: 19

              class FunctionEvent(FormattedTimesMixin):
    """Profiling information about a single function."""
    def __init__(
            self, id, name, thread, start_us, end_us, fwd_thread=None, input_shapes=None,
            stack=None, scope=0, cpu_memory_usage=0, cuda_memory_usage=0, is_async=False,
            is_remote=False, sequence_nr=-1, node_id=-1, device_type=DeviceType.CPU, device_index=0,
            is_legacy=False, flops=None, trace_name=None):
        self.id: int = id
        self.node_id: int = node_id

            

Reported by Pylint.
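
Note: the constructor parameter named id shadows the built-in of the same name inside __init__. If the keyword must stay for backward compatibility, a scoped disable is the least invasive option; otherwise the parameter can be renamed. A hedged sketch of both options with illustrative class names, not the real FunctionEvent signature:

    class EventKeepName:
        def __init__(self, id, name):  # pylint: disable=redefined-builtin
            self.id = id
            self.name = name

    class EventRenamed:
        def __init__(self, event_id, name):
            self.id = event_id
            self.name = name

    print(EventKeepName(1, "relu").id, EventRenamed(2, "conv").id)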

Missing module docstring
Error

Line: 1 Column: 1

              import itertools
import torch
from torch.autograd import DeviceType

from collections import defaultdict, namedtuple
from operator import attrgetter

from typing import Dict, List, Tuple, Optional


            

Reported by Pylint.

standard import "from collections import defaultdict, namedtuple" should be placed before "import torch"
Error

Line: 5 Column: 1

              import torch
from torch.autograd import DeviceType

from collections import defaultdict, namedtuple
from operator import attrgetter

from typing import Dict, List, Tuple, Optional

import bisect

            

Reported by Pylint.

standard import "from operator import attrgetter" should be placed before "import torch"
Error

Line: 6 Column: 1

              from torch.autograd import DeviceType

from collections import defaultdict, namedtuple
from operator import attrgetter

from typing import Dict, List, Tuple, Optional

import bisect
import math

            

Reported by Pylint.

standard import "from typing import Dict, List, Tuple, Optional" should be placed before "import torch"
Error

Line: 8 Column: 1

              from collections import defaultdict, namedtuple
from operator import attrgetter

from typing import Dict, List, Tuple, Optional

import bisect
import math



            

Reported by Pylint.
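
Note on the last four findings for this file: they share one fix, namely a docstring as the very first statement and standard-library imports grouped ahead of third-party ones. A sketch of the expected module header, reusing only the imports shown in the snippets (the docstring text is a placeholder):

    """Utilities for post-processing autograd profiler events."""
    import bisect
    import itertools
    import math
    from collections import defaultdict, namedtuple
    from operator import attrgetter
    from typing import Dict, List, Optional, Tuple

    import torch
    from torch.autograd import DeviceType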

caffe2/python/examples/imagenet_trainer.py
112 issues
Too many arguments for format string
Error

Line: 107 Column: 14

              

def SaveModel(args, train_model, epoch, use_ideep):
    prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0])
    predictor_export_meta = pred_exp.PredictorExportMeta(
        predict_net=train_model.net.Proto(),
        parameters=data_parallel_model.GetCheckpointParams(train_model),
        inputs=[prefix + "/data"],
        outputs=[prefix + "/softmax"],

            

Reported by Pylint.
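
Note: this one looks like a real bug rather than a style issue. The format string "[]_{}" contains a single replacement field but receives two arguments, so the device prefix is silently dropped and a literal "[]" ends up in the blob name; the intended string was presumably "{}_{}". A hedged sketch of the likely fix, with stand-in values instead of the model's real attributes:

    device_prefix, first_device = "gpu", 0   # stand-ins for train_model._device_prefix / _devices[0]
    prefix = "{}_{}".format(device_prefix, first_device)
    assert prefix == "gpu_0"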

String statement has no effect
Error

Line: 20 Column: 1

              import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants

'''
Parallelized multi-GPU distributed trainer for Resne(X)t & Shufflenet.
Can be used to train on imagenet data, for example.
The default parameters can train a standard Resnet-50 (1x64d), and parameters
can be provided to train ResNe(X)t models (e.g., ResNeXt-101 32x4d).


            

Reported by Pylint.
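
Note: the triple-quoted description sits after the imports, so Python treats it as an expression statement with no effect instead of the module docstring. Moving it to the first statement of the file resolves the warning. A minimal sketch:

    """Parallelized multi-GPU distributed trainer for Resne(X)t & Shufflenet."""  # first statement

    import logging   # placeholder: the module's real imports follow the docstring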

Unused variable 'label'
Error

Line: 61 Column: 11

                  The image input operator loads image and label data from the reader and
    applies transformations to the images (random cropping, mirroring, ...).
    '''
    data, label = brew.image_input(
        model,
        reader, ["data", "label"],
        batch_size=batch_size,
        output_type=dtype,
        use_gpu_transform=True if core.IsGPUDeviceType(model._device_type) else False,

            

Reported by Pylint.

Access to a protected member _device_type of a client class
Error

Line: 66 Column: 56

                      reader, ["data", "label"],
        batch_size=batch_size,
        output_type=dtype,
        use_gpu_transform=True if core.IsGPUDeviceType(model._device_type) else False,
        use_caffe_datum=True,
        mean_per_channel=mean_per_channel,
        std_per_channel=std_per_channel,
        # mean_per_channel takes precedence over mean
        mean=128.,

            

Reported by Pylint.

Unused argument 'reader'
Error

Line: 82 Column: 25

                  data = model.StopGradient(data, data)


def AddNullInput(model, reader, batch_size, img_size, dtype):
    '''
    The null input function uses a gaussian fill operator to emulate real image
    input. A label blob is hardcoded to a single value. This is useful if you
    want to test compute throughput or don't have a dataset available.
    '''

            

Reported by Pylint.
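
Note: AddNullInput appears to keep the reader argument only so its signature matches the other input functions. Prefixing the name with an underscore (or adding a scoped disable) documents that and quiets the checker. A self-contained sketch of the signature-compatibility pattern, with hypothetical function names:

    def add_real_input(model, reader, batch_size):
        return "real:{}:{}:{}".format(model, reader, batch_size)

    def add_null_input(model, _reader, batch_size):
        # Same call signature as add_real_input; the reader is deliberately ignored.
        return "null:{}:{}".format(model, batch_size)

    for fn in (add_real_input, add_null_input):
        print(fn("m", "r", 32))   # interchangeable at the call site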

caffe2/python/operator_test/layer_norm_op_test.py
111 issues
Unable to import 'hypothesis'
Error

Line: 9 Column: 1

              from caffe2.python import brew, core, workspace
from caffe2.python.model_helper import ModelHelper
from functools import partial
from hypothesis import given, settings
from typing import Optional, Tuple

import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 14 Column: 1

              
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st

import numpy as np
import torch

import unittest

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 275 Column: 55

                          expected_norm, expected_mean, expected_std = \
                _layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X), torch.tensor(gamma), torch.tensor(beta),
                axis, eps, True)
        else:
            expected_norm, expected_mean, expected_std = _layer_norm_ref(
                axis, eps, X)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 275 Column: 17

                          expected_norm, expected_mean, expected_std = \
                _layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X), torch.tensor(gamma), torch.tensor(beta),
                axis, eps, True)
        else:
            expected_norm, expected_mean, expected_std = _layer_norm_ref(
                axis, eps, X)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 275 Column: 34

                          expected_norm, expected_mean, expected_std = \
                _layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X), torch.tensor(gamma), torch.tensor(beta),
                axis, eps, True)
        else:
            expected_norm, expected_mean, expected_std = _layer_norm_ref(
                axis, eps, X)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 281 Column: 17

                          expected_norm, expected_mean, expected_std = _layer_norm_ref(
                axis, eps, X)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X), None, None, axis, eps)

        torch.testing.assert_allclose(
            expected_norm, actual_norm, rtol=1e-4, atol=1e-4)
        torch.testing.assert_allclose(expected_mean, actual_mean)
        torch.testing.assert_allclose(expected_std, actual_std)

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 304 Column: 17

                          expected_norm, expected_mean, expected_std = \
                _layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X).cuda(),
                torch.tensor(gamma).cuda(),
                torch.tensor(beta).cuda(),
                axis,
                eps,
                True)

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 305 Column: 17

                              _layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X).cuda(),
                torch.tensor(gamma).cuda(),
                torch.tensor(beta).cuda(),
                axis,
                eps,
                True)
        else:

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 306 Column: 17

                          actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X).cuda(),
                torch.tensor(gamma).cuda(),
                torch.tensor(beta).cuda(),
                axis,
                eps,
                True)
        else:
            expected_norm, expected_mean, expected_std = _layer_norm_ref(

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 314 Column: 17

                          expected_norm, expected_mean, expected_std = _layer_norm_ref(
                axis, eps, X)
            actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
                torch.tensor(X).cuda(), None, None, axis, eps)

        torch.testing.assert_allclose(
            expected_norm, actual_norm.cpu(), rtol=1e-4, atol=1e-4)
        torch.testing.assert_allclose(expected_mean, actual_mean.cpu())
        torch.testing.assert_allclose(expected_std, actual_std.cpu())

            

Reported by Pylint.
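
Note on the repeated "Module 'torch' has no 'tensor' member" findings (and the earlier torch.dtype one): these are almost certainly false positives, because the attributes come from torch's C extension and are invisible to static inspection. The usual remedy is a Pylint configuration entry such as extension-pkg-allow-list=torch or generated-members=torch.*, or a per-line suppression when only a few sites are affected. A minimal sketch of the latter:

    import torch

    x = torch.tensor([1.0, 2.0, 3.0])  # pylint: disable=no-member
    print(x.dtype)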

test/distributed/test_store.py
111 issues
Unable to import 'torch'
Error

Line: 9 Column: 1

              from datetime import timedelta
from sys import platform

import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc

if not dist.is_available():
    print("torch.distributed not available, skipping tests", file=sys.stderr)

            

Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 10 Column: 1

              from sys import platform

import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc

if not dist.is_available():
    print("torch.distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)

            

Reported by Pylint.

Unable to import 'torch.distributed.rpc'
Error

Line: 11 Column: 1

              
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc

if not dist.is_available():
    print("torch.distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)


            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 17 Column: 1

                  print("torch.distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)

import torch.testing._internal.common_utils as common
from torch._six import string_classes
from torch.testing._internal.common_distributed import (
    skip_if_win32,
    create_tcp_store
)

            

Reported by Pylint.

Unable to import 'torch._six'
Error

Line: 18 Column: 1

                  sys.exit(0)

import torch.testing._internal.common_utils as common
from torch._six import string_classes
from torch.testing._internal.common_distributed import (
    skip_if_win32,
    create_tcp_store
)
from torch.testing._internal.common_utils import (

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_distributed'
Error

Line: 19 Column: 1

              
import torch.testing._internal.common_utils as common
from torch._six import string_classes
from torch.testing._internal.common_distributed import (
    skip_if_win32,
    create_tcp_store
)
from torch.testing._internal.common_utils import (
    TestCase,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 23 Column: 1

                  skip_if_win32,
    create_tcp_store
)
from torch.testing._internal.common_utils import (
    TestCase,
    load_tests,
    run_tests,
    retry_on_connect_failures,
    ADDRESS_IN_USE,

            

Reported by Pylint.

Instance of 'StoreTestBase' has no 'assertEqual' member
Error

Line: 79 Column: 9

                      fs.add("key3", 4)
        fs.add("key3", 5)
        fs.add("key3", 6)
        self.assertEqual(fs.num_keys(), self.num_keys_total)
        self.assertEqual(b"6", fs.get("key"))
        self.assertEqual(b"value0", fs.get("key0"))
        self.assertEqual(b"value1", fs.get("key1"))
        self.assertEqual(b"value2", fs.get("key2"))
        self.assertEqual(b"21", fs.get("key3"))

            

Reported by Pylint.

Instance of 'StoreTestBase' has no 'assertEqual' member
Error

Line: 80 Column: 9

                      fs.add("key3", 5)
        fs.add("key3", 6)
        self.assertEqual(fs.num_keys(), self.num_keys_total)
        self.assertEqual(b"6", fs.get("key"))
        self.assertEqual(b"value0", fs.get("key0"))
        self.assertEqual(b"value1", fs.get("key1"))
        self.assertEqual(b"value2", fs.get("key2"))
        self.assertEqual(b"21", fs.get("key3"))


            

Reported by Pylint.

Instance of 'StoreTestBase' has no 'assertEqual' member
Error

Line: 81 Column: 9

                      fs.add("key3", 6)
        self.assertEqual(fs.num_keys(), self.num_keys_total)
        self.assertEqual(b"6", fs.get("key"))
        self.assertEqual(b"value0", fs.get("key0"))
        self.assertEqual(b"value1", fs.get("key1"))
        self.assertEqual(b"value2", fs.get("key2"))
        self.assertEqual(b"21", fs.get("key3"))

    def test_set_get(self):

            

Reported by Pylint.
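
Note on the assertEqual findings: StoreTestBase is written as a mixin, so assertEqual exists at runtime on the concrete classes that also derive from unittest.TestCase, but Pylint cannot prove that from the base class alone. Either give the mixin the interface it relies on or suppress no-member for it. A self-contained sketch of the mixin arrangement, with hypothetical class names:

    import unittest

    class StoreChecksMixin:
        """Shared assertions; assumes the concrete class also derives from TestCase."""
        def check_store(self, store):
            self.assertEqual(store["key"], "value")  # pylint: disable=no-member

    class DictStoreTest(StoreChecksMixin, unittest.TestCase):
        def test_basic(self):
            self.check_store({"key": "value"})

    if __name__ == "__main__":
        unittest.main()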

caffe2/python/operator_test/matmul_op_test.py
111 issues
Unable to import 'hypothesis'
Error

Line: 10 Column: 1

              
import numpy as np

from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial


            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 11 Column: 1

              import numpy as np

from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial



            

Reported by Pylint.

Using deprecated method getargspec()
Error

Line: 170 Column: 23

                      # relaxing the "threshold" for fp16 to 150x of the default
        def relax_fp16_check(check_func, *args, **kwargs):
            # inspect the default "threshold" value in check_func
            argspec = inspect.getargspec(check_func)
            threshold = argspec.defaults[
                argspec.args.index('threshold') -
                (len(argspec.args) - len(argspec.defaults))]

            if dtype == np.float16:

            

Reported by Pylint.
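
Note: inspect.getargspec() has been deprecated since Python 3.0 and was removed in 3.11; inspect.getfullargspec() exposes the same args/defaults fields used here. A sketch of the default-value lookup with the newer API, using a toy check function:

    import inspect

    def check_func(x, threshold=1e-4, atol=1e-6):
        return abs(x) < threshold + atol

    spec = inspect.getfullargspec(check_func)   # drop-in replacement for getargspec
    threshold = spec.defaults[
        spec.args.index("threshold") - (len(spec.args) - len(spec.defaults))
    ]
    assert threshold == 1e-4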

TODO: test trans_a and trans_b
Error

Line: 253 Column: 3

                  def test_numpy_batch_matmul_1d(self, K, gc, dc):
        dtype = np.float32
        X = np.random.rand(K).astype(dtype) - 0.5
        # TODO: test trans_a and trans_b
        Y = np.random.rand(K).astype(dtype) - 0.5

        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)

    @settings(max_examples=30, deadline=None)

            

Reported by Pylint.

TODO: test trans_a and trans_b
Error

Line: 267 Column: 3

                  def test_numpy_batch_matmul_1d_2d(self, K, N, gc, dc):
        dtype = np.float32
        X = np.random.rand(K).astype(dtype) - 0.5
        # TODO: test trans_a and trans_b
        Y = np.random.rand(*[K, N]).astype(dtype) - 0.5

        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)

    @settings(max_examples=30, deadline=None)

            

Reported by Pylint.

TODO: test trans_a and trans_b
Error

Line: 281 Column: 3

                  def test_numpy_batch_matmul_2d_1d(self, M, K, gc, dc):
        dtype = np.float32
        X = np.random.rand(*[M, K]).astype(dtype) - 0.5
        # TODO: test trans_a and trans_b
        Y = np.random.rand(K).astype(dtype) - 0.5

        self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)



            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




import inspect

import numpy as np


            

Reported by Pylint.

Missing class docstring
Error

Line: 17 Column: 1

              import caffe2.python.serialized_test.serialized_test_util as serial


class TestMatMul(serial.SerializedTestCase):
    @serial.given(
        M=st.integers(min_value=1, max_value=10),
        K=st.integers(min_value=1, max_value=10),
        N=st.integers(min_value=1, max_value=10),
        trans_a=st.booleans(),

            

Reported by Pylint.

Argument name "M" doesn't conform to snake_case naming style
Error

Line: 25 Column: 5

                      trans_a=st.booleans(),
        trans_b=st.booleans(),
        **hu.gcs
    )
    def test_matmul(self, M, K, N, trans_a, trans_b, gc, dc):
        X = np.random.rand(M, K).astype(np.float32) - 0.5
        if trans_a:
            X = X.transpose()


            

Reported by Pylint.

Argument name "K" doesn't conform to snake_case naming style
Error

Line: 25 Column: 5

                      trans_a=st.booleans(),
        trans_b=st.booleans(),
        **hu.gcs
    )
    def test_matmul(self, M, K, N, trans_a, trans_b, gc, dc):
        X = np.random.rand(M, K).astype(np.float32) - 0.5
        if trans_a:
            X = X.transpose()


            

Reported by Pylint.
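
Note: single-letter dimension names such as M, K and N are conventional in matmul tests. Rather than renaming them, projects usually widen good-names in the Pylint configuration or disable the check on the affected function. A sketch of the local disable, with a toy function body:

    def make_operands(M, K, N):  # pylint: disable=invalid-name
        """Return the shapes for an (M x K) @ (K x N) matmul."""
        return (M, K), (K, N)

    assert make_operands(2, 3, 4) == ((2, 3), (3, 4))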

test/jit/test_python_builtins.py
110 issues
Unable to import 'torch'
Error

Line: 7 Column: 1

              import random
from textwrap import dedent

import torch
from torch.testing._internal.jit_utils import JitTestCase, execWrapper

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 8 Column: 1

              from textwrap import dedent

import torch
from torch.testing._internal.jit_utils import JitTestCase, execWrapper

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)


            

Reported by Pylint.

function already defined line 414
Error

Line: 421 Column: 9

                          self.assertTrue(foo(s))

        @torch.jit.script
        def foo(a):
            return 0.5 == float('0.5')
        s = torch.rand(1)
        self.assertTrue(foo(s))

        @torch.jit.script

            

Reported by Pylint.

function already defined line 414
Error

Line: 427 Column: 9

                      self.assertTrue(foo(s))

        @torch.jit.script
        def foo(a):
            return 0. == float('0')
        s = torch.rand(1)
        self.assertTrue(foo(s))

            

Reported by Pylint.

Use of exec
Error

Line: 149 Column: 9

              
    def _check_code(self, code_str, fn_name, inputs):
        scope = {}
        exec(code_str, globals(), scope)
        cu = torch.jit.CompilationUnit(code_str)
        self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))

    def test_stepped_tuple_slicing(self):
        def check_slicing_tuple(slicing, tuple_type, tuple):

            

Reported by Pylint.

Use of exec detected.
Security

Line: 149
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

              
    def _check_code(self, code_str, fn_name, inputs):
        scope = {}
        exec(code_str, globals(), scope)
        cu = torch.jit.CompilationUnit(code_str)
        self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))

    def test_stepped_tuple_slicing(self):
        def check_slicing_tuple(slicing, tuple_type, tuple):

            

Reported by Bandit.
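
Note: the Pylint exec-used finding and the Bandit B102 finding point at the same helper, which executes test-authored source so the compiled and eager versions of a function can be compared. If that use is accepted, the call can be confined to a private namespace and annotated for both tools. A minimal sketch:

    def run_snippet(code_str, fn_name, *inputs):
        scope = {}
        # Deliberate use of exec on test-authored source only.
        exec(code_str, {}, scope)  # nosec  # pylint: disable=exec-used
        return scope[fn_name](*inputs)

    assert run_snippet("def add(a, b):\n    return a + b", "add", 2, 3) == 5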

Redefining built-in 'tuple'
Error

Line: 154 Column: 54

                      self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))

    def test_stepped_tuple_slicing(self):
        def check_slicing_tuple(slicing, tuple_type, tuple):
            template = dedent("""
            def func(x):
                # type: ({}) -> Any
                return x{}
            """)

            

Reported by Pylint.

Unused argument 'start'
Error

Line: 173 Column: 26

                      check_slicing_tuple("[3::-2]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))

    def test_index(self):
        def consec(size, start=0):
            numel = torch.tensor(size).prod().item()
            return torch.arange(numel).view(size)

        def check_indexing(indexing, tensor):
            template = dedent("""

            

Reported by Pylint.

Unused argument 'start'
Error

Line: 251 Column: 26

                      check_dynamic_indexing("[i:j, i]", consec((3, 3, 2)), 0, 2)

    def test_advancedindex(self):
        def consec(size, start=0):
            numel = torch.tensor(size).prod().item()
            return torch.arange(numel).view(size)

        def check_indexing(indexing, tensor, **kwargs):
            indices_dict = kwargs

            

Reported by Pylint.

XXX: When we can index with sequences, replace these cases with
Error

Line: 284 Column: 3

                      # NB: indexing with tensors and indexing with sequences can be implemented
        # in a very similar way (sequences are converted to tensors), so only one
        # case needs to be tested extensively.
        # XXX: When we can index with sequences, replace these cases with
        # sequence indexing expressions; those are much easier to read.

        # Misc sequence advanced indexing
        inp = consec((4, 8, 5))
        to_check = [

            

Reported by Pylint.

tools/autograd/gen_variable_type.py
110 issues
Attempted relative import beyond top-level package
Error

Line: 25 Column: 1

              #     which will in turn dispatch back to VariableType for its
#     differentiable subcomponents.
#
from .context import with_native_function_with_differentiability_info
from .gen_trace_type import (
    MANUAL_BACKEND, MANUAL_AUTOGRAD_AND_TRACER, declare_returned_variables,
    tie_return_values, get_return_value, type_wrapper_name,
)
from .gen_inplace_or_view_type import (

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 26 Column: 1

              #     differentiable subcomponents.
#
from .context import with_native_function_with_differentiability_info
from .gen_trace_type import (
    MANUAL_BACKEND, MANUAL_AUTOGRAD_AND_TRACER, declare_returned_variables,
    tie_return_values, get_return_value, type_wrapper_name,
)
from .gen_inplace_or_view_type import (
    get_view_info, is_tensor_type, is_tensor_list_type, unpack_args, get_base_name,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 30 Column: 1

                  MANUAL_BACKEND, MANUAL_AUTOGRAD_AND_TRACER, declare_returned_variables,
    tie_return_values, get_return_value, type_wrapper_name,
)
from .gen_inplace_or_view_type import (
    get_view_info, is_tensor_type, is_tensor_list_type, unpack_args, get_base_name,
    use_derived, modifies_arguments, WRAPPER_REGISTRATION, TMP_VAR, METHOD_DEFINITION,
    ASSIGN_RETURN_VALUE, gen_formals, ALL_VIEW_FUNCTIONS, unpacked_name
)


            

Reported by Pylint.

Unused argument 'native_yaml_path'
Error

Line: 360 Column: 5

              
def gen_variable_type(
    out: str,
    native_yaml_path: str,
    fns_with_diff_infos: List[NativeFunctionWithDifferentiabilityInfo],
    template_path: str,
) -> None:

    """VariableType.h and VariableType.cpp body

            

Reported by Pylint.

TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove.
Error

Line: 453 Column: 3

                          return None
        a: Argument = arg.argument if isinstance(arg, SelfArgument) else arg

        # TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove.
        # NB: This is not a clone of cpp.argument() - TensorOptionsArguments / faithful / binds are
        # not handled properly as they are irrelevant for this codegen.
        cpp_type = cpp.argument_type(a, binds=a.name).cpp_type()

        if not is_differentiable(a.name, a.type, info):

            

Reported by Pylint.

Using possibly undefined loop variable 'edge_off'
Error

Line: 546 Column: 54

                          else:
                raise AssertionError()

            return f'grad_fn->should_compute_output({edge_off})'

        setup.extend(save_variables(info.all_saved_inputs, False, guard_for))
        for arg in args_with_derivatives:
            if is_tensor_list_type(arg.type):
                setup.append(f'grad_fn->{arg.name}_size_ = {arg.name}.size();')

            

Reported by Pylint.
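
Note: edge_off is assigned inside a loop and read in the f-string after it, so Pylint cannot prove it is always bound; an empty loop or an unmatched case would leave it undefined. Binding it before the loop and raising explicitly when nothing matches makes the invariant visible. A generic sketch of the pattern, not the codegen's real logic:

    def find_offset(names, target):
        offset = None                       # bind before the loop
        for i, name in enumerate(names):
            if name == target:
                offset = i
                break
        if offset is None:                  # make the "not found" case explicit
            raise AssertionError(f"{target!r} not found")
        return f"grad_fn->should_compute_output({offset})"

    print(find_offset(["self", "other"], "other"))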

TODO: should be `arg.type.is_tensor_like()`?
Error

Line: 588 Column: 3

                          return body
        for arg in differentiable_outputs:
            name = arg.name
            # TODO: should be `arg.type.is_tensor_like()`?
            if arg.cpp_type in ['at::Tensor', 'at::TensorList', 'const c10::List<c10::optional<at::Tensor>> &']:
                body.append(f'throw_error_for_complex_autograd({name}, "{base_name}");')
        return body

    def emit_check_no_requires_grad(

            

Reported by Pylint.

Second argument of isinstance is not a type
Error

Line: 637 Column: 44

                      # assign the saved variables to the generated grad_fn
        stmts: List[str] = []
        for arg in saved_variables:
            name = arg.nctype.name.name if isinstance(arg.nctype.name, SpecialArgName) else arg.nctype.name
            type = arg.nctype.type
            expr = arg.expr
            stmts_prepend = None
            if type == BaseCType(tensorT) or type == OptionalCType(BaseCType(tensorT)) or \
                    type == MutRefCType(OptionalCType(BaseCType(tensorT))) or (is_output and type == BaseCType(scalarT)):

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 638 Column: 13

                      stmts: List[str] = []
        for arg in saved_variables:
            name = arg.nctype.name.name if isinstance(arg.nctype.name, SpecialArgName) else arg.nctype.name
            type = arg.nctype.type
            expr = arg.expr
            stmts_prepend = None
            if type == BaseCType(tensorT) or type == OptionalCType(BaseCType(tensorT)) or \
                    type == MutRefCType(OptionalCType(BaseCType(tensorT))) or (is_output and type == BaseCType(scalarT)):
                var = name

            

Reported by Pylint.

Unused argument 'input_base'
Error

Line: 681 Column: 47

                  #  - Pre-compute the full DispatchKeySet. This saves the dispatcher from having to read from TLS.
    #  - redispatch() avoids a redundant call to RecordFunction, which was already called right before
    #    we entered this autograd kernel.
    def emit_dispatch_call(f: NativeFunction, input_base: str, unpacked_args: Sequence[str]) -> str:
        """ Dispatch call via function in a namespace or method on Tensor."""
        dispatcher_sig = DispatcherSignature.from_schema(f.func)
        dispatcher_exprs = dispatcher_sig.exprs()

        # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.

            

Reported by Pylint.

test/test_python_dispatch.py
109 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_map

from typing import Iterator, List
import logging
import contextlib
import itertools


            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 2 Column: 1

              import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_map

from typing import Iterator, List
import logging
import contextlib
import itertools


            

Reported by Pylint.

Unable to import 'torch.utils._pytree'
Error

Line: 3 Column: 1

              import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_map

from typing import Iterator, List
import logging
import contextlib
import itertools


            

Reported by Pylint.

Too many arguments for logging format string
Error

Line: 88 Column: 5

                      self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})')

def log_input(name: str, var: object):
    logging.getLogger("LoggingTensor").info("input", (name,), {}, (var,))

@contextlib.contextmanager
def capture_logs() -> Iterator[List[str]]:
    logger = logging.getLogger("LoggingTensor")
    log_list = []

            

Reported by Pylint.
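
Note: Logger.info(msg, *args) treats the extra positional arguments as %-style substitutions for msg, and "input" has no placeholders, hence the warning. If the extra tuples are consumed only by the test's custom LoggingTensor handler this is intentional and can be suppressed; otherwise the conventional form puts placeholders in the message. A sketch of the conventional call, with illustrative values:

    import logging

    logging.basicConfig(level=logging.INFO)
    name, var = "x", [1, 2, 3]
    logging.getLogger("LoggingTensor").info("input %s = %r", name, var)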

TODO: move this into library proper
Error

Line: 10 Column: 3

              import contextlib
import itertools

# TODO: move this into library proper
@contextlib.contextmanager
def no_dispatch() -> Iterator[None]:
    guard = torch._C._DisableTorchDispatch()
    try:
        yield

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 13 Column: 13

              # TODO: move this into library proper
@contextlib.contextmanager
def no_dispatch() -> Iterator[None]:
    guard = torch._C._DisableTorchDispatch()
    try:
        yield
    finally:
        del guard


            

Reported by Pylint.

Access to a protected member _DisableTorchDispatch of a client class
Error

Line: 13 Column: 13

              # TODO: move this into library proper
@contextlib.contextmanager
def no_dispatch() -> Iterator[None]:
    guard = torch._C._DisableTorchDispatch()
    try:
        yield
    finally:
        del guard


            

Reported by Pylint.

TODO: TensorBase should work
Error

Line: 26 Column: 3

              # 3. Enter dispatcher, wind your way through Autograd
# 4. Hit Python dispatch key, call __torch_dispatch__

# TODO: TensorBase should work
class LoggingTensor(torch.Tensor):
    elem: torch.Tensor

    __slots__ = ['elem']


            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 33 Column: 1

                  __slots__ = ['elem']

    @staticmethod
    def __new__(cls, elem, *args, **kwargs):
        # The wrapping tensor (LoggingTensor) is just a meta tensor, so it
        # doesn't hold any memory (meta tensor is generally the preferred type
        # of tensor you want to make a subclass from)...
        r = torch.Tensor._make_subclass(cls, elem.to('meta'), elem.requires_grad)
        # ...the real tensor is held as an element on the tensor.

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 33 Column: 1

                  __slots__ = ['elem']

    @staticmethod
    def __new__(cls, elem, *args, **kwargs):
        # The wrapping tensor (LoggingTensor) is just a meta tensor, so it
        # doesn't hold any memory (meta tensor is generally the preferred type
        # of tensor you want to make a subclass from)...
        r = torch.Tensor._make_subclass(cls, elem.to('meta'), elem.requires_grad)
        # ...the real tensor is held as an element on the tensor.

            

Reported by Pylint.

test/test_numpy_interop.py
109 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import numpy as np

from itertools import product

from torch.testing._internal.common_utils import \
    (TestCase, run_tests)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, onlyCPU, dtypes)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 6 Column: 1

              
from itertools import product

from torch.testing._internal.common_utils import \
    (TestCase, run_tests)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, onlyCPU, dtypes)

# For testing handling NumPy objects and sending tensors to / accepting

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 8 Column: 1

              
from torch.testing._internal.common_utils import \
    (TestCase, run_tests)
from torch.testing._internal.common_device_type import \
    (instantiate_device_type_tests, onlyCPU, dtypes)

# For testing handling NumPy objects and sending tensors to / accepting
#   arrays from NumPy.
class TestNumPyInterop(TestCase):

            

Reported by Pylint.

'arr.flags' does not support item assignment
Error

Line: 20 Column: 9

                  @onlyCPU
    def test_numpy_non_writeable(self, device):
        arr = np.zeros(5)
        arr.flags['WRITEABLE'] = False
        self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))

    @onlyCPU
    def test_numpy_unresizable(self, device) -> None:
        x = np.zeros((2, 2))

            

Reported by Pylint.
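
Note: arr.flags['WRITEABLE'] = False is valid NumPy, but Pylint's inference does not model the flags object as item-assignable. ndarray.setflags(write=False) expresses the same thing through a plain method call and sidesteps the false positive. A minimal sketch:

    import numpy as np

    arr = np.zeros(5)
    arr.setflags(write=False)        # equivalent to arr.flags['WRITEABLE'] = False
    assert not arr.flags.writeable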

Unused argument 'device'
Error

Line: 18 Column: 40

                  # other instances of this warning should be addressed to avoid
    # the tests depending on the order in which they're run.
    @onlyCPU
    def test_numpy_non_writeable(self, device):
        arr = np.zeros(5)
        arr.flags['WRITEABLE'] = False
        self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))

    @onlyCPU

            

Reported by Pylint.

Unused argument 'device'
Error

Line: 24 Column: 38

                      self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))

    @onlyCPU
    def test_numpy_unresizable(self, device) -> None:
        x = np.zeros((2, 2))
        y = torch.from_numpy(x)
        with self.assertRaises(ValueError):
            x.resize((5, 5))


            

Reported by Pylint.

Unused variable 'y'
Error

Line: 26 Column: 9

                  @onlyCPU
    def test_numpy_unresizable(self, device) -> None:
        x = np.zeros((2, 2))
        y = torch.from_numpy(x)
        with self.assertRaises(ValueError):
            x.resize((5, 5))

        z = torch.randn(5, 5)
        w = z.numpy()

            

Reported by Pylint.

Unused argument 'device'
Error

Line: 38 Column: 29

                          w.resize((10, 10))

    @onlyCPU
    def test_to_numpy(self, device) -> None:
        def get_castable_tensor(shape, dtype):
            if dtype.is_floating_point:
                dtype_info = torch.finfo(dtype)
                # can't directly use min and max, because for double, max - min
                # is greater than double range and sampling always gives inf.

            

Reported by Pylint.

Redefining name 'dtypes' from outer scope (line 8)
Error

Line: 55 Column: 9

                              t = torch.empty(shape, dtype=torch.int64).random_(low, high)
            return t.to(dtype)

        dtypes = [
            torch.uint8,
            torch.int8,
            torch.short,
            torch.int,
            torch.half,

            

Reported by Pylint.

Cell variable sz1 defined in loop
Error

Line: 83 Column: 32

                              self.assertEqual(x[i], y[i])

            def check2d(x, y):
                for i in range(sz1):
                    for j in range(sz2):
                        self.assertEqual(x[i][j], y[i][j])

            # empty
            x = torch.tensor([]).to(dtp)

            

Reported by Pylint.
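
Note: check2d is a closure defined inside a loop that reads sz1 and sz2 from the enclosing scope, so every closure sees the loop variables' final values rather than the values at definition time. Binding them as default arguments (or passing them explicitly) freezes the intended values. A generic sketch of the binding trick, unrelated to the test's real tensors:

    checks = []
    for sz1 in (2, 3):
        def check_len(x, sz1=sz1):   # the default argument captures the current value
            return len(x) == sz1
        checks.append(check_len)

    print(checks[0]([0, 1]), checks[1]([0, 1, 2]))   # True True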