The following issues were found:

test/distributed/pipeline/sync/test_checkpoint.py
65 issues
Unable to import 'pytest'
Error

Line: 9 Column: 1

# LICENSE file in the root directory of this source tree.
from functools import partial

import pytest
import torch
from torch import nn
import torch.cuda

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing


Reported by Pylint.
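
Fix sketch: Pylint's import-error (E0401), here and in the matching findings below, almost always means the environment running Pylint cannot see the package, not that the import is wrong. The preferred fix is to run Pylint from an environment with pytest and torch installed; a fallback is a minimal .pylintrc entry (a sketch, not project policy):

[MESSAGES CONTROL]
# E0401 reflects the lint environment, not the code; remove this once the
# dependencies are installed where Pylint runs.
disable=import-error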

Unable to import 'torch'
Error

Line: 10 Column: 1

from functools import partial

import pytest
import torch
from torch import nn
import torch.cuda

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join


Reported by Pylint.

Unable to import 'torch'
Error

Line: 11 Column: 1

import pytest
import torch
from torch import nn
import torch.cuda

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch


Reported by Pylint.

Unable to import 'torch.cuda'
Error

Line: 12 Column: 1

import pytest
import torch
from torch import nn
import torch.cuda

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch



Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.checkpoint'
Error

Line: 14 Column: 1

from torch import nn
import torch.cuda

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch

devices = ["cpu"]
if torch.cuda.is_available():


Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.dependency'
Error

Line: 15 Column: 1

import torch.cuda

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch

devices = ["cpu"]
if torch.cuda.is_available():
    devices.append("cuda")


Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.microbatch'
Error

Line: 16 Column: 1

from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch

devices = ["cpu"]
if torch.cuda.is_available():
    devices.append("cuda")



Reported by Pylint.

Redefining built-in 'input'
Error

Line: 119 Column: 27

    logs = []

    class Detect(nn.Module):
        def forward(self, input):
            logs.append((is_checkpointing(), is_recomputing()))
            return input

    model = Detect()
    input = torch.rand(1, requires_grad=True)


Reported by Pylint.
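
Fix sketch: renaming the parameter stops it from shadowing the `input` builtin; assuming nothing depends on the name, the flagged method becomes:

class Detect(nn.Module):
    def forward(self, inp):  # renamed from `input`
        logs.append((is_checkpointing(), is_recomputing()))
        return inp

The same rename covers the test-local `input = torch.rand(1, requires_grad=True)` flagged in the next two findings.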

Redefining built-in 'input'
Error

Line: 124 Column: 5

            return input

    model = Detect()
    input = torch.rand(1, requires_grad=True)

    output = checkpoint(model, input)
    output.backward()

    assert logs == [(True, False), (False, True)]


Reported by Pylint.

Redefining built-in 'input'
Error

Line: 136 Column: 27

    logs = []

    class Detect(nn.Module):
        def forward(self, input):
            logs.append((is_checkpointing(), is_recomputing()))
            return input

    model = Detect()
    input = torch.rand(1, requires_grad=True)


Reported by Pylint.

test/fx/test_dce_pass.py
65 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

import unittest

from typing import Set, Type
import torch
import torch.fx


class TestDCE(unittest.TestCase):
    def _has_nodes_without_users(self, m: torch.fx.GraphModule):


Reported by Pylint.

Unable to import 'torch.fx'
Error

Line: 5 Column: 1

from typing import Set, Type
import torch
import torch.fx


class TestDCE(unittest.TestCase):
    def _has_nodes_without_users(self, m: torch.fx.GraphModule):
        for node in m.graph.nodes:


Reported by Pylint.

Unused variable 'a'
Error

Line: 74 Column: 17

                self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))

            def forward(self, x):
                a = x + 1
                return x + self.attr_1

        self._run_dce_and_test(TestModule(), expect_dce_changes=True)

    def test_dead_chain(self):


Reported by Pylint.
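
Fix sketch: in these DCE tests the dead assignment is the point of the test, so deleting it would defeat the purpose; an annotated inline disable keeps the intent visible (this applies equally to the unused `b` and `r` findings below):

def forward(self, x):
    # dead on purpose: the test asserts DCE removes this node
    a = x + 1  # pylint: disable=unused-variable
    return x + self.attr_1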

Unused variable 'b'
Error

Line: 91 Column: 17

            def forward(self, x):
                a = x + 1
                b = a * 7
                return x + self.attr_1

        self._run_dce_and_test(TestModule(), expect_dce_changes=True)

    def test_dead_getattr(self):


Reported by Pylint.

Unused variable 'b'
Error

Line: 108 Column: 17

            def forward(self, x):
                a = x + 1
                b = a * self.attr_1
                return x + 11

        self._run_dce_and_test(TestModule(), expect_dce_changes=True)

    def test_dead_placeholder(self):


Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 120 Column: 13

                      """

        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return x + 7



Reported by Pylint.
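
Fix sketch: an __init__ that only forwards to super() can simply be deleted; Python then falls through to torch.nn.Module.__init__ on its own:

class TestModule(torch.nn.Module):
    def forward(self, x, y):
        return x + 7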

Unused argument 'y'
Error

Line: 123 Column: 34

            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return x + 7

        self._run_dce_and_test(TestModule(), expect_dce_changes=False)

    def test_dead_placeholder_with_user(self):


Reported by Pylint.
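
Fix sketch: the dead placeholder is deliberate, so the argument must stay in the signature; a leading underscore marks it as intentionally unused under Pylint's default dummy-variables-rgx (assuming the test does not rely on the placeholder being named exactly `y`):

def forward(self, x, _y):
    return x + 7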

Useless super delegation in method '__init__'
Error

Line: 137 Column: 13

                      """

        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                a = y + 2
                return x + 7


Reported by Pylint.

Unused variable 'a'
Error

Line: 141 Column: 17

                super().__init__()

            def forward(self, x, y):
                a = y + 2
                return x + 7

        self._run_dce_and_test(TestModule(), expect_dce_changes=True)

    def test_keep_module_with_side_effects(self):


Reported by Pylint.

Unused variable 'r'
Error

Line: 160 Column: 17

                self.relu = ReLUImpure()

            def forward(self, a: torch.Tensor) -> torch.Tensor:
                r = self.relu(a)
                return a * 2

        self._run_dce_and_test(
            TestModule(), expect_dce_changes=False, modules_to_be_leafs={ReLUImpure}
        )


Reported by Pylint.

torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py
65 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 21 Column: 17

def two_args_two_kwargs(
    first_arg,
    second_arg,
    first_kwarg=torch.tensor([3, 3]),
    second_kwarg=torch.tensor([4, 4]),
):
    return first_arg + second_arg + first_kwarg + second_kwarg




Reported by Pylint.
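
Fix sketch: torch.tensor exists at runtime but lives in a C extension, so Pylint's static analysis cannot see it; these no-member reports (and the torch.empty/zeros/ones/long/iinfo findings later in this report) are false positives, and the "maybe 'Tensor'?" suggestion is spurious. The usual remedy is configuration, sketched for a .pylintrc:

[MASTER]
# let Pylint import torch so it can inspect C-level members
extension-pkg-allow-list=torch

[TYPECHECK]
# fallback if loading the extension is not acceptable
generated-members=torch.*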

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 22 Column: 18

    first_arg,
    second_arg,
    first_kwarg=torch.tensor([3, 3]),
    second_kwarg=torch.tensor([4, 4]),
):
    return first_arg + second_arg + first_kwarg + second_kwarg


@torch.jit.script


Reported by Pylint.

Instance of 'JitFaultyAgentRpcTest' has no 'rank' member
Error

Line: 90 Column: 12

    def test_timeout_in_torchscript_function(self):
        # Call rpc_async + fut.wait() in torchscript function and ensure that
        # timeout is raised.
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))


Reported by Pylint.
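
Fix sketch: JitFaultyAgentRpcTest is a mixin; rank, world_size, and assertRaisesRegex are contributed by the fixture and unittest.TestCase classes it is combined with at runtime, which Pylint cannot infer. Class-level annotations document this (the base class name here is assumed from context):

class JitFaultyAgentRpcTest(RpcAgentTestFixture):
    rank: int         # provided by the multiprocess test fixture at runtime
    world_size: int   # provided by the multiprocess test fixture at runtime

The assertRaisesRegex findings below have the same cause: the method arrives via unittest.TestCase in the final MRO.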

Instance of 'JitFaultyAgentRpcTest' has no 'rank' member
Error

Line: 93 Column: 40

        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {
            "first_kwarg": torch.tensor([2, 2]),
            "second_kwarg": torch.tensor([3, 3]),


Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 95 Column: 39

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {
            "first_kwarg": torch.tensor([2, 2]),
            "second_kwarg": torch.tensor([3, 3]),
        }
        expected_error = self.get_timeout_error_regex()


Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 95 Column: 17

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {
            "first_kwarg": torch.tensor([2, 2]),
            "second_kwarg": torch.tensor([3, 3]),
        }
        expected_error = self.get_timeout_error_regex()


Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 97 Column: 28

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {
            "first_kwarg": torch.tensor([2, 2]),
            "second_kwarg": torch.tensor([3, 3]),
        }
        expected_error = self.get_timeout_error_regex()
        # Ensure that we get a timeout if we override the default timeout and
        # the RPC takes longer to execute.


Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 98 Column: 29

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {
            "first_kwarg": torch.tensor([2, 2]),
            "second_kwarg": torch.tensor([3, 3]),
        }
        expected_error = self.get_timeout_error_regex()
        # Ensure that we get a timeout if we override the default timeout and
        # the RPC takes longer to execute.
        with self.assertRaisesRegex(RuntimeError, expected_error):


Reported by Pylint.

Instance of 'JitFaultyAgentRpcTest' has no 'assertRaisesRegex' member
Error

Line: 103 Column: 14

        expected_error = self.get_timeout_error_regex()
        # Ensure that we get a timeout if we override the default timeout and
        # the RPC takes longer to execute.
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)

        # Ensure that we timeout if we don't specify a timeout but the default
        # is less than the RPC takes to execute.
        rpc._set_rpc_timeout(0.001)


Reported by Pylint.

Instance of 'JitFaultyAgentRpcTest' has no 'assertRaisesRegex' member
Error

Line: 109 Column: 14

        # Ensure that we timeout if we don't specify a timeout but the default
        # is less than the RPC takes to execute.
        rpc._set_rpc_timeout(0.001)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            script_rpc_async_call(
                dst_worker_name, args, kwargs
            )

        # Ensure that we run to completion if zero timeout is specified.


Reported by Pylint.

torch/utils/data/_utils/worker.py
65 issues
Attempted relative import beyond top-level package
Error

Line: 14 Column: 1

from dataclasses import dataclass
from torch._utils import ExceptionWrapper
from typing import Union
from . import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS, HAS_NUMPY

if IS_WINDOWS:
    import ctypes
    from ctypes.wintypes import DWORD, BOOL, HANDLE



Reported by Pylint.
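
Fix sketch: E0402 fires because Pylint is analyzing worker.py as a standalone module rather than as torch.utils.data._utils.worker, so `from . import ...` appears to escape the package. Invoking Pylint from the repository root over the package usually fixes it; an init-hook sketch (the path is assumed to be the checkout root):

[MASTER]
init-hook='import sys; sys.path.insert(0, ".")'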

Module 'torch' has no 'set_num_threads' member
Error

Line: 216 Column: 9

        # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
        signal_handling._set_worker_signal_handlers()

        torch.set_num_threads(1)
        seed = base_seed + worker_id
        random.seed(seed)
        torch.manual_seed(seed)
        if HAS_NUMPY:
            np_seed = _generate_state(base_seed, worker_id)


Reported by Pylint.

String statement has no effect
Error

Line: 112 Column: 1

    return _worker_info


r"""Dummy class used to signal the end of an IterableDataset"""
@dataclass(frozen=True)
class _IterableDatasetStopIteration(object):
    worker_id: int

r"""Dummy class used to resume the fetching when worker reuse is enabled"""


Reported by Pylint.
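
Fix sketch: the r"""...""" literals sit above the classes they describe, so they are free-standing expressions rather than docstrings. Moving each one inside the class body attaches it as __doc__ and silences the check:

@dataclass(frozen=True)
class _IterableDatasetStopIteration(object):
    r"""Dummy class used to signal the end of an IterableDataset"""
    worker_id: int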

String statement has no effect
Error

Line: 117 Column: 1

class _IterableDatasetStopIteration(object):
    worker_id: int

r"""Dummy class used to resume the fetching when worker reuse is enabled"""
@dataclass(frozen=True)
class _ResumeIteration(object):
    pass

# The function `_generate_state` is adapted from `numpy.random.SeedSequence`


Reported by Pylint.

TODO: Implement `SeedSequence` like object for `torch.random`
Error

Line: 150 Column: 3

# This function generates an array of int32 as the seed for
# `numpy.random`, in order to prevent state collision due to same
# seed and algorithm for `numpy.random` and `random` modules.
# TODO: Implement `SeedSequence` like object for `torch.random`
def _generate_state(base_seed, worker_id):
    INIT_A = 0x43b0d7e5
    MULT_A = 0x931e8875
    INIT_B = 0x8b51f9dd
    MULT_B = 0x58f38ded


Reported by Pylint.

Redefining built-in 'hash'
Error

Line: 166 Column: 5

    hash_const_A = INIT_A

    def hash(value):
        nonlocal hash_const_A
        value = (value ^ hash_const_A) & MASK32
        hash_const_A = (hash_const_A * MULT_A) & MASK32
        value = (value * hash_const_A) & MASK32
        value = (value ^ (value >> XSHIFT)) & MASK32


Reported by Pylint.
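
Fix sketch: renaming the nested helper keeps builtins.hash usable; the final `return value` is assumed from the part of the body the excerpt cuts off:

def _hash(value):  # renamed from `hash`; call sites change the same way
    nonlocal hash_const_A
    value = (value ^ hash_const_A) & MASK32
    hash_const_A = (hash_const_A * MULT_A) & MASK32
    value = (value * hash_const_A) & MASK32
    value = (value ^ (value >> XSHIFT)) & MASK32
    return value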

Unused argument 'persistent_workers'
Error

Line: 204 Column: 31

def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event,
                 auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id,
                 num_workers, persistent_workers):
    # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
    # logic of this function.

    try:
        # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal


Reported by Pylint.

Access to a protected member _set_worker_signal_handlers of a client class
Error

Line: 214 Column: 9

        # handlers, likely when the same fatal signal had already happened
        # again.
        # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
        signal_handling._set_worker_signal_handlers()

        torch.set_num_threads(1)
        seed = base_seed + worker_id
        random.seed(seed)
        torch.manual_seed(seed)


Reported by Pylint.
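
Fix sketch: the call crosses a module boundary inside the same package, which Pylint cannot distinguish from outside access; a targeted inline disable records that this is intentional:

# intra-package use of a private helper from the sibling signal_handling module
signal_handling._set_worker_signal_handlers()  # pylint: disable=protected-access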

Using the global statement
Error

Line: 225 Column: 9

            import numpy as np
            np.random.seed(np_seed)

        global _worker_info
        _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers,
                                  seed=seed, dataset=dataset)

        from torch.utils.data import _DatasetKind



Reported by Pylint.
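
Fix sketch: `global` is needed here because _worker_info is module-level state read back by get_worker_info(). If avoiding the statement is preferred, one hedged alternative is to keep the value on a small namespace object, since attribute assignment needs no `global` (the names below are hypothetical):

class _WorkerEnv:
    info = None  # written once per worker process, read by get_worker_info()

# in _worker_loop:
_WorkerEnv.info = WorkerInfo(id=worker_id, num_workers=num_workers,
                             seed=seed, dataset=dataset)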

Catching too general exception Exception
Error

Line: 238 Column: 16

                init_fn(worker_id)

            fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
        except Exception:
            init_exception = ExceptionWrapper(
                where="in DataLoader worker process {}".format(worker_id))

        # When using Iterable mode, some worker can exit earlier than others due
        # to the IterableDataset behaving differently for different workers.


Reported by Pylint.
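
Fix sketch: the broad catch is deliberate; any failure while building the fetcher must be serialized back to the parent process via ExceptionWrapper, so narrowing it would drop errors. The conventional fix is an annotated disable:

try:
    if init_fn is not None:
        init_fn(worker_id)
    fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
# any setup failure must travel back to the parent via ExceptionWrapper
except Exception:  # pylint: disable=broad-except
    init_exception = ExceptionWrapper(
        where="in DataLoader worker process {}".format(worker_id))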

test/jit/test_slice.py
65 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

import os
import sys

import torch
from typing import List

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)


Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 10 Column: 1

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")


Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import os
import sys

import torch
from typing import List

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)


Reported by Pylint.
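
Fix sketch: one plausible module docstring; the wording is an assumption based on the file's stated purpose:

"""Tests that the Python slice class is supported in TorchScript."""
import os
import sys

The missing class and function docstring findings below are fixed the same way, with a one-line docstring per test class and method.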

standard import "from typing import List" should be placed before "import torch"
Error

Line: 5 Column: 1

import sys

import torch
from typing import List

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase


Reported by Pylint.
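
Fix sketch: C0411 wants standard-library imports ahead of third-party ones, so `from typing import List` moves up:

import os
import sys
from typing import List

import torch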

Import "from torch.testing._internal.jit_utils import JitTestCase" should be placed at the top of the module
Error

Line: 10 Column: 1

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")


Reported by Pylint.
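
Fix sketch: this import cannot move to the top because it only resolves after sys.path.append, so the conventional pattern is an inline disable:

sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase  # pylint: disable=wrong-import-position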

Missing class docstring
Error

Line: 18 Column: 1

                                     "instead.")

# Tests that Python slice class is supported in TorchScript
class TestSlice(JitTestCase):
    def test_slice_kwarg(self):
        def slice_kwarg(x: List[int]):
            return x[slice(1, stop=2)]

        with self.assertRaisesRegex(RuntimeError, "Slice does not accept any keyword arguments"):


Reported by Pylint.

Missing function or method docstring
Error

Line: 19 Column: 5

# Tests that Python slice class is supported in TorchScript
class TestSlice(JitTestCase):
    def test_slice_kwarg(self):
        def slice_kwarg(x: List[int]):
            return x[slice(1, stop=2)]

        with self.assertRaisesRegex(RuntimeError, "Slice does not accept any keyword arguments"):
            torch.jit.script(slice_kwarg)


Reported by Pylint.

Argument name "x" doesn't conform to snake_case naming style
Error

Line: 20 Column: 9

# Tests that Python slice class is supported in TorchScript
class TestSlice(JitTestCase):
    def test_slice_kwarg(self):
        def slice_kwarg(x: List[int]):
            return x[slice(1, stop=2)]

        with self.assertRaisesRegex(RuntimeError, "Slice does not accept any keyword arguments"):
            torch.jit.script(slice_kwarg)



Reported by Pylint.
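
Fix sketch: single-letter names fail Pylint's default argument regex; rather than renaming every math-style `x` in test code, projects typically whitelist them (the list below is illustrative):

[BASIC]
# extend the default list so short math-style names in tests pass C0103
good-names=i,j,k,ex,Run,_,x,y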

Missing function or method docstring
Error

Line: 26 Column: 5

        with self.assertRaisesRegex(RuntimeError, "Slice does not accept any keyword arguments"):
            torch.jit.script(slice_kwarg)

    def test_slice_three_nones(self):
        def three_nones(x: List[int]):
            return x[slice(None, None, None)]

        self.checkScript(three_nones, (range(10),))



Reported by Pylint.

Argument name "x" doesn't conform to snake_case naming style
Error

Line: 27 Column: 9

            torch.jit.script(slice_kwarg)

    def test_slice_three_nones(self):
        def three_nones(x: List[int]):
            return x[slice(None, None, None)]

        self.checkScript(three_nones, (range(10),))

    def test_slice_two_nones(self):


Reported by Pylint.

torch/nn/modules/batchnorm.py
65 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter, UninitializedBuffer

from .. import functional as F
from .. import init
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module



Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 8 Column: 1

from torch.nn.parameter import Parameter, UninitializedParameter, UninitializedBuffer

from .. import functional as F
from .. import init
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module




Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 9 Column: 1

from .. import functional as F
from .. import init
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module


class _NormBase(Module):


Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 10 Column: 1

from .. import functional as F
from .. import init
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module


class _NormBase(Module):
    """Common base of _InstanceNorm and _BatchNorm"""


Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 11 Column: 1

from .. import init
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module


class _NormBase(Module):
    """Common base of _InstanceNorm and _BatchNorm"""



Reported by Pylint.

Module 'torch' has no 'empty' member
Error

Line: 45 Column: 37

        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
            self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:


Reported by Pylint.

Module 'torch' has no 'empty' member
Error

Line: 46 Column: 35

        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
            self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))


Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 51 Column: 50

            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))
            self.register_buffer('running_var', torch.ones(num_features, **factory_kwargs))
            self.running_mean: Optional[Tensor]
            self.running_var: Optional[Tensor]
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long,


Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 52 Column: 49

            self.register_parameter("bias", None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))
            self.register_buffer('running_var', torch.ones(num_features, **factory_kwargs))
            self.running_mean: Optional[Tensor]
            self.running_var: Optional[Tensor]
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long,
                                              **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))


Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 56 Column: 56

            self.running_mean: Optional[Tensor]
            self.running_var: Optional[Tensor]
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long,
                                              **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
            self.num_batches_tracked: Optional[Tensor]
        else:
            self.register_buffer("running_mean", None)
            self.register_buffer("running_var", None)


Reported by Pylint.

torch/testing/_internal/common_quantized.py
64 issues
Module 'torch' has no 'qint8' member
Error

Line: 54 Column: 17

    according to the min and max element of the tensor"""
    if isinstance(X, torch.Tensor):
        X = X.numpy()
    if dtype == torch.qint8:
        if reduce_range:
            qmin, qmax = -64, 63
        else:
            qmin, qmax = -128, 127
    else:  # dtype == torch.quint8


Reported by Pylint.

Module 'torch' has no 'iinfo' member
Error

Line: 84 Column: 18

    according to the min and max element of the tensor"""
    if isinstance(X, torch.Tensor):
        X = X.numpy()
    qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max
    n_levels = qmax - qmin
    scale = np.zeros(X.shape[0], dtype=np.float64)
    zero_point = np.zeros(X.shape[0], dtype=np.int64)
    for i in range(zero_point.shape[0]):
        min_val = X.min()


Reported by Pylint.

Module 'torch' has no 'iinfo' member
Error

Line: 84 Column: 42

    according to the min and max element of the tensor"""
    if isinstance(X, torch.Tensor):
        X = X.numpy()
    qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max
    n_levels = qmax - qmin
    scale = np.zeros(X.shape[0], dtype=np.float64)
    zero_point = np.zeros(X.shape[0], dtype=np.int64)
    for i in range(zero_point.shape[0]):
        min_val = X.min()


Reported by Pylint.

Module 'torch' has no 'float32' member
Error

Line: 181 Column: 55

# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
    dtype = X.dtype
    X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
    res = torch.zeros_like(X)

    for i in range(X.size()[0]):
        res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
                  per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]


Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 182 Column: 11

def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
    dtype = X.dtype
    X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
    res = torch.zeros_like(X)

    for i in range(X.size()[0]):
        res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
                  per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]



Reported by Pylint.

Module 'torch' has no 'clamp' member
Error

Line: 185 Column: 19

    res = torch.zeros_like(X)

    for i in range(X.size()[0]):
        res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
                  per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]

    out = res.permute(tuple(permute_axis_list))
    return out.to(dtype)



Reported by Pylint.

Module 'torch' has no 'round' member
Error

Line: 185 Column: 31

    res = torch.zeros_like(X)

    for i in range(X.size()[0]):
        res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
                  per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]

    out = res.permute(tuple(permute_axis_list))
    return out.to(dtype)



Reported by Pylint.

Module 'torch' has no 'float32' member
Error

Line: 195 Column: 55

# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
    dtype = X.dtype
    X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
    Xq = torch.zeros_like(X)
    for i in range(X.size()[0]):
        Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
    Xq = Xq.permute(tuple(permute_axis_list))
    mask = (Xq >= quant_min) * (Xq <= quant_max)


Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 196 Column: 10

def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
    dtype = X.dtype
    X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
    Xq = torch.zeros_like(X)
    for i in range(X.size()[0]):
        Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
    Xq = Xq.permute(tuple(permute_axis_list))
    mask = (Xq >= quant_min) * (Xq <= quant_max)
    res = torch.zeros_like(dY)


Reported by Pylint.

Module 'torch' has no 'round' member
Error

Line: 198 Column: 17

    X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
    Xq = torch.zeros_like(X)
    for i in range(X.size()[0]):
        Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
    Xq = Xq.permute(tuple(permute_axis_list))
    mask = (Xq >= quant_min) * (Xq <= quant_max)
    res = torch.zeros_like(dY)
    res[mask] = dY[mask]
    return res.to(dtype)


Reported by Pylint.

torch/distributed/elastic/multiprocessing/api.py
64 issues
Module 'signal' has no 'Signals' member
Error

Line: 45 Column: 42

    if the death signal got received by the process.
    """

    def __init__(self, msg: str, sigval: signal.Signals) -> None:
        super().__init__(msg)
        self.sigval = sigval


def _terminate_process_handler(signum: int, frame: FrameType) -> None:


Reported by Pylint.
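
Fix sketch: signal.Signals is real on every supported Python (3.5+), but the signal module assembles its members dynamically and some (SIGKILL, CTRL_C_EVENT) exist only on one platform, so Pylint's static view misses them; the findings in this file are false positives. A .pylintrc sketch:

[TYPECHECK]
# signal's members are created at import time and vary by platform
ignored-modules=signal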

Module 'signal' has no 'Signals' member
Error

Line: 59 Column: 14

    so the exception should not be silently ignored, otherwise the process will never
    be terminated.
    """
    sigval = signal.Signals(signum)
    raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)


def _get_kill_signal() -> signal.Signals:
    """


Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 63 Column: 27

    raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval)


def _get_kill_signal() -> signal.Signals:
    """
    Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows.
    """
    if IS_WINDOWS:
        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821


Reported by Pylint.

Module 'signal' has no 'CTRL_C_EVENT' member
Error

Line: 68 Column: 16

    Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows.
    """
    if IS_WINDOWS:
        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821
    else:
        return signal.SIGKILL


def _get_default_signal() -> signal.Signals:


Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 73 Column: 30

        return signal.SIGKILL


def _get_default_signal() -> signal.Signals:
    """
    Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.
    """
    if IS_WINDOWS:
        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821


Reported by Pylint.

Module 'signal' has no 'CTRL_C_EVENT' member
Error

Line: 78 Column: 16

    Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.
    """
    if IS_WINDOWS:
        return signal.CTRL_C_EVENT  # type: ignore[attr-defined] # noqa: F821
    else:
        return signal.SIGTERM


def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str):


Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 309 Column: 33

        raise NotImplementedError()

    @abc.abstractmethod
    def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
        r"""
        Terminates all processes managed by this context and cleans up any
        meta resources (e.g. redirect, error_file files).
        """
        raise NotImplementedError()


Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 317 Column: 35

        raise NotImplementedError()

    def close(
        self, death_sig: Optional[signal.Signals] = None, timeout: int = 30
    ) -> None:
        r"""
        Terminates all processes managed by this context and cleans up any
        meta resources (e.g. redirect, error_file files).



Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 530 Column: 33

        assert self._pc is not None  # assertion for mypy type checking
        return {local_rank: pid for local_rank, pid in enumerate(self._pc.pids())}

    def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
        if not self._pc:
            return
        for proc in self._pc.processes:
            if proc.is_alive():
                log.warning(f"Closing process {proc.pid} via signal {death_sig.name}")


Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 596 Column: 41

            stderr=self._stderr,
        )

    def close(self, death_sig: Optional[signal.Signals] = None) -> None:
        if not death_sig:
            death_sig = _get_default_signal()
        self.proc.send_signal(death_sig)
        if self._stdout:
            self._stdout.close()


Reported by Pylint.

caffe2/python/layers/layers.py
64 issues
Access to a protected member _net of a client class
Error

Line: 213 Column: 17

        with workspace.WorkspaceGuard("model_init_by_loading_params"):
            try:
                net = core.Net("shape_checker")
                net._net.op.extend([self.initializer])
                shape_blob = net.NextScopedBlob(self.parameter + "_shape")
                net.Shape([self.parameter], shape_blob)
                workspace.RunNetOnce(net)
                shape = workspace.FetchBlob(shape_blob).tolist()
                # ResetWorkspace to save memory


Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 223 Column: 21

                return shape
            except RuntimeError as exp:
                logger.warning(
                    "Cannot infer the shape of blob {} from operator {}: {}".format(
                        self.parameter, self.initializer.type, exp
                    )
                )
                workspace.ResetWorkspace()
                return None


Reported by Pylint.
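
Fix sketch: passing the format string and its arguments separately defers interpolation until the record is actually emitted:

                logger.warning(
                    "Cannot infer the shape of blob %s from operator %s: %s",
                    self.parameter, self.initializer.type, exp,
                )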

TODO(amalevich): Either return back to lambdas, that add
Error

Line: 374 Column: 3

        Adds layer initialization operators to passed net.
        """
        for param in self.params:
            # TODO(amalevich): Either return back to lambdas, that add
            # all params (looks a bit safer and breaking less
            # abstractions) or extend Net interface to this type of
            # operations better
            # TODO(xlwang) init_net._net.op has type google.protobuf.\
            # internal.containers.RepeatedCompositeFieldContainer, but


Reported by Pylint.

TODO(xlwang) init_net._net.op has type google.protobuf.\
Error

Line: 378 Column: 3

            # all params (looks a bit safer and breaking less
            # abstractions) or extend Net interface to this type of
            # operations better
            # TODO(xlwang) init_net._net.op has type google.protobuf.\
            # internal.containers.RepeatedCompositeFieldContainer, but
            # the version of protobuf in fbcode does not support append
            # so extend is used
            init_op = param.initializer
            current_device_scope = scope.CurrentDeviceScope()


Reported by Pylint.

Access to a protected member _net of a client class
Error

Line: 395 Column: 27

            # do not add duplicated init ops
            if any(
                utils.OpAlmostEqual(op, init_op, "debug_info")
                for op in init_net._net.op
            ):
                continue

            init_net._net.op.extend([init_op])



Reported by Pylint.

Access to a protected member _net of a client class
Error

Line: 399 Column: 13

            ):
                continue

            init_net._net.op.extend([init_op])

    def create_param(
        self, param_name, shape, initializer, optimizer, ps_param=None, regularizer=None
    ):
        with scope.NameScope(self.name, reset=True):


Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

## @package layers
# Module caffe2.python.layers.layers


import logging
from collections import namedtuple

import numpy as np
from caffe2.proto import caffe2_pb2


Reported by Pylint.

Missing function or method docstring
Error

Line: 24 Column: 1

IdScoreListWithEvicted = schema.MapWithEvicted(np.int64, np.float32)


def almost_equal_schemas(
    record,
    original_schema,
    check_field_names=True,
    check_field_types=True,
    check_field_metas=False,


Reported by Pylint.

Unnecessary "elif" after "return"
Error

Line: 31 Column: 5

    check_field_types=True,
    check_field_metas=False,
):
    if original_schema == IdList:
        return schema.equal_schemas(
            record,
            IdList,
            check_field_names=check_field_names,
            check_field_types=check_field_types,


Reported by Pylint.
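
Fix sketch: because the `if` branch returns, the chain can be flattened into plain `if` statements with no behavior change:

if original_schema == IdList:
    return schema.equal_schemas(
        record,
        IdList,
        check_field_names=check_field_names,
        check_field_types=check_field_types,
        check_field_metas=check_field_metas,
    )
# each later branch becomes a plain `if` the same way, since every branch returns
return schema.equal_schemas(record, original_schema)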

Missing function or method docstring
Error

Line: 63 Column: 1

        return schema.equal_schemas(record, original_schema)


def get_key(record):
    if almost_equal_schemas(record, IdList):
        key = "values"
    elif almost_equal_schemas(
        record, IdScoreList, check_field_types=False
    ):


Reported by Pylint.

benchmarks/distributed/pipeline/pipe.py
64 issues
Unable to import 'torch'
Error

Line: 7 Column: 1

import time

from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.nn as nn
from torch.utils.data import DataLoader

from torch.distributed.pipeline.sync import Pipe


Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 8 Column: 1

from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.nn as nn
from torch.utils.data import DataLoader

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model


Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 9 Column: 1

from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.nn as nn
from torch.utils.data import DataLoader

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam


Reported by Pylint.

Unable to import 'torch.utils.data'
Error

Line: 10 Column: 1

import torch
from torch.distributed import rpc
import torch.nn as nn
from torch.utils.data import DataLoader

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam



Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync'
Error

Line: 12 Column: 1

import torch.nn as nn
from torch.utils.data import DataLoader

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:


Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.utils'
Error

Line: 13 Column: 1

from torch.utils.data import DataLoader

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:
        if abs(num) < 1024.0:


Reported by Pylint.

Unable to import 'torch.optim'
Error

Line: 14 Column: 1

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:
        if abs(num) < 1024.0:
            return "%3.2f%sB" % (num, unit)


Reported by Pylint.

Unused argument 'suffix'
Error

Line: 16 Column: 21

from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:
        if abs(num) < 1024.0:
            return "%3.2f%sB" % (num, unit)
        num /= 1024.0



Reported by Pylint.
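
Fix sketch: beyond the unused argument, the visible body hardcodes "B" and returns None once the loop is exhausted; a rewrite that uses `suffix` and always returns (the "Pi" fallback unit is an assumption):

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti']:
        if abs(num) < 1024.0:
            return "%3.2f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%3.2f%s%s" % (num, 'Pi', suffix)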

Using the global statement
Error

Line: 71 Column: 9

        self.src_mask = None

    def forward(self, src):
        global iteration_count
        iteration_count += 1

        if self.src_mask is None or self.src_mask.size(0) != len(src):
            device = src.device
            mask = nn.Transformer.generate_square_subsequent_mask(len(src)).to(device)


Reported by Pylint.

Redefining name 'args' from outer scope (line 270)
Error

Line: 105 Column: 16

        super(TransformerLMSequential, self).__init__(*layers)


def make_model(args, device, ntokens):
    ninp = 2048  # embedding dimension
    nhid = 2048  # the dimension of the feedforward network model in nn.TransformerEncoder
    nhead = 32  # the number of heads in the multiheadattention models
    dropout = 0
    initrange = 0.1


Reported by Pylint.
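
Fix sketch: the parameter shadows the module-level `args` parsed at line 270; renaming it removes the ambiguity (call sites must change to match):

def make_model(cli_args, device, ntokens):  # renamed from `args`
    ninp = 2048  # embedding dimension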