The following issues were found:

caffe2/quantization/server/dnnlowp_test_utils.py
103 issues
No name 'dnnlowp_pybind11' in module 'caffe2.quantization.server'
Error

Line: 7 Column: 1

              
import numpy as np
from caffe2.python import utils, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import assume


# This function asserts quantized results (output[1:]) are close enough to
# floating point results (output[0]).

            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 8 Column: 1

import numpy as np
from caffe2.python import utils, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import assume


# This function asserts quantized results (output[1:]) are close enough to
# floating point results (output[0]).
# The error bound is derived based on assumption that there's no input

            

Reported by Pylint.
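
These two import errors are usually false positives from the analysis environment rather than bugs: dnnlowp_pybind11 is a compiled extension module and hypothesis is a test-only dependency, so Pylint may simply not see them. Assuming both are available at runtime, a minimal sketch of acknowledging the findings inline (project-wide, the same effect can be had with Pylint's ignored-modules or extension-pkg-allow-list options):

import numpy as np
from caffe2.python import utils, workspace
# Compiled extension; not resolvable by static analysis in this environment.
from caffe2.quantization.server import dnnlowp_pybind11  # pylint: disable=no-name-in-module
# Test-only dependency that may be missing from the lint environment.
from hypothesis import assume  # pylint: disable=import-error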

Unused argument 'X_max'
Error

Line: 44 Column: 60

              
# Make sure we won't have overflows from vpmaddubsw instruction used in fbgemm)
def avoid_vpmaddubsw_overflow_fc(
    batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
    for i, j in np.ndindex((batch_size, output_channels)):
        for k in range(0, input_channels // 2 * 2, 2):
            x0 = X[i, k] - X_min
            x1 = X[i, k + 1] - X_min

            

Reported by Pylint.

Unused argument 'W_max'
Error

Line: 44 Column: 77

              
# Make sure we won't have overflows from vpmaddubsw instruction used in fbgemm)
def avoid_vpmaddubsw_overflow_fc(
    batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
    for i, j in np.ndindex((batch_size, output_channels)):
        for k in range(0, input_channels // 2 * 2, 2):
            x0 = X[i, k] - X_min
            x1 = X[i, k + 1] - X_min

            

Reported by Pylint.
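
Both unused-argument findings point at parameters that avoid_vpmaddubsw_overflow_fc accepts but never reads in its body. Assuming no caller passes them by keyword, the lightest fix is to give them underscore-prefixed names, which Pylint's default ignored-argument-names pattern treats as intentionally unused; a hedged sketch:

def avoid_vpmaddubsw_overflow_fc(
    batch_size, input_channels, output_channels, X, X_min, _X_max, W, W_min, _W_max
):
    for i, j in np.ndindex((batch_size, output_channels)):
        for k in range(0, input_channels // 2 * 2, 2):
            x0 = X[i, k] - X_min
            x1 = X[i, k + 1] - X_min

If the keyword names are part of the expected interface, a targeted pylint: disable=unused-argument pragma on the signature is the alternative.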

Unused argument 'X_max'
Error

Line: 87 Column: 5

    batch_size,
    X,
    X_min,
    X_max,
    W,
    W_min,
    W_max,
):
    ndim = len(sizes)

            

Reported by Pylint.

Unused argument 'W_max'
Error

Line: 90 Column: 5

    X_max,
    W,
    W_min,
    W_max,
):
    ndim = len(sizes)
    dkernels = tuple((dilations[i] * (kernels[i] - 1) + 1) for i in range(ndim))
    size_cols = tuple(
        (sizes[i] + 2 * pads[i] - dkernels[i]) // strides[i] + 1 for i in range(ndim)

            

Reported by Pylint.

Unused variable 'i'
Error

Line: 413 Column: 9

              
    if init_net:
        test_case.ws.run(init_net)
    for i in range(1 if engine == "" else 2):
        test_case.ws.run(net)
        Y = test_case.ws.blobs["Y"].fetch()
        if order:
            outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
        else:

            

Reported by Pylint.
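
The loop at line 413 only uses the index to repeat the run, so binding it to an underscore tells both readers and Pylint that the value is intentionally discarded; a minimal sketch:

    if init_net:
        test_case.ws.run(init_net)
    for _ in range(1 if engine == "" else 2):
        test_case.ws.run(net)
        Y = test_case.ws.blobs["Y"].fetch()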

Missing module docstring
Error

Line: 1 Column: 1

              

import collections

import numpy as np
from caffe2.python import utils, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import assume


            

Reported by Pylint.
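
missing-module-docstring is cleared by a one-line string placed before the imports. The wording below is only illustrative of what such a docstring might say for this file:

"""Shared helpers for DNNLOWP quantization tests (result checking and overflow avoidance)."""

import collections

import numpy as np
from caffe2.python import utils, workspace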

Missing function or method docstring
Error

Line: 15 Column: 1

# floating point results (output[0]).
# The error bound is derived based on assumption that there's no input
# quantization error.
def check_quantized_results_close(outputs, ref=None, symmetric=False, atol_scale=0.53):
    if ref is None:
        ref = outputs[0][0]
    if ref.size == 0:
        return
    ref_min = min(np.min(ref), 0)

            

Reported by Pylint.

Variable name "o" doesn't conform to snake_case naming style
Error

Line: 29 Column: 9

    # should be divided by 2 in an exact math, but divide by 1.9 here
    # considering finite precision in floating-point numbers
    atol = ref_scale * atol_scale
    for o in outputs[1:]:
        np.testing.assert_allclose(o[0], outputs[0][0], atol=atol, rtol=0)


def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."

            

Reported by Pylint.
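
Pylint only accepts single-letter names that appear in its good-names list, so the loop variable can either be renamed to something descriptive or "o" can be added to good-names in the project configuration. A minimal sketch of the rename:

    atol = ref_scale * atol_scale
    for output in outputs[1:]:
        np.testing.assert_allclose(output[0], outputs[0][0], atol=atol, rtol=0)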

caffe2/python/operator_test/pooling_test.py
103 issues
Unable to import 'hypothesis'
Error

Line: 6 Column: 1

              

import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import os
import unittest

from caffe2.python import core, utils, workspace

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 7 Column: 1

              
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import os
import unittest

from caffe2.python import core, utils, workspace
import caffe2.python.hip_test_util as hiputl

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 68 Column: 38

                                   "This is a test that reproduces a cudnn error. If you "
                     "want to run it, set env variable CAFFE2_DEBUG=1.")
    @given(**hu.gcs_cuda_only)
    def test_pooling_big_batch(self, gc, dc):
        op = core.CreateOperator(
            "AveragePool",
            ["X"],
            ["Y"],
            stride=1,

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 200 Column: 61

                         batch_size=st.integers(0, 3),
           **hu.gcs_gpu_only)
    def test_pooling_with_index(self, stride, pad, kernel, size,
                                input_channels, batch_size, gc, dc):
        assume(pad < kernel)
        op = core.CreateOperator(
            "MaxPoolWithIndex",
            ["X"],
            ["Y", "Y_index"],

            

Reported by Pylint.

Reimport 'unittest' (imported line 9)
Error

Line: 466 Column: 5

              

if __name__ == "__main__":
    import unittest
    unittest.main()

            

Reported by Pylint.
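
unittest is already imported at the top of the module, so the local import inside the __main__ guard is redundant and can simply be dropped:

if __name__ == "__main__":
    unittest.main()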

Missing module docstring
Error

Line: 1 Column: 1

              



import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import os
import unittest

            

Reported by Pylint.

standard import "import os" should be placed before "import numpy as np"
Error

Line: 8 Column: 1

import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import os
import unittest

from caffe2.python import core, utils, workspace
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu

            

Reported by Pylint.

standard import "import unittest" should be placed before "import numpy as np"
Error

Line: 9 Column: 1

from hypothesis import assume, given, settings
import hypothesis.strategies as st
import os
import unittest

from caffe2.python import core, utils, workspace
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu


            

Reported by Pylint.
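
Both ordering findings call for the same rearrangement: standard-library imports first, then third-party packages, then the caffe2 modules. A sketch of the reordered header, keeping every import shown in the snippets above:

import os
import unittest

import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.python import core, utils, workspace
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu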

Missing class docstring
Error

Line: 15 Column: 1

import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu

class TestPooling(hu.HypothesisTestCase):
    # CUDNN does NOT support different padding values and we skip it
    @given(stride_h=st.integers(1, 3),
           stride_w=st.integers(1, 3),
           pad_t=st.integers(0, 3),
           pad_l=st.integers(0, 3),

            

Reported by Pylint.

Too many local variables (17/15)
Error

Line: 32 Column: 5

                                                 "MaxPool2D", "AveragePool2D"]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_pooling_separate_stride_pad(self, stride_h, stride_w,
                                         pad_t, pad_l, pad_b,
                                         pad_r, kernel, size,
                                         input_channels,
                                         batch_size, order,
                                         op_type,

            

Reported by Pylint.
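
too-many-locals is a refactoring hint rather than a correctness problem; for a Hypothesis test that legitimately draws this many parameters, the usual choices are to raise max-locals in the Pylint configuration or to suppress the check on this one test. A hedged sketch of the local suppression (the trailing gc and dc parameters are assumed to come from **hu.gcs):

    @settings(deadline=10000)
    def test_pooling_separate_stride_pad(  # pylint: disable=too-many-locals
        self, stride_h, stride_w, pad_t, pad_l, pad_b, pad_r, kernel, size,
        input_channels, batch_size, order, op_type, gc, dc,
    ):
        ...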

caffe2/python/regularizer.py
103 issues
String statement has no effect
Error

Line: 18 Column: 5

    def __init__(self):
        self.kEpsilon = 1e-9

    """
    Adds regularization to train_net for given parameter. Its factor ahead of
    regularization is given when initialization.
    The param should be a BlobReference.
    """


            

Reported by Pylint.
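
The triple-quoted string sits after __init__, so Python evaluates and discards it instead of attaching it as documentation. Moving it to the first statement of the class turns it into the class docstring; a minimal sketch, assuming the enclosing class is the Regularizer base class:

class Regularizer(object):
    """
    Adds regularization to train_net for given parameter. Its factor ahead of
    regularization is given when initialization.
    The param should be a BlobReference.
    """

    def __init__(self):
        self.kEpsilon = 1e-9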

Unused argument 'grad'
Error

Line: 39 Column: 56

                      )
        return getattr(self, run_func)(net, param_init_net, param, grad)

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None


            

Reported by Pylint.

Unused argument 'param'
Error

Line: 39 Column: 49

                      )
        return getattr(self, run_func)(net, param_init_net, param, grad)

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None


            

Reported by Pylint.

Unused argument 'param_init_net'
Error

Line: 39 Column: 33

                      )
        return getattr(self, run_func)(net, param_init_net, param, grad)

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None


            

Reported by Pylint.

Unused argument 'net'
Error

Line: 39 Column: 28

                      )
        return getattr(self, run_func)(net, param_init_net, param, grad)

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None


            

Reported by Pylint.

Unused argument 'param'
Error

Line: 42 Column: 57

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None

    def _feature_grouping(self, param, net):
        # Possible alternative grouping method via summing over absolute values
        # Compute l2norm over feature weights

            

Reported by Pylint.

Unused argument 'net'
Error

Line: 42 Column: 36

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None

    def _feature_grouping(self, param, net):
        # Possible alternative grouping method via summing over absolute values
        # Compute l2norm over feature weights

            

Reported by Pylint.

Unused argument 'param_init_net'
Error

Line: 42 Column: 41

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None

    def _feature_grouping(self, param, net):
        # Possible alternative grouping method via summing over absolute values
        # Compute l2norm over feature weights

            

Reported by Pylint.

Unused argument 'grad'
Error

Line: 42 Column: 64

    def _run_on_loss(self, net, param_init_net, param, grad=None):
        return None

    def _run_after_optimizer(self, net, param_init_net, param, grad):
        return None

    def _feature_grouping(self, param, net):
        # Possible alternative grouping method via summing over absolute values
        # Compute l2norm over feature weights

            

Reported by Pylint.

Redefining built-in 'min'
Error

Line: 66 Column: 9

        net,
        param,
        grad=None,
        min=None,
        max=None,
        open_range=False,
        left_open=False,
        right_open=False,
    ):

            

Reported by Pylint.
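
The min and max keyword arguments shadow the built-ins for the whole method body. If the keyword names are not part of a public API, renaming them is the cleanest fix; a hedged sketch (the method name below is a placeholder, only the parameter list is taken from the snippet):

    def _clip_by_bounds(  # placeholder name for the method flagged at line 66
        self,
        net,
        param,
        grad=None,
        min_value=None,
        max_value=None,
        open_range=False,
        left_open=False,
        right_open=False,
    ):
        ...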

test/jit/test_complex.py
103 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 4 Column: 1

import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 5 Column: 1

import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath


            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 26 Column: 9

        def fn(a: List[complex], idx: int):
            return a[idx]

        input = [1j, 2, 3 + 4j, -5, -7j]
        self.checkScript(fn, (input, 2))

    def test_complexdict(self):
        def fn(a: Dict[complex, complex], key: complex) -> complex:
            return a[key]

            

Reported by Pylint.
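
Shadowing the input built-in is confined to this test, but the warning is easy to clear by choosing another name; a minimal sketch:

        def fn(a: List[complex], idx: int):
            return a[idx]

        values = [1j, 2, 3 + 4j, -5, -7j]
        self.checkScript(fn, (values, 2))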

Redefining built-in 'input'
Error

Line: 33 Column: 9

        def fn(a: Dict[complex, complex], key: complex) -> complex:
            return a[key]

        input = {2 + 3j : 2 - 3j, -4.3 - 2j: 3j}
        self.checkScript(fn, (input, -4.3 - 2j))

    def test_pickle(self):
        class ComplexModule(torch.jit.ScriptModule):
            def __init__(self):

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 94 Column: 24

                res_script = None
                try:
                    res_python = f(a)
                except Exception as e:
                    res_python = e
                try:
                    res_script = f_script(a)
                except Exception as e:
                    res_script = e

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 98 Column: 24

                    res_python = e
                try:
                    res_script = f_script(a)
                except Exception as e:
                    res_script = e

                if res_python != res_script:
                    if isinstance(res_python, Exception):
                        continue

            

Reported by Pylint.
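
In this test the broad except appears deliberate: the goal is to capture whatever Python raises and compare it with whatever TorchScript raises, so narrowing the exception type would change the behaviour being tested. If that reading is right, the usual move is to keep the code and document the intent with an inline suppression; a hedged sketch:

                try:
                    res_python = f(a)
                except Exception as e:  # pylint: disable=broad-except
                    res_python = e
                try:
                    res_script = f_script(a)
                except Exception as e:  # pylint: disable=broad-except
                    res_script = e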

Missing module docstring
Error

Line: 1 Column: 1

import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath

            

Reported by Pylint.

standard import "import os" should be placed before "import torch"
Error

Line: 2 Column: 1

import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath

            

Reported by Pylint.

standard import "import sys" should be placed before "import torch"
Error

Line: 3 Column: 1

import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from torch.testing._internal.common_utils import IS_MACOS
from typing import List, Dict
from itertools import product
from textwrap import dedent
import cmath

            

Reported by Pylint.

torch/quantization/fake_quantize.py
101 issues
Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from torch.nn import Module
from .observer import MovingAverageMinMaxObserver, HistogramObserver, MovingAveragePerChannelMinMaxObserver, _with_args
import re
from abc import ABC, abstractmethod
from typing import Any, Tuple

def _is_per_channel(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]

            

Reported by Pylint.
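
This finding usually means Pylint was pointed at the file directly and could not see torch/quantization as a package, so the relative import looks like it escapes the top level; the import itself is legitimate. One way to sidestep the warning without touching the Pylint setup is to spell the import absolutely, which should be equivalent assuming the module is imported as part of the installed torch package:

from torch.quantization.observer import (
    MovingAverageMinMaxObserver,
    HistogramObserver,
    MovingAveragePerChannelMinMaxObserver,
    _with_args,
)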

Module 'torch' has no 'per_channel_symmetric' member
Error

Line: 9 Column: 24

from typing import Any, Tuple

def _is_per_channel(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]

def _is_per_tensor(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]

def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:

            

Reported by Pylint.
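
This and the following no-member findings (per_channel_affine, per_tensor_symmetric, torch.tensor, torch.uint8, and so on) are characteristic false positives on torch, whose attributes are partly created by the C extension and invisible to static analysis. Rather than editing the code, projects typically tell Pylint to trust the package, for example with generated-members=torch.* or extension-pkg-allow-list=torch in the configuration; the inline form would look like this hedged sketch:

def _is_per_channel(qscheme: 'torch.qscheme') -> bool:
    # The qscheme constants are provided by torch's C extension at runtime.
    # pylint: disable=no-member
    return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]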

Module 'torch' has no 'per_channel_affine' member
Error

Line: 9 Column: 53

from typing import Any, Tuple

def _is_per_channel(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]

def _is_per_tensor(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]

def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:

            

Reported by Pylint.

Module 'torch' has no 'per_tensor_symmetric' member
Error

Line: 12 Column: 24

    return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]

def _is_per_tensor(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]

def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric]

class FakeQuantizeBase(ABC, Module):

            

Reported by Pylint.

Module 'torch' has no 'per_tensor_affine' member
Error

Line: 12 Column: 52

    return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]

def _is_per_tensor(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]

def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric]

class FakeQuantizeBase(ABC, Module):

            

Reported by Pylint.

Module 'torch' has no 'per_tensor_symmetric' member
Error

Line: 15 Column: 24

    return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]

def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric]

class FakeQuantizeBase(ABC, Module):
    r""" Base fake quantize module
    Any fake quantize implementation should derive from this class.


            

Reported by Pylint.

Module 'torch' has no 'per_channel_symmetric' member
Error

Line: 15 Column: 52

    return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]

def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:
    return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric]

class FakeQuantizeBase(ABC, Module):
    r""" Base fake quantize module
    Any fake quantize implementation should derive from this class.


            

Reported by Pylint.

Module 'torch' has no 'uint8' member
Error

Line: 36 Column: 76

        # fake_quant_enabled and observer_enabled are buffers to support their
        # replication in DDP. Data type is uint8 because NCCL does not support
        # bool tensors.
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))

    @abstractmethod
    def forward(self, x):
        pass

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 36 Column: 52

        # fake_quant_enabled and observer_enabled are buffers to support their
        # replication in DDP. Data type is uint8 because NCCL does not support
        # bool tensors.
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))

    @abstractmethod
    def forward(self, x):
        pass

            

Reported by Pylint.

Module 'torch' has no 'uint8' member
Error

Line: 37 Column: 74

        # replication in DDP. Data type is uint8 because NCCL does not support
        # bool tensors.
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))

    @abstractmethod
    def forward(self, x):
        pass


            

Reported by Pylint.

test/distributed/test_c10d_spawn_gloo.py
100 issues
Unable to import 'torch'
Error

Line: 7 Column: 1

import tempfile

import test_c10d_spawn
import torch
import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \

            

Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 8 Column: 1

              
import test_c10d_spawn
import torch
import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \
    create_device, MultiProcessTestCase, skip_if_lt_x_gpu

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 9 Column: 1

import test_c10d_spawn
import torch
import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \
    create_device, MultiProcessTestCase, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if, TEST_WITH_TSAN, TEST_WITH_DEV_DBG_ASAN

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_cuda'
Error

Line: 11 Column: 1

import torch.distributed as c10d
import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \
    create_device, MultiProcessTestCase, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if, TEST_WITH_TSAN, TEST_WITH_DEV_DBG_ASAN

# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_distributed'
Error

Line: 12 Column: 1

import torch.nn as nn
from test_c10d_spawn import _torch_dist_nn_available
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \
    create_device, MultiProcessTestCase, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if, TEST_WITH_TSAN, TEST_WITH_DEV_DBG_ASAN

# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 14 Column: 1

from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
from torch.testing._internal.common_distributed import requires_gloo, \
    create_device, MultiProcessTestCase, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if, TEST_WITH_TSAN, TEST_WITH_DEV_DBG_ASAN

# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
    class ProcessGroupShareTensorTest(test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase):


            

Reported by Pylint.

Access to a protected member _Options of a client class
Error

Line: 22 Column: 20

              
        @classmethod
        def opts(cls, threads=2):
            opts = c10d.ProcessGroupGloo._Options()
            opts._timeout = 5.0
            opts._devices = [create_device(interface='lo')]
            opts._threads = threads
            return opts


            

Reported by Pylint.
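
The test helper reaches into ProcessGroupGloo._Options and its private fields on purpose to configure the process group under test, so the usual resolution here is a scoped suppression in the test rather than a code change; a hedged sketch:

        @classmethod
        def opts(cls, threads=2):
            # Deliberate use of private gloo options in test setup.
            # pylint: disable=protected-access
            opts = c10d.ProcessGroupGloo._Options()
            opts._timeout = 5.0
            opts._devices = [create_device(interface='lo')]
            opts._threads = threads
            return opts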

Access to a protected member _timeout of a client class
Error

Line: 23 Column: 13

        @classmethod
        def opts(cls, threads=2):
            opts = c10d.ProcessGroupGloo._Options()
            opts._timeout = 5.0
            opts._devices = [create_device(interface='lo')]
            opts._threads = threads
            return opts

        @classmethod

            

Reported by Pylint.

Access to a protected member _devices of a client class
Error

Line: 24 Column: 13

        def opts(cls, threads=2):
            opts = c10d.ProcessGroupGloo._Options()
            opts._timeout = 5.0
            opts._devices = [create_device(interface='lo')]
            opts._threads = threads
            return opts

        @classmethod
        def _init_pg_gloo(cls, rank, filename, world_size):

            

Reported by Pylint.

Access to a protected member _threads of a client class
Error

Line: 25 Column: 13

            opts = c10d.ProcessGroupGloo._Options()
            opts._timeout = 5.0
            opts._devices = [create_device(interface='lo')]
            opts._threads = threads
            return opts

        @classmethod
        def _init_pg_gloo(cls, rank, filename, world_size):
            store = c10d.FileStore(filename, world_size)

            

Reported by Pylint.

torch/utils/hipify/hipify_python.py
100 issues
Attempted relative import beyond top-level package
Error

Line: 33 Column: 1

import sys
import os

from . import constants
from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
from .cuda_to_hip_mappings import MATH_TRANSPILATIONS

from typing import Dict, List, Iterator, Optional
from collections.abc import Mapping, Iterable

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 34 Column: 1

import os

from . import constants
from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
from .cuda_to_hip_mappings import MATH_TRANSPILATIONS

from typing import Dict, List, Iterator, Optional
from collections.abc import Mapping, Iterable
HipifyResult = Dict[str, Optional[str]]

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 35 Column: 1

              
from . import constants
from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
from .cuda_to_hip_mappings import MATH_TRANSPILATIONS

from typing import Dict, List, Iterator, Optional
from collections.abc import Mapping, Iterable
HipifyResult = Dict[str, Optional[str]]
HipifyFinalResult = Dict[str, HipifyResult]

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 109 Column: 24

                          os.mkdir(dn)
            self.dirs_to_clean.append(os.path.abspath(dn))

    def __exit__(self, type, value, traceback):
        if not self.keep_intermediates:
            for f in self.files_to_clean:
                os.unlink(f)
            for d in self.dirs_to_clean[::-1]:
                os.rmdir(d)

            

Reported by Pylint.
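
__exit__ receives its arguments positionally from the interpreter, so they can be renamed to the conventional exc_type, exc_value, exc_traceback without affecting callers; that clears both this redefined-builtin warning and the redefined-outer-name warning on 'value' below. A minimal sketch:

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not self.keep_intermediates:
            for f in self.files_to_clean:
                os.unlink(f)
            for d in self.dirs_to_clean[::-1]:
                os.rmdir(d)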

Redefining name 'value' from outer scope (line 679)
Error

Line: 109 Column: 30

                          os.mkdir(dn)
            self.dirs_to_clean.append(os.path.abspath(dn))

    def __exit__(self, type, value, traceback):
        if not self.keep_intermediates:
            for f in self.files_to_clean:
                os.unlink(f)
            for d in self.dirs_to_clean[::-1]:
                os.rmdir(d)

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 645 Column: 24

                try:
                    recurse = self._pattern(data[char])
                    alt.append(self.quote(char) + recurse)
                except Exception:
                    cc.append(self.quote(char))
            else:
                q = 1
        cconly = not len(alt) > 0


            

Reported by Pylint.

Unused variable 'dirpath'
Error

Line: 746 Column: 13

    def mk_repl(templ, include_current_dir=True):
        def repl(m):
            f = m.group(1)
            dirpath, filename = os.path.split(f)
            if (
                f.startswith("ATen/cuda")
                or f.startswith("ATen/native/cuda")
                or f.startswith("ATen/native/quantized/cuda")
                or f.startswith("ATen/native/sparse/cuda")

            

Reported by Pylint.

Redefining name 'value' from outer scope (line 679)
Error

Line: 785 Column: 17

                                                                  os.path.relpath(header_filepath, output_directory),
                                                    all_files, includes, stats, hip_clang_launch, is_pytorch_extension,
                                                    clean_ctx, show_progress)
                value = HIPIFY_FINAL_RESULT[header_filepath]["hipified_path"]
                assert value is not None
                return templ.format(os.path.relpath(value, header_dir))

            return m.group(0)
        return repl

            

Reported by Pylint.

standard import "from typing import Dict, List, Iterator, Optional" should be placed before "from . import constants"
Error

Line: 37 Column: 1

from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
from .cuda_to_hip_mappings import MATH_TRANSPILATIONS

from typing import Dict, List, Iterator, Optional
from collections.abc import Mapping, Iterable
HipifyResult = Dict[str, Optional[str]]
HipifyFinalResult = Dict[str, HipifyResult]
HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n"
HIPIFY_FINAL_RESULT: HipifyFinalResult = {}

            

Reported by Pylint.

standard import "from collections.abc import Mapping, Iterable" should be placed before "from . import constants"
Error

Line: 38 Column: 1

from .cuda_to_hip_mappings import MATH_TRANSPILATIONS

from typing import Dict, List, Iterator, Optional
from collections.abc import Mapping, Iterable
HipifyResult = Dict[str, Optional[str]]
HipifyFinalResult = Dict[str, HipifyResult]
HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n"
HIPIFY_FINAL_RESULT: HipifyFinalResult = {}


            

Reported by Pylint.

torch/nn/modules/sparse.py
98 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

from torch import Tensor
from torch.nn.parameter import Parameter

from .module import Module
from .. import functional as F
from .. import init


class Embedding(Module):

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 8 Column: 1

from torch.nn.parameter import Parameter

from .module import Module
from .. import functional as F
from .. import init


class Embedding(Module):
    r"""A simple lookup table that stores embeddings of a fixed dictionary and size.

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 9 Column: 1

              
from .module import Module
from .. import functional as F
from .. import init


class Embedding(Module):
    r"""A simple lookup table that stores embeddings of a fixed dictionary and size.


            

Reported by Pylint.

Module 'torch' has no 'empty' member
Error

Line: 139 Column: 37

        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if _weight is None:
            self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = Parameter(_weight)

            

Reported by Pylint.

Module 'torch' has no 'empty' member
Error

Line: 333 Column: 37

                              padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        if _weight is None:
            self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = Parameter(_weight)

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 157 Column: 23

            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        return F.embedding(
            input, self.weight, self.padding_idx, self.max_norm,
            self.norm_type, self.scale_grad_by_freq, self.sparse)

    def extra_repr(self) -> str:

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 352 Column: 23

            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
        """Forward pass of EmbeddingBag.

        Args:
            input (Tensor): Tensor containing bags of indices into the embedding matrix.
            offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

from typing import Optional

import torch
from torch import Tensor
from torch.nn.parameter import Parameter

from .module import Module
from .. import functional as F
from .. import init

            

Reported by Pylint.

Too many instance attributes (8/7)
Error

Line: 12 Column: 1

from .. import init


class Embedding(Module):
    r"""A simple lookup table that stores embeddings of a fixed dictionary and size.

    This module is often used to store word embeddings and retrieve them using indices.
    The input to the module is a list of indices, and the output is the corresponding
    word embeddings.

            

Reported by Pylint.

Line too long (120/100)
Error

Line: 22 Column: 1

    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
                                     therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
                                     i.e. it remains as a fixed "pad". For a newly constructed Embedding,
                                     the embedding vector at :attr:`padding_idx` will default to all zeros,
                                     but can be updated to another value to be used as the padding vector.
        max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`

            

Reported by Pylint.

caffe2/python/operator_test/activation_ops_test.py
98 issues
Unable to import 'hypothesis'
Error

Line: 8 Column: 1

              
import numpy as np

from hypothesis import given, assume, settings
import hypothesis.strategies as st

from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 9 Column: 1

import numpy as np

from hypothesis import given, assume, settings
import hypothesis.strategies as st

from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
import caffe2.python.serialized_test.serialized_test_util as serial

            

Reported by Pylint.

Unable to import 'scipy.stats'
Error

Line: 16 Column: 1

import caffe2.python.mkl_test_util as mu
import caffe2.python.serialized_test.serialized_test_util as serial

from scipy.stats import norm

import unittest


class TestActivations(serial.SerializedTestCase):

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 69 Column: 55

                                   "Relu for float16 can only run on GPU now.")
    @given(X=hu.tensor(dtype=np.float16), in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]), **hu.gcs)
    def test_relu_fp16(self, X, in_place, engine, gc, dc):
        # fp16 is only supported on CUDA/HIP
        assume(core.IsGPUDeviceType(gc.device_type))
        op = core.CreateOperator(
            "Relu",
            ["X"],

            

Reported by Pylint.

Unused argument 'fwd_inputs'
Error

Line: 82 Column: 43

        def relu_ref(X):
            return [np.maximum(X, 0.0)]

        def relu_grad_ref(g_out, outputs, fwd_inputs):
            dY = g_out
            [Y] = outputs
            dX = dY
            dX[Y == 0] = 0
            return [dX]

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 268 Column: 35

              
    @given(n=st.integers(0, 6), m=st.integers(4, 6),
           seed=st.integers(0, 1000), **hu.gcs_cpu_only)
    def test_mish(self, n, m, gc, dc, seed):
        np.random.seed(seed)
        X = np.random.rand(n, m).astype(np.float32)

        def mish_ref(X):
            return (X * np.tanh(np.log1p(np.exp(X))),)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




import numpy as np

from hypothesis import given, assume, settings
import hypothesis.strategies as st

            

Reported by Pylint.

standard import "import unittest" should be placed before "import numpy as np"
Error

Line: 18 Column: 1

              
from scipy.stats import norm

import unittest


class TestActivations(serial.SerializedTestCase):
    @given(X=hu.tensor(), in_place=st.booleans(),
                  engine=st.sampled_from(["", "CUDNN"]), **mu.gcs)

            

Reported by Pylint.

Missing class docstring
Error

Line: 21 Column: 1

import unittest


class TestActivations(serial.SerializedTestCase):
    @given(X=hu.tensor(), in_place=st.booleans(),
                  engine=st.sampled_from(["", "CUDNN"]), **mu.gcs)
    @settings(deadline=10000)
    def test_relu(self, X, in_place, engine, gc, dc):
        if gc == mu.mkl_do:

            

Reported by Pylint.

Argument name "X" doesn't conform to snake_case naming style
Error

Line: 25 Column: 5

    @given(X=hu.tensor(), in_place=st.booleans(),
                  engine=st.sampled_from(["", "CUDNN"]), **mu.gcs)
    @settings(deadline=10000)
    def test_relu(self, X, in_place, engine, gc, dc):
        if gc == mu.mkl_do:
            in_place = False

        op = core.CreateOperator(
            "Relu",

            

Reported by Pylint.

test/distributed/elastic/multiprocessing/api_test.py
98 issues
Unable to import 'torch'
Error

Line: 22 Column: 1

from unittest import mock
from unittest.mock import patch

import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
    MultiprocessContext,
    SignalException,

            

Reported by Pylint.

Unable to import 'torch.multiprocessing'
Error

Line: 23 Column: 1

from unittest.mock import patch

import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
    MultiprocessContext,
    SignalException,
    RunProcsResult,

            

Reported by Pylint.

Unable to import 'torch.distributed.elastic.multiprocessing'
Error

Line: 24 Column: 1

              
import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
    MultiprocessContext,
    SignalException,
    RunProcsResult,
    Std,

            

Reported by Pylint.

Unable to import 'torch.distributed.elastic.multiprocessing.api'
Error

Line: 25 Column: 1

import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
    MultiprocessContext,
    SignalException,
    RunProcsResult,
    Std,
    _validate_full_rank,

            

Reported by Pylint.

Unable to import 'torch.distributed.elastic.multiprocessing.errors.error_handler'
Error

Line: 34 Column: 1

    to_map,
    _wrap,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error
from torch.testing._internal.common_utils import (
    NO_MULTIPROCESSING_SPAWN,
    TEST_WITH_ASAN,
    TEST_WITH_DEV_DBG_ASAN,
    TEST_WITH_TSAN,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 35 Column: 1

    _wrap,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error
from torch.testing._internal.common_utils import (
    NO_MULTIPROCESSING_SPAWN,
    TEST_WITH_ASAN,
    TEST_WITH_DEV_DBG_ASAN,
    TEST_WITH_TSAN,
    IS_IN_CI,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 45 Column: 1

    IS_MACOS,
    sandcastle_skip_if,
)
from torch.testing._internal.common_utils import run_tests


class RunProcResultsTest(unittest.TestCase):
    def setUp(self):
        self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")

            

Reported by Pylint.

Module 'signal' has no 'Signals' member
Error

Line: 825 Column: 69

                              self._test_zombie_workflow(wait_fn, s)

        def _test_zombie_workflow(
            self, entrypoint: Union[str, Callable], signal_to_send: signal.Signals
        ) -> None:
            mp_queue = mp.get_context("spawn").Queue()
            child_nproc = 2
            ctx = mp.spawn(
                start_processes_zombie_test,

            

Reported by Pylint.

Unused argument 'log_mock'
Error

Line: 66 Column: 33

        self.assertTrue(pr_fail.is_failed())

    @patch("torch.distributed.elastic.multiprocessing.errors.log")
    def test_get_failures(self, log_mock):
        with mock.patch("time.time", side_effect=[3, 2, 1]):
            error_file0 = os.path.join(self.test_dir, "error0.json")
            error_file1 = os.path.join(self.test_dir, "error1.json")
            _write_error(RuntimeError("error 0"), error_file0)
            _write_error(RuntimeError("error 1"), error_file1)

            

Reported by Pylint.

Redefining built-in 'bin'
Error

Line: 179 Column: 1

                  ]


def bin(name: str):
    dir = os.path.dirname(__file__)
    return os.path.join(dir, "bin", name)


def wait_fn(wait_time: int = 300) -> None:

            

Reported by Pylint.