The following issues were found by Pylint static analysis:

torch/distributions/relaxed_bernoulli.py
32 issues
Module 'torch' has no 'Size' member
Error

Line: 45 Column: 27

                          self.logits, = broadcast_all(logits)
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super(LogitRelaxedBernoulli, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):

            

Reported by Pylint.
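
This finding (and the similar 'rand', 'full_like', 'finfo', 'float16', and 'clamp' no-member reports later in this list) is a well-known Pylint false positive: the members exist at runtime but are provided by torch's C extension, so Pylint cannot see them statically. A minimal sketch of a line-level suppression, assuming the project prefers suppression over a code change:

    import torch

    # torch.Size exists at runtime; the no-member report is a static-analysis
    # false positive, so it can be silenced on the line where it fires.
    batch_shape = torch.Size()  # pylint: disable=no-member

A project-wide alternative is Pylint's extension-pkg-whitelist (or generated-members) setting, which tells the checker to trust members coming from the torch extension module.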

Module 'torch' has no 'Size' member
Error

Line: 52 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
        batch_shape = torch.Size(batch_shape)
        new.temperature = self.temperature
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:

            

Reported by Pylint.

An attribute defined in relaxed_bernoulli line 42 hides this method
Error

Line: 68 Column: 5

                      return self._param.new(*args, **kwargs)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

            

Reported by Pylint.

An attribute defined in relaxed_bernoulli line 39 hides this method
Error

Line: 72 Column: 5

                      return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

            

Reported by Pylint.
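
The two method-hidden reports above come from the lazy_property pattern: the decorator computes the value on first access and then stores it on the instance under the same name, deliberately shadowing the method. A self-contained sketch of that mechanism (the lazy_property class below is a simplified stand-in for torch.distributions.utils.lazy_property, and Example is hypothetical), with a line-level suppression where Pylint objects:

    import math

    class lazy_property:  # simplified stand-in for torch.distributions.utils.lazy_property
        def __init__(self, fget):
            self.fget = fget

        def __get__(self, instance, owner=None):
            if instance is None:
                return self
            value = self.fget(instance)
            # Cache the result as an instance attribute with the same name; this
            # is exactly the shadowing that Pylint reports as method-hidden.
            setattr(instance, self.fget.__name__, value)
            return value

    class Example:
        def __init__(self, probs):
            self.probs = probs

        @lazy_property
        def logits(self):  # pylint: disable=method-hidden
            return math.log(self.probs / (1.0 - self.probs))

    e = Example(0.25)
    print(e.logits)  # computed once, then read back as a plain attribute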

Module 'torch' has no 'Size' member
Error

Line: 79 Column: 36

                  def param_shape(self):
        return self._param.size()

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        probs = clamp_probs(self.probs.expand(shape))
        uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))
        return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature


            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 82 Column: 32

                  def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        probs = clamp_probs(self.probs.expand(shape))
        uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))
        return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)

            

Reported by Pylint.

Method 'icdf' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs


class LogitRelaxedBernoulli(Distribution):
    r"""
    Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
    distribution.


            

Reported by Pylint.

Method 'variance' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs


class LogitRelaxedBernoulli(Distribution):
    r"""
    Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
    distribution.


            

Reported by Pylint.

Method 'enumerate_support' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs


class LogitRelaxedBernoulli(Distribution):
    r"""
    Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
    distribution.


            

Reported by Pylint.

Method 'entropy' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs


class LogitRelaxedBernoulli(Distribution):
    r"""
    Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
    distribution.


            

Reported by Pylint.
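
The four abstract-method reports above (and the matching ones for relaxed_categorical.py below) follow from how torch.distributions is designed: Distribution declares entropy, icdf, variance, enumerate_support and related members as raising NotImplementedError, and concrete subclasses only override the ones they support. A minimal sketch of a class-level suppression, assuming suppression rather than implementing the missing methods is the intended resolution (HalfImplemented is a hypothetical name):

    from torch.distributions import Distribution


    class HalfImplemented(Distribution):  # pylint: disable=abstract-method
        """Hypothetical subclass: entropy, icdf, variance and enumerate_support
        are intentionally left to the base class, which raises
        NotImplementedError if they are ever called."""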

test/jit/test_script_profile.py
32 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              import os
import sys

import torch
from torch import nn

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 5 Column: 1

              import sys

import torch
from torch import nn

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 10 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 24 Column: 23

                      self.lstm2 = nn.LSTMCell(51, 51)
        self.linear = nn.Linear(51, 1)

    def forward(self, input):
        outputs = []
        h_t = torch.zeros(input.size(0), 51)
        c_t = torch.zeros(input.size(0), 51)
        h_t2 = torch.zeros(input.size(0), 51)
        c_t2 = torch.zeros(input.size(0), 51)

            

Reported by Pylint.
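
The redefined-builtin report flags the parameter name input, which shadows Python's built-in input() inside forward. One possible fix is simply renaming the parameter; the trimmed, hypothetical version of the model below illustrates it (the real Sequence model in the test also has a second LSTM cell and a linear layer):

    import torch
    from torch import nn


    class Sequence(nn.Module):  # trimmed, hypothetical version of the test model
        def __init__(self):
            super().__init__()
            self.lstm1 = nn.LSTMCell(100, 51)

        def forward(self, inputs):  # renamed from 'input' so the builtin is not shadowed
            h_t = torch.zeros(inputs.size(0), 51)
            c_t = torch.zeros(inputs.size(0), 51)
            return self.lstm1(inputs, (h_t, c_t))


    print(Sequence()(torch.rand(10, 100))[0].shape)  # torch.Size([10, 51])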

Access to a protected member _ScriptProfile of a client class
Error

Line: 43 Column: 13

              
    def test_basic(self):
        seq = torch.jit.script(Sequence())
        p = torch.jit._ScriptProfile()
        p.enable()
        seq(torch.rand((10, 100)))
        p.disable()
        self.assertNotEqual(p.dump_string(), "")


            

Reported by Pylint.

Access to a protected member _ScriptProfile of a client class
Error

Line: 54 Column: 17

              
        @torch.jit.script
        def fn():
            p = torch.jit._ScriptProfile()
            p.enable()
            _ = seq(torch.rand((10, 100)))
            p.disable()
            return p


            

Reported by Pylint.

Access to a protected member _ScriptProfile of a client class
Error

Line: 64 Column: 21

              
    def test_multi(self):
        seq = torch.jit.script(Sequence())
        profiles = [torch.jit._ScriptProfile() for _ in range(5)]
        for p in profiles:
            p.enable()

        last = None
        while len(profiles) > 0:

            

Reported by Pylint.

Access to a protected member _ScriptProfile of a client class
Error

Line: 84 Column: 17

              
        @torch.jit.script
        def fn():
            p = torch.jit._ScriptProfile()
            p.enable()
            _ = seq(torch.rand((10, 100)))
            p.disable()
            stats0 = p.dump_string()


            

Reported by Pylint.

Access to a protected member _ScriptProfile of a client class
Error

Line: 106 Column: 13

                      self.assertNotEqual(s1, s2)

    def test_empty(self):
        p = torch.jit._ScriptProfile()
        p.enable()
        p.disable()
        self.assertEqual(p.dump_string(), "")

            

Reported by Pylint.
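
All five protected-access reports point at the same thing: the tests exercise torch.jit._ScriptProfile, which is only exposed as a private attribute, so the access is deliberate. A minimal sketch of how such an access could be kept while quieting the checker (profile_scripted_module is a hypothetical helper, not part of the test file):

    import torch


    def profile_scripted_module(module, example_input):
        # The profiler is intentionally private API; the inline comment keeps the
        # suppression scoped to the single line that needs it.
        profiler = torch.jit._ScriptProfile()  # pylint: disable=protected-access
        profiler.enable()
        module(example_input)
        profiler.disable()
        return profiler.dump_string()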

Missing module docstring
Error

Line: 1 Column: 1

              import os
import sys

import torch
from torch import nn

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.
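
The missing-module-docstring report is resolved by a one-line docstring before the imports; the wording below is only illustrative, not the file's actual description:

    """Tests for the TorchScript profiler exposed as torch.jit._ScriptProfile."""

    import os
    import sys

    import torch
    from torch import nn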

torch/distributions/relaxed_categorical.py
32 issues
Module 'torch' has no 'Size' member
Error

Line: 46 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
        batch_shape = torch.Size(batch_shape)
        new.temperature = self.temperature
        new._categorical = self._categorical.expand(batch_shape)
        super(ExpRelaxedCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 68 Column: 36

                  def probs(self):
        return self._categorical.probs

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
        gumbels = -((-(uniforms.log())).log())
        scores = (self.logits + gumbels) / self.temperature
        return scores - scores.logsumexp(dim=-1, keepdim=True)

            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 70 Column: 32

              
    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
        gumbels = -((-(uniforms.log())).log())
        scores = (self.logits + gumbels) / self.temperature
        return scores - scores.logsumexp(dim=-1, keepdim=True)

    def log_prob(self, value):

            

Reported by Pylint.

Module 'torch' has no 'full_like' member
Error

Line: 80 Column: 22

                      if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
                     self.temperature.log().mul(-(K - 1)))
        score = logits - value.mul(self.temperature)
        score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
        return score + log_scale


            

Reported by Pylint.

Method 'icdf' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

            

Reported by Pylint.

Method 'enumerate_support' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

            

Reported by Pylint.

Method 'cdf' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

            

Reported by Pylint.

Method 'entropy' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

            

Reported by Pylint.

Method 'mean' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

            

Reported by Pylint.

Method 'variance' is abstract in class 'Distribution' but is not overridden
Error

Line: 10 Column: 1

              from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

            

Reported by Pylint.

benchmarks/operator_benchmark/pt/cat_test.py
32 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import operator_benchmark as op_bench
import torch
import random
from typing import List


"""Microbenchmarks for Cat operator"""

cross_product_configs = {

            

Reported by Pylint.

Module 'operator_benchmark' has no 'config_list' member
Error

Line: 14 Column: 21

              }

# Configs for PT Cat operator
cat_configs_short = op_bench.config_list(
    attr_names=['sizes', 'N', 'dim'],
    attrs=[
        [(1,    1,      1), 2, 0],  # noqa: E241
        [(512,  512,    2), 2, 1],  # noqa: E241
        [(128, 1024,    2), 2, 1],  # noqa: E241

            

Reported by Pylint.

Module 'operator_benchmark' has no 'config_list' member
Error

Line: 26 Column: 30

              )

# Configs specific to static runtime feature - a fast path runtime for pared down models
cat_configs_static_runtime = op_bench.config_list(
    attr_names=['sizes', 'N', 'dim'],
    attrs=[
        [[(1, 160), (1, 14)], -1, 1],
        [[(1, 20, 40), (1, 4, 40), (1, 5, 40)], -1, 1],
        [[(1, 580), (1, 174)], -1, 1],

            

Reported by Pylint.

Module 'operator_benchmark' has no 'config_list' member
Error

Line: 40 Column: 20

                  tags=['static_runtime'],
)

cat_configs_long = op_bench.config_list(
    attr_names=['sizes', 'N', 'dim'],
    attrs=[
        [(2**10,    2**10,      2), 2, 0],  # noqa: E241
        [(2**10+1,  2**10-1,    2), 2, 1],  # noqa: E226,E241
        [(2**10,    2**10,      2), 2, 2],  # noqa: E241

            

Reported by Pylint.

Module 'operator_benchmark' has no 'config_list' member
Error

Line: 66 Column: 24

              )

# There is a different codepath on CUDA for >4 dimensions
cat_configs_multidim = op_bench.config_list(
    attr_names=['sizes', 'N', 'dim'],
    attrs=[
        [(2**6,     2**5,   2**2,   2**4,   2**5), 2, 2],  # noqa: E241
        [(2**4,     2**5,   2**2,   2**4,   2**5), 8, 2],  # noqa: E241
        [(2**3+1,   2**5-1, 2**2+1, 2**4-1, 2**5+1), 17, 4],  # noqa: E226,E241

            

Reported by Pylint.

Module 'operator_benchmark' has no 'config_list' member
Error

Line: 77 Column: 26

                  tags=['multidim'],
)

cat_configs_manyinputs = op_bench.config_list(
    attr_names=['sizes', 'N', 'dim'],
    attrs=[
        [[lambda: random.randint(1, 10000)], 100, 0],
        [[lambda: random.randint(1, 1000)], 1000, 0],
        [[lambda: random.randint(1, 500)], 2000, 0],

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 89 Column: 20

                  tags=['manyinputs'],
)

class CatBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, sizes, N, dim, device):
        random.seed(42)
        inputs = []
        gen_sizes = []
        if type(sizes) == list and N == -1:

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_test' member
Error

Line: 114 Column: 1

                      return torch.cat(inputs, dim=dim, out=result)


op_bench.generate_pt_test(cat_configs_short +
                          cat_configs_long +
                          cat_configs_multidim +
                          cat_configs_manyinputs +
                          cat_configs_static_runtime,
                          CatBenchmark)

            

Reported by Pylint.

String statement has no effect
Error

Line: 7 Column: 1

              from typing import List


"""Microbenchmarks for Cat operator"""

cross_product_configs = {
    'device': ['cpu', 'cuda'],
}


            

Reported by Pylint.
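
The pointless-string-statement report fires because the string literal sits after the imports, so Python treats it as a no-op expression rather than the module docstring. Moving it to the very first statement (a sketch of the file header, reusing the imports shown above) resolves it:

    """Microbenchmarks for Cat operator"""
    import operator_benchmark as op_bench
    import torch
    import random
    from typing import List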

Unused variable 'i'
Error

Line: 97 Column: 17

                      if type(sizes) == list and N == -1:
            gen_sizes = sizes
        else:
            for i in range(N):
                gen_sizes.append([old_size() if callable(old_size) else old_size for old_size in sizes])

        for s in gen_sizes:
            inputs.append(torch.rand(s, device=device))
        result = torch.empty(0, device=device)

            

Reported by Pylint.
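
The unused-variable report is about the loop index i, which is never read. The conventional fix is to name it _; a self-contained, hypothetical extraction of the loop from CatBenchmark.init:

    import random


    def generate_sizes(sizes, n):
        gen_sizes = []
        for _ in range(n):  # '_' signals that the index is intentionally unused
            gen_sizes.append([s() if callable(s) else s for s in sizes])
        return gen_sizes


    print(generate_sizes([lambda: random.randint(1, 10), 4, 5], 3))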

torch/distributed/pipeline/sync/pipeline.py
32 issues
Attempted relative import beyond top-level package
Error

Line: 17 Column: 1

              from torch import Tensor, nn
from torch.autograd.profiler import record_function

from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 18 Column: 1

              from torch.autograd.profiler import record_function

from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

              
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 20 Column: 1

              from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 21 Column: 1

              from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers

__all__: List[str] = []

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 22 Column: 1

              from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers

__all__: List[str] = []


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 23 Column: 1

              from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers

__all__: List[str] = []



            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 24 Column: 1

              from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers

__all__: List[str] = []


Tensors = Sequence[Tensor]

            

Reported by Pylint.
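
The eight relative-import reports above are usually a symptom of how Pylint was invoked (the package root is not on its path) rather than of broken imports: the file lives in torch/distributed/pipeline/sync/, so the relative imports are valid inside that package. If a code change were preferred over fixing the lint invocation, the equivalent absolute imports would look like this (a sketch based only on the module paths shown above):

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing
    from torch.distributed.pipeline.sync.microbatch import Batch
    from torch.distributed.pipeline.sync.worker import Task, create_workers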

Value 'Queue' is unsubscriptable
Error

Line: 37 Column: 15

              # Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
    InQueue = Queue[Optional["Task"]]
    OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
    InQueue = Queue
    OutQueue = Queue


            

Reported by Pylint.

Value 'Queue' is unsubscriptable
Error

Line: 38 Column: 16

              # https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
    InQueue = Queue[Optional["Task"]]
    OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
    InQueue = Queue
    OutQueue = Queue



            

Reported by Pylint.
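
Both unsubscriptable-object reports are the flip side of the comment quoted in the snippet: queue.Queue is generic only in the typeshed stubs, so the subscript is fine for mypy but invisible to Pylint at runtime. A self-contained sketch of one way to keep the alias and quiet the checker (Task here is a placeholder class, not the real worker.Task):

    from queue import Queue
    from typing import TYPE_CHECKING, Optional


    class Task:  # placeholder for the real Task type
        pass


    if TYPE_CHECKING:
        # Never executed at runtime; the disable covers the stub-only subscript.
        InQueue = Queue[Optional[Task]]  # pylint: disable=unsubscriptable-object
    else:
        InQueue = Queue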

torch/distributed/algorithms/quantization.py
32 issues
Module 'torch' has no 'finfo' member
Error

Line: 9 Column: 18

              from enum import Enum


TORCH_HALF_MIN = torch.finfo(torch.float16).min
TORCH_HALF_MAX = torch.finfo(torch.float16).max

class DQuantType(Enum):
    FP16 = "fp16"


            

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 9 Column: 30

              from enum import Enum


TORCH_HALF_MIN = torch.finfo(torch.float16).min
TORCH_HALF_MAX = torch.finfo(torch.float16).max

class DQuantType(Enum):
    FP16 = "fp16"


            

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 10 Column: 30

              

TORCH_HALF_MIN = torch.finfo(torch.float16).min
TORCH_HALF_MAX = torch.finfo(torch.float16).max

class DQuantType(Enum):
    FP16 = "fp16"

    def __str__(self) -> str:

            

Reported by Pylint.

Module 'torch' has no 'finfo' member
Error

Line: 10 Column: 18

              

TORCH_HALF_MIN = torch.finfo(torch.float16).min
TORCH_HALF_MAX = torch.finfo(torch.float16).max

class DQuantType(Enum):
    FP16 = "fp16"

    def __str__(self) -> str:

            

Reported by Pylint.

__str__ does not return str
Error

Line: 15 Column: 5

              class DQuantType(Enum):
    FP16 = "fp16"

    def __str__(self) -> str:
        return self.value


def _fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
    return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()

            

Reported by Pylint.
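
Pylint cannot prove that self.value is a string (Enum values are typed loosely), hence the __str__-does-not-return-str report even though FP16 maps to "fp16". Wrapping the value in str() makes the contract explicit; a self-contained sketch:

    from enum import Enum


    class DQuantType(Enum):
        FP16 = "fp16"

        def __str__(self) -> str:
            # str() makes the return type explicit for static analysis.
            return str(self.value)


    print(str(DQuantType.FP16))  # fp16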

Module 'torch' has no 'clamp' member
Error

Line: 20 Column: 12

              

def _fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
    return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()

def _quantize_tensor(tensor, qtype):
    if not isinstance(tensor, torch.Tensor):
        raise RuntimeError(
            f"_quantize_tensor expecting torch.Tensor as input but found {type(tensor)}"

            

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 55 Column: 28

                          f"_dequantize_tensor expecting torch.Tensor as input but found {type(tensor)}"
        )
    if (qtype == DQuantType.FP16):
        if tensor.dtype != torch.float16:
            raise RuntimeError(
                f"tensor dtype is {tensor.dtype} while expected to be FP16."
            )
        elif tensor.dtype == torch.float16 and quant_loss is None:
            return tensor.float()

            

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 59 Column: 30

                          raise RuntimeError(
                f"tensor dtype is {tensor.dtype} while expected to be FP16."
            )
        elif tensor.dtype == torch.float16 and quant_loss is None:
            return tensor.float()
        else:
            return tensor.float() / quant_loss
    else:
        raise RuntimeError(

            

Reported by Pylint.

Unused argument 'quant_loss'
Error

Line: 69 Column: 49

                      )


def _dequantize_tensor_list(tensor_list, qtype, quant_loss=None):
    if not isinstance(tensor_list, list) or not all(
        isinstance(p, torch.Tensor) for p in tensor_list
    ):
        raise RuntimeError(
            f"_dequantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}"

            

Reported by Pylint.

Comparing against a callable, did you omit the parenthesis?
Error

Line: 110 Column: 13

                          raise RuntimeError(
                'The async_op=True mode is not supported yet.'
            )
        if (func == dist.all_gather):
            tensors = args[0]
            input_tensors = _quantize_tensor(args[1], qtype)
            out_tensors = _quantize_tensor_list(tensors, qtype)
            dist.all_gather(out_tensors, input_tensors, group=group, async_op=async_op)
            for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)):

            

Reported by Pylint.
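
The comparison-with-callable report is raised because func == dist.all_gather compares a function object with ==, and Pylint suspects a forgotten call. The intent is to dispatch on the function's identity, so using is states that explicitly and silences the warning; a hypothetical standalone sketch (dispatch is not the real function name):

    import torch.distributed as dist


    def dispatch(func):
        # Identity comparison: we are selecting on the function object itself,
        # not on the result of calling it.
        if func is dist.all_gather:
            return "all_gather path"
        if func is dist.all_to_all:
            return "all_to_all path"
        raise NotImplementedError(f"unsupported collective: {func}")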

caffe2/quantization/server/elementwise_add_dnnlowp_op_test.py
32 issues
Unable to import 'hypothesis.strategies'
Error

Line: 6 Column: 1

              import collections

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given


            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 10 Column: 1

              import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given


dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 27 Column: 71

                      **hu.gcs_cpu_only
    )
    def test_dnnlowp_elementwise_add_int(
        self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
    ):
        if is_empty:
            N = 0
        # FIXME: DNNLOWP Add doesn't support inplace operation and
        # dequantize_output=1 at the same time

            

Reported by Pylint.

FIXME: DNNLOWP Add doesn't support inplace operation and
Error

Line: 31 Column: 3

                  ):
        if is_empty:
            N = 0
        # FIXME: DNNLOWP Add doesn't support inplace operation and
        # dequantize_output=1 at the same time
        if in_place[0] or in_place[1]:
            in_quantized = True
            out_quantized = True


            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 106 Column: 58

                      check_quantized_results_close(outputs)

    @given(**hu.gcs_cpu_only)
    def test_dnnlowp_elementwise_add_broadcast(self, gc, dc):
        # Set broadcast and no axis, i.e. broadcasting last dimensions.
        min_ = -100
        max_ = min_ + 255
        A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)
        A = A.astype(np.float32)

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 148 Column: 63

                      check_quantized_results_close(outputs)

    @given(**hu.gcs_cpu_only)
    def test_dnnlowp_elementwise_add_broadcast_axis(self, gc, dc):
        for bdim, axis in [
            ((3, 4), 1),  # broadcasting intermediate dimensions
            ((2,), 0),  # broadcasting the first dimension
            ((1, 4, 1), 1),
        ]:

            

Reported by Pylint.
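
The unused-argument reports concern dc, which is injected through **hu.gcs_cpu_only and therefore has to stay in the test signature even when a particular test never reads it. A minimal, hypothetical stand-in showing a line-scoped suppression (run_like_the_test is not part of the test file):

    def run_like_the_test(gc, dc):  # pylint: disable=unused-argument
        # 'dc' is part of the fixture-supplied signature and must be accepted
        # even though this function never reads it.
        return f"device option: {gc}"


    print(run_like_the_test("cpu", ["cpu"]))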

Missing module docstring
Error

Line: 1 Column: 1

              

import collections

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close

            

Reported by Pylint.

Missing class docstring
Error

Line: 17 Column: 1

              workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


class DNNLowPAddOpTest(hu.HypothesisTestCase):
    @given(
        N=st.integers(32, 256),
        is_empty=st.booleans(),
        in_quantized=st.booleans(),
        out_quantized=st.booleans(),

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 25 Column: 5

                      out_quantized=st.booleans(),
        in_place=st.sampled_from([(False, False), (True, False), (False, True)]),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_elementwise_add_int(
        self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
    ):
        if is_empty:
            N = 0

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 25 Column: 5

                      out_quantized=st.booleans(),
        in_place=st.sampled_from([(False, False), (True, False), (False, True)]),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_elementwise_add_int(
        self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
    ):
        if is_empty:
            N = 0

            

Reported by Pylint.

caffe2/python/ideep/leaky_relu_op_test.py
32 issues
Unable to import 'hypothesis.strategies'
Error

Line: 7 Column: 1

              

import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace, model_helper
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu

            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 8 Column: 1

              
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace, model_helper
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu


            

Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'use_mkldnn' member
Error

Line: 15 Column: 22

              import caffe2.python.ideep_test_util as mu


@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5

        # default step size is 0.05

            

Reported by Pylint.

Unused argument 'order'
Error

Line: 17 Column: 39

              
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5

        # default step size is 0.05
        input_data[np.logical_and(
            input_data >= 0, input_data <= 0.051)] = 0.051

            

Reported by Pylint.

Unused argument 'order'
Error

Line: 28 Column: 45

              
        return input_data,

    def _get_op(self, device_option, alpha, order, inplace=False):
        outputs = ['output' if not inplace else "input"]
        op = core.CreateOperator(
            'LeakyRelu',
            ['input'],
            outputs,

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np

            

Reported by Pylint.

Missing class docstring
Error

Line: 16 Column: 1

              

@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5

        # default step size is 0.05
        input_data[np.logical_and(

            

Reported by Pylint.

Argument name "N" doesn't conform to snake_case naming style
Error

Line: 17 Column: 5

              
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5

        # default step size is 0.05
        input_data[np.logical_and(
            input_data >= 0, input_data <= 0.051)] = 0.051

            

Reported by Pylint.

Argument name "C" doesn't conform to snake_case naming style
Error

Line: 17 Column: 5

              
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5

        # default step size is 0.05
        input_data[np.logical_and(
            input_data >= 0, input_data <= 0.051)] = 0.051

            

Reported by Pylint.

Argument name "H" doesn't conform to snake_case naming style
Error

Line: 17 Column: 5

              
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LeakyReluTest(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5

        # default step size is 0.05
        input_data[np.logical_and(
            input_data >= 0, input_data <= 0.051)] = 0.051

            

Reported by Pylint.

test/onnx/model_defs/op_test.py
32 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import torch.nn as nn


class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 2 Column: 1

              import torch
import torch.nn as nn


class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(

            

Reported by Pylint.

Unused argument 'num_classes'
Error

Line: 7 Column: 24

              
class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(
            nn.LeakyReLU(0.02),
            nn.BatchNorm2d(3),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)

            

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 22 Column: 5

              
class ConcatNet(nn.Module):

    def __init__(self):
        super(ConcatNet, self).__init__()

    def forward(self, inputs):
        return torch.cat(inputs, 1)


            

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 31 Column: 5

              
class PermuteNet(nn.Module):

    def __init__(self):
        super(PermuteNet, self).__init__()

    def forward(self, input):
        return input.permute(2, 3, 0, 1)


            

Reported by Pylint.
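
The two useless-super-delegation reports (ConcatNet and PermuteNet) flag __init__ methods whose only statement is the call to the parent's __init__; deleting them changes nothing, because nn.Module.__init__ still runs when the class is instantiated. The sketch below shows PermuteNet without the redundant constructor and, while at it, with the forward parameter renamed so the redefined-builtin report that follows goes away as well:

    import torch
    from torch import nn


    class PermuteNet(nn.Module):
        # No __init__ needed: nn.Module.__init__ runs automatically.
        def forward(self, x):  # renamed from 'input'
            return x.permute(2, 3, 0, 1)


    print(PermuteNet()(torch.rand(2, 3, 4, 5)).shape)  # torch.Size([4, 5, 2, 3])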

Redefining built-in 'input'
Error

Line: 34 Column: 23

                  def __init__(self):
        super(PermuteNet, self).__init__()

    def forward(self, input):
        return input.permute(2, 3, 0, 1)


class PReluNet(nn.Module):


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
import torch.nn as nn


class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 5 Column: 1

              import torch.nn as nn


class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(
            nn.LeakyReLU(0.02),

            

Reported by Pylint.

Missing class docstring
Error

Line: 5 Column: 1

              import torch.nn as nn


class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(
            nn.LeakyReLU(0.02),

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 8 Column: 9

              class DummyNet(nn.Module):

    def __init__(self, num_classes=1000):
        super(DummyNet, self).__init__()
        self.features = nn.Sequential(
            nn.LeakyReLU(0.02),
            nn.BatchNorm2d(3),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)
        )

            

Reported by Pylint.
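
The super-with-arguments report suggests the Python 3 form super().__init__(), which behaves identically here. The too-few-public-methods report above is also typical for nn.Module subclasses, whose only public method is usually forward, and is normally handled in configuration rather than in code. A trimmed, hypothetical sketch of DummyNet with the modern call (the unused num_classes argument and the pooling layer are dropped for brevity):

    import torch
    from torch import nn


    class DummyNet(nn.Module):
        def __init__(self):
            super().__init__()  # Python 3 form: no class or instance arguments
            self.features = nn.Sequential(
                nn.LeakyReLU(0.02),
                nn.BatchNorm2d(3),
            )

        def forward(self, x):
            return self.features(x)


    print(DummyNet()(torch.rand(1, 3, 8, 8)).shape)  # torch.Size([1, 3, 8, 8])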

caffe2/python/operator_test/channel_shuffle_test.py
32 issues
Unable to import 'hypothesis.strategies'
Error

Line: 5 Column: 1

              
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core


class ChannelShuffleOpsTest(serial.SerializedTestCase):

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              

import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core



            

Reported by Pylint.

Missing class docstring
Error

Line: 10 Column: 1

              from caffe2.python import core


class ChannelShuffleOpsTest(serial.SerializedTestCase):
    def _channel_shuffle_nchw_ref(self, X, group):
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group

            

Reported by Pylint.

Method could be a function
Error

Line: 11 Column: 5

              

class ChannelShuffleOpsTest(serial.SerializedTestCase):
    def _channel_shuffle_nchw_ref(self, X, group):
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)

            

Reported by Pylint.

Argument name "X" doesn't conform to snake_case naming style
Error

Line: 11 Column: 5

              

class ChannelShuffleOpsTest(serial.SerializedTestCase):
    def _channel_shuffle_nchw_ref(self, X, group):
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)

            

Reported by Pylint.

Variable name "N" doesn't conform to snake_case naming style
Error

Line: 13 Column: 9

              class ChannelShuffleOpsTest(serial.SerializedTestCase):
    def _channel_shuffle_nchw_ref(self, X, group):
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)
        X = X.reshape(N, G, K, np.prod(dims[2:]))
        Y = np.transpose(X, axes=(0, 2, 1, 3))

            

Reported by Pylint.

Variable name "C" doesn't conform to snake_case naming style
Error

Line: 14 Column: 9

                  def _channel_shuffle_nchw_ref(self, X, group):
        dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)
        X = X.reshape(N, G, K, np.prod(dims[2:]))
        Y = np.transpose(X, axes=(0, 2, 1, 3))
        return [Y.reshape(dims)]

            

Reported by Pylint.

Variable name "G" doesn't conform to snake_case naming style
Error

Line: 15 Column: 9

                      dims = X.shape
        N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)
        X = X.reshape(N, G, K, np.prod(dims[2:]))
        Y = np.transpose(X, axes=(0, 2, 1, 3))
        return [Y.reshape(dims)]


            

Reported by Pylint.

Variable name "K" doesn't conform to snake_case naming style
Error

Line: 16 Column: 9

                      N = dims[0]
        C = dims[1]
        G = group
        K = int(C / G)
        X = X.reshape(N, G, K, np.prod(dims[2:]))
        Y = np.transpose(X, axes=(0, 2, 1, 3))
        return [Y.reshape(dims)]

    def _channel_shuffle_nhwc_ref(self, X, group):

            

Reported by Pylint.

Variable name "Y" doesn't conform to snake_case naming style
Error

Line: 18 Column: 9

                      G = group
        K = int(C / G)
        X = X.reshape(N, G, K, np.prod(dims[2:]))
        Y = np.transpose(X, axes=(0, 2, 1, 3))
        return [Y.reshape(dims)]

    def _channel_shuffle_nhwc_ref(self, X, group):
        dims = X.shape
        N = dims[0]

            

Reported by Pylint.
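
The remaining invalid-name reports flag the single-letter, math-style names (X, N, C, G, K, Y) used in the NumPy reference implementation. One option is descriptive snake_case names, as in the hypothetical rewrite below; the other common option is whitelisting such names via Pylint's good-names setting and leaving the code as it is:

    import numpy as np


    def channel_shuffle_nchw_ref(x, group):
        # Reference channel shuffle in NCHW layout, with snake_case names.
        batch, channels = x.shape[0], x.shape[1]
        per_group = channels // group
        reshaped = x.reshape(batch, group, per_group, int(np.prod(x.shape[2:])))
        shuffled = np.transpose(reshaped, axes=(0, 2, 1, 3))
        return shuffled.reshape(x.shape)


    print(channel_shuffle_nchw_ref(np.arange(24).reshape(1, 6, 2, 2), 2).shape)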