The following issues were found:

caffe2/python/control.py
44 issues
Using the global statement
Error

Line: 30 Column: 5

              

def _get_next_step_name(control_name, base_name):
    global _current_idx, _used_step_names
    concat_name = '%s/%s' % (base_name, control_name)
    next_name = concat_name
    while next_name in _used_step_names:
        next_name = '%s_%d' % (concat_name, _current_idx)
        _current_idx += 1

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 40 Column: 15

                  return next_name


def _MakeList(input):
    """ input is a tuple.
    Example:
    (a, b, c)   --> [a, b, c]
    (a)         --> [a]
    ([a, b, c]) --> [a, b, c]

            

Reported by Pylint.

Access to a protected member _attr_dict of a client class
Error

Line: 204 Column: 31

                      else:
            last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
        # merge attributes
        for k, v in viewitems(condition_nets[i]._attr_dict):
            merged_net._attr_dict[k] += v

    merged_net.AddExternalOutput(last_cond)

    return merged_net

            

Reported by Pylint.

Access to a protected member _attr_dict of a client class
Error

Line: 205 Column: 13

                          last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
        # merge attributes
        for k, v in viewitems(condition_nets[i]._attr_dict):
            merged_net._attr_dict[k] += v

    merged_net.AddExternalOutput(last_cond)

    return merged_net


            

Reported by Pylint.

Constant name "_current_idx" doesn't conform to UPPER_CASE naming style
Error

Line: 25 Column: 1

              
# Used to generate names of the steps created by the control functions.
# It is actually the internal index of these steps.
_current_idx = 1
_used_step_names = set()


def _get_next_step_name(control_name, base_name):
    global _current_idx, _used_step_names

            

Reported by Pylint.

Constant name "_current_idx" doesn't conform to UPPER_CASE naming style
Error

Line: 30 Column: 5

              

def _get_next_step_name(control_name, base_name):
    global _current_idx, _used_step_names
    concat_name = '%s/%s' % (base_name, control_name)
    next_name = concat_name
    while next_name in _used_step_names:
        next_name = '%s_%d' % (concat_name, _current_idx)
        _current_idx += 1

            

Reported by Pylint.

Constant name "_used_step_names" doesn't conform to UPPER_CASE naming style
Error

Line: 30 Column: 5

              

def _get_next_step_name(control_name, base_name):
    global _current_idx, _used_step_names
    concat_name = '%s/%s' % (base_name, control_name)
    next_name = concat_name
    while next_name in _used_step_names:
        next_name = '%s_%d' % (concat_name, _current_idx)
        _current_idx += 1

            

Reported by Pylint.

Function name "_MakeList" doesn't conform to snake_case naming style
Error

Line: 40 Column: 1

                  return next_name


def _MakeList(input):
    """ input is a tuple.
    Example:
    (a, b, c)   --> [a, b, c]
    (a)         --> [a]
    ([a, b, c]) --> [a, b, c]

            

Reported by Pylint.

Unnecessary "elif" after "raise"
Error

Line: 47 Column: 5

                  (a)         --> [a]
    ([a, b, c]) --> [a, b, c]
    """
    if len(input) == 0:
        raise ValueError(
            'input cannot be empty.')
    elif len(input) == 1:
        output = input[0]
        if not isinstance(output, list):

            

Reported by Pylint.

Function name "_IsNets" doesn't conform to snake_case naming style
Error

Line: 59 Column: 1

                  return output


def _IsNets(nets_or_steps):
    if isinstance(nets_or_steps, list):
        return all(isinstance(n, core.Net) for n in nets_or_steps)
    else:
        return isinstance(nets_or_steps, core.Net)


            

Reported by Pylint.

benchmarks/fastrnns/fuser.py
44 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch

def set_fuser(fuser_name, executor_name):
    assert fuser_name in ['te', 'old', 'none', 'default']
    if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)

            

Reported by Pylint.

Access to a protected member _jit_set_profiling_executor of a client class
Error

Line: 6 Column: 9

              def set_fuser(fuser_name, executor_name):
    assert fuser_name in ['te', 'old', 'none', 'default']
    if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 6 Column: 9

              def set_fuser(fuser_name, executor_name):
    assert fuser_name in ['te', 'old', 'none', 'default']
    if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':

            

Reported by Pylint.

Access to a protected member _jit_set_profiling_mode of a client class
Error

Line: 7 Column: 9

                  assert fuser_name in ['te', 'old', 'none', 'default']
    if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 7 Column: 9

                  assert fuser_name in ['te', 'old', 'none', 'default']
    if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 8 Column: 9

                  if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)

            

Reported by Pylint.

Access to a protected member _jit_override_can_fuse_on_cpu of a client class
Error

Line: 8 Column: 9

                  if fuser_name == 'te':
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)

            

Reported by Pylint.

Access to a protected member _jit_override_can_fuse_on_gpu of a client class
Error

Line: 9 Column: 9

                      torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_override_can_fuse_on_gpu(True)

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 9 Column: 9

                      torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_override_can_fuse_on_gpu(True)

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 10 Column: 9

                      torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser_name == 'old':
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(False)

            

Reported by Pylint.

caffe2/python/operator_test/partition_ops_test.py
44 issues
Cell variable parts defined in loop
Error

Line: 56 Column: 34

              
            def sharding(x):
                # numpy has proper modulo op that yields non-negative results
                shards = (x[0] % parts).reshape([-1])
                out = []
                for i in range(parts):
                    for ind, v in enumerate(x):
                        suffix_shape = v.shape[len(x[0].shape):]
                        accum = []

            

Reported by Pylint.

Cell variable parts defined in loop
Error

Line: 58 Column: 32

                              # numpy has proper modulo op that yields non-negative results
                shards = (x[0] % parts).reshape([-1])
                out = []
                for i in range(parts):
                    for ind, v in enumerate(x):
                        suffix_shape = v.shape[len(x[0].shape):]
                        accum = []
                        data = v.reshape((-1, ) + suffix_shape)


            

Reported by Pylint.

Cell variable pack defined in loop
Error

Line: 64 Column: 28

                                      accum = []
                        data = v.reshape((-1, ) + suffix_shape)

                        if pack and ind == 0:
                            data = data // parts

                        for j, s in enumerate(shards):
                            if s == i:
                                accum.append(data[j])

            

Reported by Pylint.

Cell variable parts defined in loop
Error

Line: 65 Column: 44

                                      data = v.reshape((-1, ) + suffix_shape)

                        if pack and ind == 0:
                            data = data // parts

                        for j, s in enumerate(shards):
                            if s == i:
                                accum.append(data[j])


            

Reported by Pylint.

Cell variable suffix_shape defined in loop
Error

Line: 73 Column: 63

              
                        def join(a):
                            if not a:
                                return np.empty(shape=(0, ) + suffix_shape)
                            return np.stack(a)

                        out.append(join(accum))
                return out


            

Reported by Pylint.

Cell variable parts defined in loop
Error

Line: 140 Column: 34

              
            def sharding(x):
                # numpy has proper modulo op that yields non-negative results
                shards = (x[0] % parts).reshape([-1])
                out = []
                for i in range(parts):
                    idx = 0
                    sharded_lengths = np.zeros(elements)
                    for ind, length in enumerate(lengths):

            

Reported by Pylint.

Cell variable parts defined in loop
Error

Line: 142 Column: 32

                              # numpy has proper modulo op that yields non-negative results
                shards = (x[0] % parts).reshape([-1])
                out = []
                for i in range(parts):
                    idx = 0
                    sharded_lengths = np.zeros(elements)
                    for ind, length in enumerate(lengths):
                        for _ in range(length):
                            if shards[idx] == i:

            

Reported by Pylint.

Cell variable elements defined in loop
Error

Line: 144 Column: 48

                              out = []
                for i in range(parts):
                    idx = 0
                    sharded_lengths = np.zeros(elements)
                    for ind, length in enumerate(lengths):
                        for _ in range(length):
                            if shards[idx] == i:
                                sharded_lengths[ind] += 1
                            idx += 1

            

Reported by Pylint.

Cell variable lengths defined in loop
Error

Line: 145 Column: 50

                              for i in range(parts):
                    idx = 0
                    sharded_lengths = np.zeros(elements)
                    for ind, length in enumerate(lengths):
                        for _ in range(length):
                            if shards[idx] == i:
                                sharded_lengths[ind] += 1
                            idx += 1
                    out.append(sharded_lengths)

            

Reported by Pylint.

Cell variable pack defined in loop
Error

Line: 157 Column: 28

                                      accum = []
                        data = v.reshape((-1, ) + suffix_shape)

                        if pack and ind == 0:
                            data = data // parts

                        for j, s in enumerate(shards):
                            if s == i:
                                accum.append(data[j])

            

Reported by Pylint.

torch/quasirandom.py
44 issues
Module 'torch' has no 'device' member
Error

Line: 56 Column: 15

                      self.scramble = scramble
        self.dimension = dimension

        cpu = torch.device("cpu")

        self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
        torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if not self.scramble:

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 58 Column: 81

              
        cpu = torch.device("cpu")

        self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
        torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if not self.scramble:
            self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
        else:

            

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 58 Column: 27

              
        cpu = torch.device("cpu")

        self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
        torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if not self.scramble:
            self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
        else:

            

Reported by Pylint.

Module 'torch' has no '_sobol_engine_initialize_state_' member
Error

Line: 59 Column: 9

                      cpu = torch.device("cpu")

        self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
        torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if not self.scramble:
            self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
        else:
            self._scramble()

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 62 Column: 72

                      torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if not self.scramble:
            self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
        else:
            self._scramble()

        self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
        self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)

            

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 62 Column: 26

                      torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)

        if not self.scramble:
            self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
        else:
            self._scramble()

        self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
        self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)

            

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 66 Column: 53

                      else:
            self._scramble()

        self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
        self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)
        self.num_generated = 0

    def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
             dtype: torch.dtype = torch.float32) -> torch.Tensor:

            

Reported by Pylint.

Module 'torch' has no 'float32' member
Error

Line: 71 Column: 35

                      self.num_generated = 0

    def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
             dtype: torch.dtype = torch.float32) -> torch.Tensor:
        r"""
        Function to draw a sequence of :attr:`n` points from a Sobol sequence.
        Note that the samples are dependent on the previous samples. The size
        of the result is :math:`(n, dimension)`.


            

Reported by Pylint.

Module 'torch' has no 'dtype' member
Error

Line: 71 Column: 21

                      self.num_generated = 0

    def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
             dtype: torch.dtype = torch.float32) -> torch.Tensor:
        r"""
        Function to draw a sequence of :attr:`n` points from a Sobol sequence.
        Note that the samples are dependent on the previous samples. The size
        of the result is :math:`(n, dimension)`.


            

Reported by Pylint.

Module 'torch' has no '_sobol_engine_draw' member
Error

Line: 89 Column: 38

                          if n == 1:
                result = self._first_point.to(dtype)
            else:
                result, self.quasi = torch._sobol_engine_draw(
                    self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype,
                )
                result = torch.cat((self._first_point, result), dim=-2)
        else:
            result, self.quasi = torch._sobol_engine_draw(

            

Reported by Pylint.

torch/nn/modules/__init__.py
43 issues
Unable to import '__init__.module'
Error

Line: 1 Column: 1

              from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish

            

Reported by Pylint.

Unable to import '__init__.linear'
Error

Line: 2 Column: 1

              from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish

            

Reported by Pylint.

Unable to import '__init__.conv'
Error

Line: 3 Column: 1

              from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish

            

Reported by Pylint.

Unable to import '__init__.activation'
Error

Line: 6 Column: 1

              from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \

            

Reported by Pylint.

Unable to import '__init__.loss'
Error

Line: 10 Column: 1

                  Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \

            

Reported by Pylint.

Unable to import '__init__.container'
Error

Line: 14 Column: 1

                  CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d

            

Reported by Pylint.

Unable to import '__init__.pooling'
Error

Line: 15 Column: 1

                  MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \

            

Reported by Pylint.

Unable to import '__init__.batchnorm'
Error

Line: 18 Column: 1

              from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout

            

Reported by Pylint.

Unable to import '__init__.instancenorm'
Error

Line: 20 Column: 1

                  AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
    ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d

            

Reported by Pylint.

Unable to import '__init__.normalization'
Error

Line: 22 Column: 1

                  LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
    ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \

            

Reported by Pylint.

test/test_autocast.py
43 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 3 Column: 1

              import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

            

Reported by Pylint.

Unable to import 'torch.testing._internal.autocast_test_lists'
Error

Line: 4 Column: 1

              import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

            

Reported by Pylint.

Access to a protected member _nn of a client class
Error

Line: 106 Column: 76

              
    def test_autocast_nn_bf16(self):
        for op, args in self.autocast_lists.nn_bf16:
            self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)

    def test_autocast_torch_fp32(self):
        for op_with_args in self.autocast_lists.torch_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 106 Column: 76

              
    def test_autocast_nn_bf16(self):
        for op, args in self.autocast_lists.nn_bf16:
            self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)

    def test_autocast_torch_fp32(self):
        for op_with_args in self.autocast_lists.torch_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)

            

Reported by Pylint.

Access to a protected member _nn of a client class
Error

Line: 116 Column: 75

                  def test_autocast_nn_fp32(self):
        for op_with_args in self.autocast_lists.nn_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)

    def test_autocast_torch_need_autocast_promote(self):
        for op, args in self.autocast_lists.torch_need_autocast_promote:
            self._run_autocast_outofplace(op, args, torch.float32)


            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 116 Column: 75

                  def test_autocast_nn_fp32(self):
        for op_with_args in self.autocast_lists.nn_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)

    def test_autocast_torch_need_autocast_promote(self):
        for op, args in self.autocast_lists.torch_need_autocast_promote:
            self._run_autocast_outofplace(op, args, torch.float32)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

            

Reported by Pylint.

Missing class docstring
Error

Line: 6 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

    def tearDown(self):

            

Reported by Pylint.

Method name "setUp" doesn't conform to snake_case naming style
Error

Line: 7 Column: 5

              from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

    def tearDown(self):
        del self.autocast_lists

            

Reported by Pylint.

caffe2/python/ideep/elementwise_sum_op_test.py
43 issues
Unable to import 'hypothesis.strategies'
Error

Line: 7 Column: 1

              

import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu

            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 8 Column: 1

              
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu

            

Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'use_mkldnn' member
Error

Line: 16 Column: 22

              import caffe2.python.ideep_test_util as mu


@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ElementwiseSumTest(hu.HypothesisTestCase):
    @given(size=st.integers(7, 9),
           input_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           inputs=st.integers(2, 7),

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 30 Column: 34

                                               batch_size,
                                 inputs,
                                 inplace,
                                 gc,
                                 dc):
        op = core.CreateOperator(
            "Sum",
            ["X_{}".format(i) for i in range(inputs)],
            ["X_0" if inplace else "Y"],

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 54 Column: 39

                                                    batch_size,
                                      inputs,
                                      inplace,
                                      gc,
                                      dc):
        op = core.CreateOperator(
            "Sum",
            ["X_{}".format(i) for i in range(inputs)],
            ["X_0" if inplace else "Y"],

            

Reported by Pylint.

Redundant use of assertTrue with constant value False
Error

Line: 79 Column: 13

                          print(Y.flatten())
            print(sum_val.flatten())
            print(np.max(np.abs(Y - sum_val)))
            self.assertTrue(False)


    @given(size=st.integers(7, 9),
           input_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 94 Column: 34

                                               batch_size,
                                 inputs,
                                 inplace,
                                 gc,
                                 dc):
        sum_fp32 = core.CreateOperator(
            "Sum",
            ["X_{}".format(i) for i in range(inputs)],
            ["X_0" if inplace else "Y"],

            

Reported by Pylint.

Redefining built-in 'sum'
Error

Line: 150 Column: 9

                          )
            net.op.extend([sw2nhwc, quantize])

        sum = core.CreateOperator(
            "Int8Sum",
            ["Xi_{}_quantized".format(i) for i in range(inputs)],
            ["Xi_0_quantized" if inplace else "Y_quantized"],
            engine="DNNLOWP",
            device_option=dc[1],

            

Reported by Pylint.

Redundant use of assertTrue with constant value False
Error

Line: 185 Column: 13

                          print(Y_out.flatten())
            print(np.max(np.abs(Y_out - Y)))
            print("MSE", MSE)
            self.assertTrue(False)

        workspace.SwitchWorkspace(old_ws_name)

if __name__ == "__main__":
    unittest.main()

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np

            

Reported by Pylint.

torch/autocast_mode.py
43 issues
Module 'torch' has no 'get_autocast_gpu_dtype' member
Error

Line: 133 Column: 31

                  def __init__(self, device_type, enabled=True, **kwargs):
        self.device = device_type
        if self.device == 'cuda':
            self.fast_dtype = torch.get_autocast_gpu_dtype()
        elif self.device == 'cpu':
            self.fast_dtype = torch.get_autocast_cpu_dtype()
        else:
            raise RuntimeError('User specified autocast device_type must be \'cuda\' or \'cpu\'')
        if torch.cuda.amp.common.amp_definitely_not_available() and self.device == 'cuda':

            

Reported by Pylint.

Module 'torch' has no 'get_autocast_cpu_dtype' member
Error

Line: 135 Column: 31

                      if self.device == 'cuda':
            self.fast_dtype = torch.get_autocast_gpu_dtype()
        elif self.device == 'cpu':
            self.fast_dtype = torch.get_autocast_cpu_dtype()
        else:
            raise RuntimeError('User specified autocast device_type must be \'cuda\' or \'cpu\'')
        if torch.cuda.amp.common.amp_definitely_not_available() and self.device == 'cuda':
            warnings.warn('User provided device_type of \'cuda\', but CUDA is not available. Disabling')
            enabled = False

            

Reported by Pylint.

Module 'torch' has no 'bfloat16' member
Error

Line: 148 Column: 32

                              raise RuntimeError('Unrecognized optional argument supplied to autocast context manager: ' + str(key))

        if self.device == 'cpu':
            supported_dtype = [torch.bfloat16]
            if self.fast_dtype not in supported_dtype:
                error_message = 'In CPU autocast, but the target dtype is not supported. Disabling autocast.\n'
                error_message += 'CPU Autocast only supports dtype of torch.bfloat16 currently.'
                warnings.warn(error_message)
                enabled = False

            

Reported by Pylint.

Module 'torch' has no 'bfloat16' member
Error

Line: 155 Column: 35

                              warnings.warn(error_message)
                enabled = False
        if self.device == 'cuda':
            if self.fast_dtype == torch.bfloat16 and torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
                raise RuntimeError('Current CUDA Device does not support bfloat16. Switching fast_dtype to float16.')
        self._enabled = enabled

    def __enter__(self):
        if self.device == 'cpu':

            

Reported by Pylint.

Module 'torch' has no 'is_autocast_cpu_enabled' member
Error

Line: 161 Column: 25

              
    def __enter__(self):
        if self.device == 'cpu':
            self.prev = torch.is_autocast_cpu_enabled()
            self.prev_fastdtype = torch.get_autocast_cpu_dtype()
            torch.set_autocast_cpu_enabled(self._enabled)
            torch.set_autocast_cpu_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:

            

Reported by Pylint.

Module 'torch' has no 'get_autocast_cpu_dtype' member
Error

Line: 162 Column: 35

                  def __enter__(self):
        if self.device == 'cpu':
            self.prev = torch.is_autocast_cpu_enabled()
            self.prev_fastdtype = torch.get_autocast_cpu_dtype()
            torch.set_autocast_cpu_enabled(self._enabled)
            torch.set_autocast_cpu_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:
            self.prev = torch.is_autocast_enabled()

            

Reported by Pylint.

Module 'torch' has no 'set_autocast_cpu_enabled' member
Error

Line: 163 Column: 13

                      if self.device == 'cpu':
            self.prev = torch.is_autocast_cpu_enabled()
            self.prev_fastdtype = torch.get_autocast_cpu_dtype()
            torch.set_autocast_cpu_enabled(self._enabled)
            torch.set_autocast_cpu_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:
            self.prev = torch.is_autocast_enabled()
            self.prev_fastdtype = torch.get_autocast_gpu_dtype()

            

Reported by Pylint.

Module 'torch' has no 'set_autocast_cpu_dtype' member
Error

Line: 164 Column: 13

                          self.prev = torch.is_autocast_cpu_enabled()
            self.prev_fastdtype = torch.get_autocast_cpu_dtype()
            torch.set_autocast_cpu_enabled(self._enabled)
            torch.set_autocast_cpu_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:
            self.prev = torch.is_autocast_enabled()
            self.prev_fastdtype = torch.get_autocast_gpu_dtype()
            torch.set_autocast_gpu_dtype(self.fast_dtype)

            

Reported by Pylint.

Module 'torch' has no 'autocast_increment_nesting' member
Error

Line: 165 Column: 13

                          self.prev_fastdtype = torch.get_autocast_cpu_dtype()
            torch.set_autocast_cpu_enabled(self._enabled)
            torch.set_autocast_cpu_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:
            self.prev = torch.is_autocast_enabled()
            self.prev_fastdtype = torch.get_autocast_gpu_dtype()
            torch.set_autocast_gpu_dtype(self.fast_dtype)
            torch.set_autocast_enabled(self._enabled)

            

Reported by Pylint.

Module 'torch' has no 'is_autocast_enabled' member
Error

Line: 167 Column: 25

                          torch.set_autocast_cpu_dtype(self.fast_dtype)
            torch.autocast_increment_nesting()
        else:
            self.prev = torch.is_autocast_enabled()
            self.prev_fastdtype = torch.get_autocast_gpu_dtype()
            torch.set_autocast_gpu_dtype(self.fast_dtype)
            torch.set_autocast_enabled(self._enabled)
            torch.autocast_increment_nesting()


            

Reported by Pylint.

torch/distributions/__init__.py
43 issues
Unable to import '__init__.bernoulli'
Error

Line: 74 Column: 1

                  loss.backward()
"""

from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2

            

Reported by Pylint.

Unable to import '__init__.beta'
Error

Line: 75 Column: 1

              """

from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to

            

Reported by Pylint.

Unable to import '__init__.binomial'
Error

Line: 76 Column: 1

              
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli

            

Reported by Pylint.

Unable to import '__init__.categorical'
Error

Line: 77 Column: 1

              from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet

            

Reported by Pylint.

Unable to import '__init__.cauchy'
Error

Line: 78 Column: 1

              from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution

            

Reported by Pylint.

Unable to import '__init__.chi2'
Error

Line: 79 Column: 1

              from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily

            

Reported by Pylint.

Unable to import '__init__.constraint_registry'
Error

Line: 80 Column: 1

              from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential

            

Reported by Pylint.

Unable to import '__init__.continuous_bernoulli'
Error

Line: 81 Column: 1

              from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor

            

Reported by Pylint.

Unable to import '__init__.dirichlet'
Error

Line: 82 Column: 1

              from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma

            

Reported by Pylint.

Unable to import '__init__.distribution'
Error

Line: 83 Column: 1

              from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
from .geometric import Geometric

            

Reported by Pylint.

torch/quantization/_learnable_fake_quantize.py
43 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 40 Column: 36

                      observer_kwargs["quant_max"] = quant_max
        self.use_grad_scaling = use_grad_scaling
        if channel_len == -1:
            self.scale = Parameter(torch.tensor([scale]))
            self.zero_point = Parameter(torch.tensor([zero_point]))
        else:
            assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
            self.scale = Parameter(torch.tensor([scale] * channel_len))
            self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 41 Column: 41

                      self.use_grad_scaling = use_grad_scaling
        if channel_len == -1:
            self.scale = Parameter(torch.tensor([scale]))
            self.zero_point = Parameter(torch.tensor([zero_point]))
        else:
            assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
            self.scale = Parameter(torch.tensor([scale] * channel_len))
            self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))


            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 44 Column: 36

                          self.zero_point = Parameter(torch.tensor([zero_point]))
        else:
            assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
            self.scale = Parameter(torch.tensor([scale] * channel_len))
            self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))

        self.activation_post_process = observer(**observer_kwargs)
        assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
            'quant_min out of bound'

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 45 Column: 41

                      else:
            assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
            self.scale = Parameter(torch.tensor([scale] * channel_len))
            self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))

        self.activation_post_process = observer(**observer_kwargs)
        assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
            'quant_min out of bound'
        assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \

            

Reported by Pylint.

Module 'torch' has no 'iinfo' member
Error

Line: 48 Column: 16

                          self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))

        self.activation_post_process = observer(**observer_kwargs)
        assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
            'quant_min out of bound'
        assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
            'quant_max out of bound'
        self.dtype = self.activation_post_process.dtype
        self.qscheme = self.activation_post_process.qscheme

            

Reported by Pylint.

Module 'torch' has no 'iinfo' member
Error

Line: 50 Column: 29

                      self.activation_post_process = observer(**observer_kwargs)
        assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
            'quant_min out of bound'
        assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
            'quant_max out of bound'
        self.dtype = self.activation_post_process.dtype
        self.qscheme = self.activation_post_process.qscheme
        self.ch_axis = self.activation_post_process.ch_axis \
            if hasattr(self.activation_post_process, 'ch_axis') else -1

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 56 Column: 52

                      self.qscheme = self.activation_post_process.qscheme
        self.ch_axis = self.activation_post_process.ch_axis \
            if hasattr(self.activation_post_process, 'ch_axis') else -1
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))

        bitrange = torch.tensor(quant_max - quant_min + 1).double()
        self.bitwidth = int(torch.log2(bitrange).item())

            

Reported by Pylint.

Module 'torch' has no 'uint8' member
Error

Line: 56 Column: 76

                      self.qscheme = self.activation_post_process.qscheme
        self.ch_axis = self.activation_post_process.ch_axis \
            if hasattr(self.activation_post_process, 'ch_axis') else -1
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))

        bitrange = torch.tensor(quant_max - quant_min + 1).double()
        self.bitwidth = int(torch.log2(bitrange).item())

            

Reported by Pylint.

Module 'torch' has no 'uint8' member
Error

Line: 57 Column: 72

                      self.ch_axis = self.activation_post_process.ch_axis \
            if hasattr(self.activation_post_process, 'ch_axis') else -1
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))

        bitrange = torch.tensor(quant_max - quant_min + 1).double()
        self.bitwidth = int(torch.log2(bitrange).item())
        self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 57 Column: 48

                      self.ch_axis = self.activation_post_process.ch_axis \
            if hasattr(self.activation_post_process, 'ch_axis') else -1
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))

        bitrange = torch.tensor(quant_max - quant_min + 1).double()
        self.bitwidth = int(torch.log2(bitrange).item())
        self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))

            

Reported by Pylint.