The following issues were found:

torch/nn/quantized/dynamic/modules/linear.py
24 issues
Module 'torch' has no 'qint8' member
Error

Line: 32 Column: 69

    # version used in this class is different from the parent class nnq.Linear
    _version = 4

    def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
        super(Linear, self).__init__(in_features, out_features, bias_, dtype=dtype)
        # We don't muck around with buffers or attributes or anything here
        # to keep the module simple. *everything* is simply a Python attribute.
        # Serialization logic is explicitly handled in the below serialization and
        # deserialization modules

Reported by Pylint.
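
Note: the 'no-member' reports on torch.qint8 here (and on torch.float16, torch.device, torch.zeros_like, torch.preserve_format, and torch._foreach_zero_ below) are false positives. The attributes exist at runtime but are created by torch's C extension, which Pylint's static analysis cannot introspect. A minimal sketch of the usual suppressions, assuming a standard Pylint setup:

    import torch

    # Both lines run fine at runtime even though a default Pylint pass
    # flags them, because the members live in the C extension.
    t = torch.zeros_like(torch.tensor([1.0]))
    dtype = torch.qint8  # pylint: disable=no-member

    # Project-wide alternative (pylintrc; shown as comments since it is INI):
    #   [TYPECHECK]
    #   generated-members=torch.*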

Module 'torch' has no 'qint8' member
Error

Line: 42 Column: 41

    def forward(self, x):
        # Note that we can handle self.bias == None case.
        if self._packed_params.dtype == torch.qint8:
            if self.version is None or self.version < 4:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params)
            else:
                Y = torch.ops.quantized.linear_dynamic(

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 49 Column: 43

            else:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params, reduce_range=True)
        elif self._packed_params.dtype == torch.float16:
            Y = torch.ops.quantized.linear_dynamic_fp16(
                x, self._packed_params._packed_params)
        else:
            raise RuntimeError('Unsupported dtype on dynamic quantized linear!')
        return Y.to(x.dtype)

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 63 Column: 41

        extra_repr_str = 'in_features={}, out_features={}, dtype={}'.format(
            self.in_features, self.out_features, self._packed_params.dtype
        )
        if self._packed_params.dtype == torch.qint8:
            extra_repr_str += ', qscheme={}'.format(self.weight().qscheme())
        return extra_repr_str

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 96 Column: 39

            from torch.quantization.qconfig import default_dynamic_qconfig
            weight_observer = default_dynamic_qconfig.weight()
        dtype = weight_observer.dtype
        assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \
            "dynamic quantized linear are qint8 and float16 got: {}".format(dtype)
        weight_observer(mod.weight)
        if dtype == torch.qint8:
            qweight = _quantize_weight(mod.weight.float(), weight_observer)
        elif dtype == torch.float16:

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 96 Column: 26

            from torch.quantization.qconfig import default_dynamic_qconfig
            weight_observer = default_dynamic_qconfig.weight()
        dtype = weight_observer.dtype
        assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \
            "dynamic quantized linear are qint8 and float16 got: {}".format(dtype)
        weight_observer(mod.weight)
        if dtype == torch.qint8:
            qweight = _quantize_weight(mod.weight.float(), weight_observer)
        elif dtype == torch.float16:

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 99 Column: 21

        assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \
            "dynamic quantized linear are qint8 and float16 got: {}".format(dtype)
        weight_observer(mod.weight)
        if dtype == torch.qint8:
            qweight = _quantize_weight(mod.weight.float(), weight_observer)
        elif dtype == torch.float16:
            qweight = mod.weight.float()
        else:
            raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!')

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 101 Column: 23

        weight_observer(mod.weight)
        if dtype == torch.qint8:
            qweight = _quantize_weight(mod.weight.float(), weight_observer)
        elif dtype == torch.float16:
            qweight = mod.weight.float()
        else:
            raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!')
        qlinear = Linear(mod.in_features, mod.out_features, dtype=dtype)
        qlinear.set_weight_bias(qweight, mod.bias)

Reported by Pylint.

Access to a protected member _packed_params of a client class
Error

Line: 45 Column: 24

        if self._packed_params.dtype == torch.qint8:
            if self.version is None or self.version < 4:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params)
            else:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params, reduce_range=True)
        elif self._packed_params.dtype == torch.float16:
            Y = torch.ops.quantized.linear_dynamic_fp16(

Reported by Pylint.
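
Note: the 'protected-access' reports flag the module reaching into the '_packed_params' attribute of its helper object. The access is intentional here, so an inline '# pylint: disable=protected-access' on the flagged lines is the lightest fix; the structural alternative is a public accessor on the helper. A self-contained sketch of that pattern (all names below are hypothetical, for illustration only):

    class _Params:
        def __init__(self):
            self._packed_params = ('weight', 'bias')  # internal payload

        @property
        def packed(self):
            # Sanctioned read-only path to the internal payload.
            return self._packed_params

    class Linear:
        def __init__(self):
            self._packed_params = _Params()

        def forward(self, x):
            # No protected-access warning: goes through the property.
            return (x, self._packed_params.packed)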

Access to a protected member _packed_params of a client class
Error

Line: 48 Column: 24

                    x, self._packed_params._packed_params)
            else:
                Y = torch.ops.quantized.linear_dynamic(
                    x, self._packed_params._packed_params, reduce_range=True)
        elif self._packed_params.dtype == torch.float16:
            Y = torch.ops.quantized.linear_dynamic_fp16(
                x, self._packed_params._packed_params)
        else:
            raise RuntimeError('Unsupported dtype on dynamic quantized linear!')

Reported by Pylint.

torch/quantization/fuser_method_mappings.py
24 issues
Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

from typing import Union, Callable, Tuple, Dict, Optional, Type

from .utils import get_combined_dict

def fuse_conv_bn(conv, bn):
    r"""Given the conv and bn modules, fuses them and returns the fused module

    Args:

Reported by Pylint.
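
Note: 'Attempted relative import beyond top-level package' is usually an artifact of how the linter was invoked rather than a code bug. When a file is linted by path from inside the package, Pylint infers too shallow a top-level package, and valid relative imports appear to escape it. A reproduction sketch with a generic layout (names hypothetical):

    # pkg/
    #   __init__.py
    #   utils.py
    #   sub/
    #     __init__.py
    #     mod.py          # contains: from ..utils import helper
    #
    # Running 'pylint sub/mod.py' from inside pkg/ makes Pylint treat 'sub'
    # as the top-level package, so '..utils' seems to escape it.
    # Running 'pylint pkg' from the repository root resolves it correctly.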

Missing module docstring
Error

Line: 1 Column: 1

import torch.nn as nn
import torch.nn.intrinsic as nni

from typing import Union, Callable, Tuple, Dict, Optional, Type

from .utils import get_combined_dict

def fuse_conv_bn(conv, bn):
    r"""Given the conv and bn modules, fuses them and returns the fused module

Reported by Pylint.

standard import "from typing import Union, Callable, Tuple, Dict, Optional, Type" should be placed before "import torch.nn as nn"
Error

Line: 4 Column: 1

import torch.nn as nn
import torch.nn.intrinsic as nni

from typing import Union, Callable, Tuple, Dict, Optional, Type

from .utils import get_combined_dict

def fuse_conv_bn(conv, bn):
    r"""Given the conv and bn modules, fuses them and returns the fused module

Reported by Pylint.
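
Both the missing-module-docstring and wrong-import-order reports are resolved by reworking the module header: standard-library imports first, third-party second, package-local last. A sketch of the reordered header for this file (the docstring wording is a placeholder, not the project's):

    """Fuser method mappings used when fusing modules for quantization."""
    from typing import Union, Callable, Tuple, Dict, Optional, Type

    import torch.nn as nn
    import torch.nn.intrinsic as nni

    from .utils import get_combined_dict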

Argument name "bn" doesn't conform to snake_case naming style
Error

Line: 8 Column: 1

from .utils import get_combined_dict

def fuse_conv_bn(conv, bn):
    r"""Given the conv and bn modules, fuses them and returns the fused module

    Args:
        conv: Module instance of type conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 21
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

        >>> b1 = nn.BatchNorm2d(20)
        >>> m2 = fuse_conv_bn(m1, b1)
    """
    assert(conv.training == bn.training),\
        "Conv and BN both must be in the same mode (train or eval)."

    fused_module_class_map = {
        nn.Conv1d: nni.ConvBn1d,
        nn.Conv2d: nni.ConvBn2d,

Reported by Bandit.
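
Bandit's B101 fires because assert statements are removed entirely when Python runs with the -O flag, so the guarded check silently disappears in optimized builds. Where the condition validates caller input rather than an internal invariant, an explicit raise keeps the check alive. A sketch for the precondition above (the helper name is hypothetical):

    def _check_same_mode(conv, bn):
        # Unlike assert, this check survives 'python -O'.
        if conv.training != bn.training:
            raise ValueError(
                'Conv and BN both must be in the same mode (train or eval).')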

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 31
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

    }

    if conv.training:
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        fused_module_class = fused_module_class_map.get((type(conv)), None)
        if fused_module_class is not None:
            return fused_module_class(conv, bn)

Reported by Bandit.

Line too long (118/100)
Error

Line: 31 Column: 1

    }

    if conv.training:
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        fused_module_class = fused_module_class_map.get((type(conv)), None)
        if fused_module_class is not None:
            return fused_module_class(conv, bn)

Reported by Pylint.
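
The line-too-long reports on the same asserts can be addressed independently by parenthesizing the messages, which brings each physical line under the 100-character limit without changing behavior. A wrapping sketch (hypothetical helper name):

    def _check_bn_fusable(conv, bn):
        assert bn.num_features == conv.out_channels, (
            'Output channel of Conv2d must match num_features of BatchNorm2d')
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, (
            'Only support fusing BatchNorm2d with tracking_running_stats set to True')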

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 32
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

    if conv.training:
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        fused_module_class = fused_module_class_map.get((type(conv)), None)
        if fused_module_class is not None:
            return fused_module_class(conv, bn)
        else:

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 33
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

    if conv.training:
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        fused_module_class = fused_module_class_map.get((type(conv)), None)
        if fused_module_class is not None:
            return fused_module_class(conv, bn)
        else:
            raise NotImplementedError("Cannot fuse train modules: {}".format((conv, bn)))

Reported by Bandit.

Line too long (112/100)
Error

Line: 33 Column: 1

    if conv.training:
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        fused_module_class = fused_module_class_map.get((type(conv)), None)
        if fused_module_class is not None:
            return fused_module_class(conv, bn)
        else:
            raise NotImplementedError("Cannot fuse train modules: {}".format((conv, bn)))

Reported by Pylint.

torch/nn/modules/flatten.py
24 issues
Attempted relative import beyond top-level package
Error

Line: 1 Column: 1

from .module import Module

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size


class Flatten(Module):
    r"""

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 41 Column: 23

        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input: Tensor) -> Tensor:
        return input.flatten(self.start_dim, self.end_dim)

    def extra_repr(self) -> str:
        return 'start_dim={}, end_dim={}'.format(
            self.start_dim, self.end_dim

Reported by Pylint.
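
Note: 'input' shadows the built-in of the same name, but it is also the long-standing parameter name across torch.nn, so renaming it would change the keyword-argument surface of forward(). A runnable sketch of keeping the conventional name and silencing the check locally (renaming the parameter, e.g. to 'x', is the pragma-free alternative):

    from torch import Tensor
    import torch.nn as nn

    class Flatten(nn.Module):
        def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
            super().__init__()  # also the Python 3 super() form Pylint asks for
            self.start_dim = start_dim
            self.end_dim = end_dim

        def forward(self, input: Tensor) -> Tensor:  # pylint: disable=redefined-builtin
            return input.flatten(self.start_dim, self.end_dim)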

Redefining built-in 'input'
Error

Line: 115 Column: 36

        self.dim = dim
        self.unflattened_size = unflattened_size

    def _require_tuple_tuple(self, input):
        if (isinstance(input, tuple)):
            for idx, elem in enumerate(input):
                if not isinstance(elem, tuple):
                    raise TypeError("unflattened_size must be tuple of tuples, " +
                                    "but found element of type {} at pos {}".format(type(elem).__name__, idx))

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 125 Column: 34

        raise TypeError("unflattened_size must be a tuple of tuples, " +
                        "but found type {}".format(type(input).__name__))

    def _require_tuple_int(self, input):
        if (isinstance(input, (tuple, list))):
            for idx, elem in enumerate(input):
                if not isinstance(elem, int):
                    raise TypeError("unflattened_size must be tuple of ints, " +
                                    "but found element of type {} at pos {}".format(type(elem).__name__, idx))

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 134 Column: 23

            return
        raise TypeError("unflattened_size must be a tuple of ints, but found type {}".format(type(input).__name__))

    def forward(self, input: Tensor) -> Tensor:
        return input.unflatten(self.dim, self.unflattened_size)

    def extra_repr(self) -> str:
        return 'dim={}, unflattened_size={}'.format(self.dim, self.unflattened_size)

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

from .module import Module

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size


class Flatten(Module):
    r"""

Reported by Pylint.

standard import "from typing import Tuple, Union" should be placed before "from .module import Module"
Error

Line: 3 Column: 1

from .module import Module

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size


class Flatten(Module):
    r"""

Reported by Pylint.

third party import "from torch import Tensor" should be placed before "from .module import Module"
Error

Line: 4 Column: 1

from .module import Module

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size


class Flatten(Module):
    r"""

Reported by Pylint.

third party import "from torch.types import _size" should be placed before "from .module import Module"
Error

Line: 5 Column: 1

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size


class Flatten(Module):
    r"""
    Flattens a contiguous range of dims into a tensor. For use with :class:`~nn.Sequential`.

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 37 Column: 9

    end_dim: int

    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super(Flatten, self).__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input: Tensor) -> Tensor:
        return input.flatten(self.start_dim, self.end_dim)

Reported by Pylint.
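
The zero-argument form is equivalent under Python 3 and avoids repeating the class name. A one-line sketch of the flagged call rewritten (see also the Flatten sketch above):

    super().__init__()  # instead of super(Flatten, self).__init__()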

torch/fx/passes/split_utils.py
24 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

import torch.fx
import torch.nn as nn
from torch.fx.graph import map_arg
from .tools_common import NodeList, NodeSet


@dataclass
class Component:
    """

Reported by Pylint.

Method 'forward' is abstract in class 'torch.nn.modules.module' but is not overridden
Error

Line: 35 Column: 1

    gm: Optional[torch.fx.GraphModule] = None


class HolderModule(nn.Module):
    """
    HolderModule is used to copy all the attributes from original module to submodules
    that uses the attributes
    """

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

from dataclasses import dataclass, field
from typing import List, Optional, Dict

import torch.fx
import torch.nn as nn
from torch.fx.graph import map_arg
from .tools_common import NodeList, NodeSet

Reported by Pylint.

Too many instance attributes (9/7)
Error

Line: 11 Column: 1

@dataclass
class Component:
    """
    A component serves as a container for a subgraph we want to create afterwards.
    """

    graph: torch.fx.Graph

Reported by Pylint.

Attribute name "gm" doesn't conform to snake_case naming style
Error

Line: 32 Column: 5

    # Mapping from get_attr node in original graph to get_attr node in `graph`.
    getattr_maps: Dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict)
    constructor_args: List[str] = field(default_factory=list)
    gm: Optional[torch.fx.GraphModule] = None


class HolderModule(nn.Module):
    """
    HolderModule is used to copy all the attributes from original module to submodules

Reported by Pylint.

Variable name "v" doesn't conform to snake_case naming style
Error

Line: 43 Column: 16

        super().__init__()
        for k, v in d.items():
            self.add_module(k, v)


def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
    """

Reported by Pylint.

Too many local variables (32/15)
Error

Line: 47 Column: 1

            self.add_module(k, v)


def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
    """
    Splits a GraphModule using tags on its graph nodes. We honor the order of
    tags. For example, we have tags = ["a", "b", "c"], the function will create
    the initial submodules in the order of "a_0", "b_1", "c_2".

Reported by Pylint.

Argument name "gm" doesn't conform to snake_case naming style
Error

Line: 47 Column: 1

            self.add_module(k, v)


def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
    """
    Splits a GraphModule using tags on its graph nodes. We honor the order of
    tags. For example, we have tags = ["a", "b", "c"], the function will create
    the initial submodules in the order of "a_0", "b_1", "c_2".

Reported by Pylint.

Too many statements (77/50)
Error

Line: 47 Column: 1

            self.add_module(k, v)


def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
    """
    Splits a GraphModule using tags on its graph nodes. We honor the order of
    tags. For example, we have tags = ["a", "b", "c"], the function will create
    the initial submodules in the order of "a_0", "b_1", "c_2".

Reported by Pylint.

Too many branches (22/12)
Error

Line: 47 Column: 1

            self.add_module(k, v)


def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
    """
    Splits a GraphModule using tags on its graph nodes. We honor the order of
    tags. For example, we have tags = ["a", "b", "c"], the function will create
    the initial submodules in the order of "a_0", "b_1", "c_2".

Reported by Pylint.

torch/nn/parallel/data_parallel.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 5 Column: 1

import torch
import warnings
from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (
    _get_all_device_indices,

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

import warnings
from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (
    _get_all_device_indices,
    _get_available_device_type,

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (
    _get_all_device_indices,
    _get_available_device_type,
    _get_device_index,

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 8 Column: 1

from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (
    _get_all_device_indices,
    _get_available_device_type,
    _get_device_index,
    _get_devices_properties

Reported by Pylint.

Module 'torch' has no 'device' member
Error

Line: 140 Column: 31

        self.module = module
        self.device_ids = [_get_device_index(x, True) for x in device_ids]
        self.output_device = _get_device_index(output_device, True)
        self.src_device_obj = torch.device(device_type, self.device_ids[0])

        _check_balance(self.device_ids)

        if len(self.device_ids) == 1:
            self.module.to(self.src_device_obj)

Reported by Pylint.

Module 'torch' has no 'is_grad_enabled' member
Error

Line: 172 Column: 50

        return self.gather(outputs, self.output_device)

    def replicate(self, module, device_ids):
        return replicate(module, device_ids, not torch.is_grad_enabled())

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def parallel_apply(self, replicas, inputs, kwargs):

Reported by Pylint.

Module 'torch' has no 'device' member
Error

Line: 212 Column: 22

    device_ids = [_get_device_index(x, True) for x in device_ids]
    output_device = _get_device_index(output_device, True)
    src_device_obj = torch.device(device_type, device_ids[0])

    for t in chain(module.parameters(), module.buffers()):
        if t.device != src_device_obj:
            raise RuntimeError("module must have its parameters and buffers "
                               "on device {} (device_ids[0]) but found one of "

Reported by Pylint.

TODO: update notes/cuda.rst when this class handles 8+ GPUs well
Error

Line: 119 Column: 3

        >>> output = net(input_var)  # input_var can be on any device, including CPU
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(self, module, device_ids=None, output_device=None, dim=0):
        super(DataParallel, self).__init__()

        device_type = _get_available_device_type()

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import operator
import torch
import warnings
from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (

Reported by Pylint.

standard import "import warnings" should be placed before "import torch"
Error

Line: 3 Column: 1

import operator
import torch
import warnings
from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (

Reported by Pylint.

torch/optim/_multi_tensor/adamax.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class Adamax(Optimizer):
    """Implements Adamax algorithm (a variant of Adam based on infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class Adamax(Optimizer):
    """Implements Adamax algorithm (a variant of Adam based on infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 75 Column: 78

                    # State initialization
                    if len(state) == 0:
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_infs.append(state['exp_inf'])

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 75 Column: 44

                    # State initialization
                    if len(state) == 0:
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_infs.append(state['exp_inf'])

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 76 Column: 44

                    if len(state) == 0:
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_infs.append(state['exp_inf'])

                    state['step'] += 1

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 76 Column: 78

                    if len(state) == 0:
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_infs.append(state['exp_inf'])

                    state['step'] += 1

Reported by Pylint.

Module 'torch' has no '_foreach_zero_' member
Error

Line: 118 Column: 21

            for _, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._foreach_zero_(grads)

Reported by Pylint.

TODO: refactor to a base class once foreach ops are in a good shape.
Error

Line: 97 Column: 3

        return loss

    # TODO: refactor to a base class once foreach ops are in a good shape.
    def zero_grad(self, set_to_none: bool = False):
        per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:

Reported by Pylint.

Access to a protected member _foreach_zero_ of a client class
Error

Line: 118 Column: 21

            for _, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._foreach_zero_(grads)

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class Adamax(Optimizer):
    """Implements Adamax algorithm (a variant of Adam based on infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

Reported by Pylint.

torch/optim/_multi_tensor/__init__.py
23 issues
Unable to import '__init__.adam'
Error

Line: 8 Column: 1

future.
"""

from .adam import Adam
from .adamw import AdamW
from .nadam import NAdam
from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop

Reported by Pylint.
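
Note: the 'Unable to import' family here is another invocation artifact. When __init__.py is linted by file path, Pylint names the module '__init__', so the relative import '.adam' resolves to the nonexistent '__init__.adam'. A sketch of the distinction:

    # Flagged:  pylint torch/optim/_multi_tensor/__init__.py
    #           -> inferred module name '__init__'; '.adam' becomes '__init__.adam'
    # Clean:    pylint torch.optim._multi_tensor   (run from the repo root)
    #           -> module name 'torch.optim._multi_tensor'; '.adam' resolves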

Unable to import '__init__.adamw'
Error

Line: 9 Column: 1

"""

from .adam import Adam
from .adamw import AdamW
from .nadam import NAdam
from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop

Reported by Pylint.

Unable to import '__init__.nadam'
Error

Line: 10 Column: 1

from .adam import Adam
from .adamw import AdamW
from .nadam import NAdam
from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD

Reported by Pylint.

Unable to import '__init__.sgd'
Error

Line: 11 Column: 1

from .adam import Adam
from .adamw import AdamW
from .nadam import NAdam
from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax

Reported by Pylint.

Unable to import '__init__.radam'
Error

Line: 12 Column: 1

from .adamw import AdamW
from .nadam import NAdam
from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax
from .adadelta import Adadelta

Reported by Pylint.

Unable to import '__init__.rmsprop'
Error

Line: 13 Column: 1

from .nadam import NAdam
from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax
from .adadelta import Adadelta
from .adagrad import Adagrad

Reported by Pylint.

Unable to import '__init__.rprop'
Error

Line: 14 Column: 1

from .sgd import SGD
from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax
from .adadelta import Adadelta
from .adagrad import Adagrad

Reported by Pylint.

Unable to import '__init__.asgd'
Error

Line: 15 Column: 1

from .radam import RAdam as RAdam
from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax
from .adadelta import Adadelta
from .adagrad import Adagrad

del adam

Reported by Pylint.

Unable to import '__init__.adamax'
Error

Line: 16 Column: 1

from .rmsprop import RMSprop
from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax
from .adadelta import Adadelta
from .adagrad import Adagrad

del adam
del adamw

Reported by Pylint.

Unable to import '__init__.adadelta'
Error

Line: 17 Column: 1

from .rprop import Rprop
from .asgd import ASGD
from .adamax import Adamax
from .adadelta import Adadelta
from .adagrad import Adagrad

del adam
del adamw
del sgd

Reported by Pylint.

torch/optim/adam.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class Adam(Optimizer):
    r"""Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class Adam(Optimizer):
    r"""Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 89 Column: 44

                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 89 Column: 78

                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 91 Column: 47

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 91 Column: 81

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 94 Column: 89

                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    if group['amsgrad']:

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 94 Column: 55

                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    if group['amsgrad']:

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class Adam(Optimizer):
    r"""Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 34 Column: 5

        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))

Reported by Pylint.

torch/optim/adamw.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class AdamW(Optimizer):
    r"""Implements AdamW algorithm.

    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class AdamW(Optimizer):
    r"""Implements AdamW algorithm.

    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 92 Column: 40

                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 92 Column: 74

                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 94 Column: 43

                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 94 Column: 77

                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 97 Column: 85

                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])

                if amsgrad:

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 97 Column: 51

                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])

                if amsgrad:

Reported by Pylint.

Unused variable 'state_sums'
Error

Line: 72 Column: 13

            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_sums = []
            max_exp_avg_sqs = []
            state_steps = []
            amsgrad = group['amsgrad']
            beta1, beta2 = group['betas']

Reported by Pylint.
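
The fix for the unused-variable report is simply to drop the accumulator that is built but, per the report, never used afterwards. A sketch of the trimmed block (names taken from the snippet above):

    grads = []
    exp_avgs = []
    exp_avg_sqs = []
    max_exp_avg_sqs = []
    state_steps = []
    # state_sums removed: it was assigned but never read.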

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class AdamW(Optimizer):
    r"""Implements AdamW algorithm.

    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.

Reported by Pylint.

torch/optim/nadam.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class NAdam(Optimizer):
    r"""Implements NAdam algorithm.

    It has been proposed in `Incorporating Nesterov Momentum into Adam`_.

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class NAdam(Optimizer):
    r"""Implements NAdam algorithm.

    It has been proposed in `Incorporating Nesterov Momentum into Adam`_.

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 79 Column: 44

                        state['step'] = 0
                        state['mu_product'] = 1.
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 79 Column: 78

                        state['step'] = 0
                        state['mu_product'] = 1.
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 81 Column: 47

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])
                    mu_products.append(state['mu_product'])

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 81 Column: 81

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])
                    mu_products.append(state['mu_product'])

Reported by Pylint.

Unused variable 'mu_product'
Error

Line: 106 Column: 20

                    eps=group['eps'])

            # update mu_product
            for p, mu_product in zip(params_with_grad, mu_products):
                state = self.state[p]
                state['mu_product'] = state['mu_product'] * beta1 * \
                    (1. - 0.5 * (0.96 ** (state['step'] * group['momentum_decay'])))

        return loss

Reported by Pylint.
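
The loop binds 'mu_product' but reads the value back from self.state instead, so the binding is dead. Renaming it to '_' documents that and clears the warning. A sketch wrapping the flagged loop in a hypothetical helper so it stands alone:

    def _update_mu_products(self, params_with_grad, mu_products, beta1, group):
        for p, _ in zip(params_with_grad, mu_products):
            state = self.state[p]
            state['mu_product'] = state['mu_product'] * beta1 * \
                (1. - 0.5 * (0.96 ** (state['step'] * group['momentum_decay'])))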

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class NAdam(Optimizer):
    r"""Implements NAdam algorithm.

    It has been proposed in `Incorporating Nesterov Momentum into Adam`_.

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 6 Column: 1

from .optimizer import Optimizer


class NAdam(Optimizer):
    r"""Implements NAdam algorithm.

    It has been proposed in `Incorporating Nesterov Momentum into Adam`_.

    Args:

Reported by Pylint.

Argument name "lr" doesn't conform to snake_case naming style
Error

Line: 26 Column: 5

        https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, momentum_decay=4e-3):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))

Reported by Pylint.
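
Note on the naming reports: identifiers such as 'bn', 'gm', 'lr', and 'v' are domain conventions in this codebase (batch norm, graph module, learning rate) rather than mistakes. Instead of renaming them everywhere, the usual remedy is to whitelist them. A pylintrc sketch, shown as comments since it is INI rather than Python:

    #   [BASIC]
    #   good-names=bn,gm,lr,p,t,v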