The following issues were found:

torch/optim/sparse_adam.py
21 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and

            

Reported by Pylint.
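
Note: this diagnostic (and the companion report below for line 3) is usually a false positive for in-tree code. `sparse_adam.py` lives inside the `torch.optim` package, so `from . import _functional` resolves fine at runtime; Pylint only emits relative-beyond-top-level when it is invoked in a way that does not treat `torch` as an importable package (e.g. linting the file by path from inside the source tree). A minimal sketch of a per-line suppression, assuming the imports should stay as written:

    import torch
    from . import _functional as F  # pylint: disable=relative-beyond-top-level
    from .optimizer import Optimizer  # pylint: disable=relative-beyond-top-level

Running Pylint from the repository root, so that `torch` is importable as a package, avoids the suppression entirely. The same reasoning covers every relative-import report in this document.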

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 89 Column: 44

                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

            

Reported by Pylint.
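
Note: `torch.zeros_like` and `torch.preserve_format` both exist at runtime; Pylint cannot see them because much of `torch`'s surface is defined in C extension modules. Rather than patching the code, the usual remedy is to let Pylint introspect the extension, e.g. `--extension-pkg-allow-list=torch` (older Pylint versions spell it `extension-pkg-whitelist`). A per-line sketch, if a config change is not an option:

    # Members exist at runtime but are invisible to static analysis:
    state['exp_avg'] = torch.zeros_like(
        p, memory_format=torch.preserve_format)  # pylint: disable=no-member

The same fix covers every `no-member` report against `torch` in this document.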

Module 'torch' has no 'preserve_format' member
Error

Line: 89 Column: 78

                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 91 Column: 81

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    # update the steps for each param group update

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 91 Column: 47

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    # update the steps for each param group update

            

Reported by Pylint.

Unused variable 'eps'
Error

Line: 72 Column: 13

            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            eps = group['eps']
            lr = group['lr']
            beta1, beta2 = group['betas']

            for p in group['params']:
                if p.grad is not None:

            

Reported by Pylint.

Unused variable 'lr'
Error

Line: 73 Column: 13

            exp_avg_sqs = []
            state_steps = []
            eps = group['eps']
            lr = group['lr']
            beta1, beta2 = group['betas']

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)

            

Reported by Pylint.
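
Note: `eps` and `lr` are bound here but, in the version analyzed, never read again. If the functional call further down should receive them, the fix is to thread them through; if not, drop the bindings. A self-contained sketch of the two conventional remedies, using a hypothetical helper:

    def demo(group):
        """Sketch: resolving pylint's unused-variable report."""
        beta1, beta2 = group['betas']
        # 1) Don't bind what you don't use: read group['lr'] / group['eps']
        #    directly at the call site that needs them.
        # 2) If the binding is kept for documentation, a leading underscore
        #    matches pylint's default dummy-variables-rgx:
        _eps = group['eps']
        _lr = group['lr']
        return beta1, beta2

    demo({'betas': (0.9, 0.999), 'lr': 1e-3, 'eps': 1e-8})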

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and

            

Reported by Pylint.
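
Note: the module-level docstring is genuinely missing. A one-line fix (the wording is illustrative, not taken from upstream):

    """Sparse (lazy) variant of the Adam optimizer."""
    import torch
    from . import _functional as F
    from .optimizer import Optimizer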

Too few public methods (1/2)
Error

Line: 6 Column: 1

from .optimizer import Optimizer


class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and
    only those portions of the gradient get applied to the parameters.


            

Reported by Pylint.
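
Note: optimizer subclasses expose a single public method (`step`) by design, so too-few-public-methods is usually suppressed rather than "fixed". A sketch:

    from torch.optim.optimizer import Optimizer

    class SparseAdam(Optimizer):  # pylint: disable=too-few-public-methods
        r"""Implements lazy version of Adam algorithm suitable for sparse tensors."""

Lowering `min-public-methods` in the Pylint config is the project-wide alternative.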

torch/utils/data/datapipes/iter/combining.py
21 issues
Method '__getitem__' is abstract in class 'Dataset' but is not overridden
Error

Line: 10 Column: 1

              

@functional_datapipe('concat')
class ConcatIterDataPipe(IterDataPipe):
    r""" :class:`ConcatIterDataPipe`.

    Iterable DataPipe to concatenate multiple Iterable DataPipes.
    args:
        datapipes: Iterable DataPipes being concatenated

            

Reported by Pylint.

TODO(VitalyFedyunin): Replace with valid version, documentation and tests
Error

Line: 46 Column: 3

              

# This is fake class to show API, going to be replaced by the copy from torchdata
# TODO(VitalyFedyunin): Replace with valid version, documentation and tests
class IterateBuffer(IterDataPipe):
    def __init__(self, buffer):
        self.buffer = buffer

    def __iter__(self):

            

Reported by Pylint.

Method '__getitem__' is abstract in class 'Dataset' but is not overridden
Error

Line: 47 Column: 1

              
# This is fake class to show API, going to be replaced by the copy from torchdata
# TODO(VitalyFedyunin): Replace with valid version, documentation and tests
class IterateBuffer(IterDataPipe):
    def __init__(self, buffer):
        self.buffer = buffer

    def __iter__(self):
        for i in self.buffer:

            

Reported by Pylint.

Method '__getitem__' is abstract in class 'Dataset' but is not overridden
Error

Line: 57 Column: 1

              

@functional_datapipe('fork')
class ForkIterDataPipe(IterDataPipe):

    def __new__(cls, datapipe, instances):
        result = []
        buffer = list(datapipe)
        return [IterateBuffer(buffer) for i in range(instances)]

            

Reported by Pylint.

Method '__iter__' is abstract in class 'IterableDataset' but is not overridden
Error

Line: 57 Column: 1

              

@functional_datapipe('fork')
class ForkIterDataPipe(IterDataPipe):

    def __new__(cls, datapipe, instances):
        result = []
        buffer = list(datapipe)
        return [IterateBuffer(buffer) for i in range(instances)]

            

Reported by Pylint.
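
Note: these abstract-method reports (here and in the similar entries below) stem from the class hierarchy, not from this file. `IterDataPipe` derives from `IterableDataset`, whose bases declare `__getitem__`/`__iter__` abstract; pipes such as `ForkIterDataPipe` hand back other objects from `__new__` and are never instantiated directly, so the missing overrides are intentional. A schematic stand-in (plain Python, no torch) showing the pattern and the targeted suppression:

    import abc

    class IterableBase(abc.ABC):
        @abc.abstractmethod
        def __iter__(self):
            ...

    # The subclass is a factory: __new__ returns other iterables, so the
    # abstract method is deliberately left unimplemented.
    class Fork(IterableBase):  # pylint: disable=abstract-method
        def __new__(cls, buffers):
            return [iter(list(b)) for b in buffers]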

Unused variable 'result'
Error

Line: 60 Column: 9

class ForkIterDataPipe(IterDataPipe):

    def __new__(cls, datapipe, instances):
        result = []
        buffer = list(datapipe)
        return [IterateBuffer(buffer) for i in range(instances)]


@functional_datapipe('demux')

            

Reported by Pylint.

Method '__getitem__' is abstract in class 'Dataset' but is not overridden
Error

Line: 66 Column: 1

              

@functional_datapipe('demux')
class DemultiplexerIterDataPipe(IterDataPipe):

    def __new__(cls, datapipe, instances, classifier_fn):
        result = []
        buffer = list(datapipe)


            

Reported by Pylint.

Method '__iter__' is abstract in class 'IterableDataset' but is not overridden
Error

Line: 66 Column: 1

              

@functional_datapipe('demux')
class DemultiplexerIterDataPipe(IterDataPipe):

    def __new__(cls, datapipe, instances, classifier_fn):
        result = []
        buffer = list(datapipe)


            

Reported by Pylint.

Unused variable 'result'
Error

Line: 69 Column: 9

class DemultiplexerIterDataPipe(IterDataPipe):

    def __new__(cls, datapipe, instances, classifier_fn):
        result = []
        buffer = list(datapipe)

        def filter_fn(classifier_fn, i, x):
            return classifier_fn(x) == i
        return [IterateBuffer(buffer).filter(functools.partial(filter_fn, classifier_fn, i)) for i in range(instances)]

            

Reported by Pylint.
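
Note: in both `__new__` methods the `result = []` binding is dead code; deleting it is the whole fix. The loop variable `i` in the first case is likewise unused and conventionally becomes `_`. A self-contained sketch of the corrected shape:

    class Source:
        def __new__(cls, datapipe, instances):
            buffer = list(datapipe)  # 'result = []' removed: never used
            return [list(buffer) for _ in range(instances)]

    print(Source(range(3), 2))  # [[0, 1, 2], [0, 1, 2]]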

Method '__getitem__' is abstract in class 'Dataset' but is not overridden
Error

Line: 77 Column: 1

        return [IterateBuffer(buffer).filter(functools.partial(filter_fn, classifier_fn, i)) for i in range(instances)]

@functional_datapipe('mux')
class MultiplexerIterDataPipe(IterDataPipe):

    def __init__(self, *datapipes):
        self.datapipes = datapipes

    def __iter__(self):

            

Reported by Pylint.

torch/nn/parallel/_functions.py
21 issues
Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

import warnings

import torch
from . import comm
from torch.autograd import Function
from torch._utils import _get_device_index
from typing import List, Optional



            

Reported by Pylint.

Parameters differ from overridden 'forward' method
Error

Line: 13 Column: 5

class Broadcast(Function):

    @staticmethod
    def forward(ctx, target_gpus, *inputs):
        assert all(i.device.type != 'cpu' for i in inputs), (
            'Broadcast function not implemented for CPU tensors'
        )
        target_gpus = [_get_device_index(x, True) for x in target_gpus]
        ctx.target_gpus = target_gpus

            

Reported by Pylint.
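
Note: `torch.autograd.Function.forward` is declared variadic (`*args, **kwargs`), and the documented extension pattern is to narrow it to named parameters — which is exactly what trips `arguments-differ` here and in the `forward`/`backward` reports below. The conventional response is a scoped disable, sketched as:

    from torch.autograd import Function

    class Broadcast(Function):
        @staticmethod
        def forward(ctx, target_gpus, *inputs):  # pylint: disable=arguments-differ
            ...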

Parameters differ from overridden 'forward' method
Error

Line: 40 Column: 5

class ReduceAddCoalesced(Function):

    @staticmethod
    def forward(ctx, destination, num_inputs, *grads):
        ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]

        grads_ = [grads[i:i + num_inputs]
                  for i in range(0, len(grads), num_inputs)]
        return comm.reduce_add_coalesced(grads_, destination)

            

Reported by Pylint.

Parameters differ from overridden 'forward' method
Error

Line: 55 Column: 5

class Gather(Function):

    @staticmethod
    def forward(ctx, target_device, dim, *inputs):
        assert all(i.device.type != 'cpu' for i in inputs), (
            'Gather function not implemented for CPU tensors'
        )
        if (target_device == 'cpu'):
            ctx.target_device = 'cpu'

            

Reported by Pylint.

Parameters differ from overridden 'backward' method
Error

Line: 78 Column: 5

        return comm.gather(inputs, ctx.dim, ctx.target_device)

    @staticmethod
    def backward(ctx, grad_output):
        scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
        if ctx.unsqueezed_scalar:
            scattered_grads = tuple(g[0] for g in scattered_grads)
        return (None, None) + scattered_grads


            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 88 Column: 53

class Scatter(Function):

    @staticmethod
    def forward(ctx, target_gpus, chunk_sizes, dim, input):
        target_gpus = [_get_device_index(x, True) for x in target_gpus]
        ctx.dim = dim
        ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
        streams = None
        if torch.cuda.is_available() and ctx.input_device == -1:

            

Reported by Pylint.
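
Note: the parameter shadows the `input` builtin. Renaming it is the clean fix but changes the keyword API, which is why such code often keeps the name and disables the check instead. A hypothetical rename, safe for positional callers (the name `tensor` is my choice, not upstream's):

    def scatter_forward(target_gpus, chunk_sizes, dim, tensor):
        # 'tensor' replaces 'input'; no builtin is shadowed.
        return (target_gpus, chunk_sizes, dim, tensor)

    scatter_forward([0], None, 0, "x")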

Parameters differ from overridden 'forward' method
Error

Line: 88 Column: 5

class Scatter(Function):

    @staticmethod
    def forward(ctx, target_gpus, chunk_sizes, dim, input):
        target_gpus = [_get_device_index(x, True) for x in target_gpus]
        ctx.dim = dim
        ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
        streams = None
        if torch.cuda.is_available() and ctx.input_device == -1:

            

Reported by Pylint.

Using the global statement
Error

Line: 117 Column: 5

              
def _get_stream(device: int):
    """Gets a background stream for copying between CPU and GPU"""
    global _streams
    if device == -1:
        return None
    if _streams is None:
        _streams = [None] * torch.cuda.device_count()
    if _streams[device] is None:

            

Reported by Pylint.
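
Note: the `global` statement here implements a lazy module-level cache, which is a legitimate use; Pylint flags every `global` regardless. If a suppression is undesirable, the same cache can live in a mutable container, which needs no `global` declaration. A sketch, where `object()` stands in for `torch.cuda.Stream(device)`:

    _streams_cache = {}  # hypothetical replacement for the _streams list

    def _get_stream_sketch(device: int):
        """Gets a background stream for copying between CPU and GPU."""
        if device == -1:
            return None
        if device not in _streams_cache:
            _streams_cache[device] = object()  # stand-in for torch.cuda.Stream(device)
        return _streams_cache[device]

    print(_get_stream_sketch(0) is _get_stream_sketch(0))  # True: cached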

Missing module docstring
Error

Line: 1 Column: 1

import warnings

import torch
from . import comm
from torch.autograd import Function
from torch._utils import _get_device_index
from typing import List, Optional



            

Reported by Pylint.

third party import "from torch.autograd import Function" should be placed before "from . import comm"
Error

Line: 5 Column: 1

              
import torch
from . import comm
from torch.autograd import Function
from torch._utils import _get_device_index
from typing import List, Optional


class Broadcast(Function):

            

Reported by Pylint.
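
Note: the two import-order reports above share one fix. Pylint wants standard library first, then third-party (which, from the linter's viewpoint, includes absolute `torch` imports), then package-relative imports. A sketch of the reordered header for `_functions.py`:

    import warnings
    from typing import List, Optional

    import torch
    from torch.autograd import Function
    from torch._utils import _get_device_index

    from . import comm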

torch/nn/modules/upsampling.py
21 issues
Attempted relative import beyond top-level package
Error

Line: 1 Column: 1

from .module import Module
from .. import functional as F

from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t


class Upsample(Module):

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

from .module import Module
from .. import functional as F

from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t


class Upsample(Module):

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              
from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t


class Upsample(Module):
    r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.


            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 140 Column: 23

        self.mode = mode
        self.align_corners = align_corners

    def forward(self, input: Tensor) -> Tensor:
        return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)

    def extra_repr(self) -> str:
        if self.scale_factor is not None:
            info = 'scale_factor=' + str(self.scale_factor)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

from .module import Module
from .. import functional as F

from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t


class Upsample(Module):

            

Reported by Pylint.

third party import "from torch import Tensor" should be placed before "from .module import Module"
Error

Line: 4 Column: 1

from .module import Module
from .. import functional as F

from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t


class Upsample(Module):

            

Reported by Pylint.

standard import "from typing import Optional" should be placed before "from torch import Tensor"
Error

Line: 5 Column: 1

from .. import functional as F

from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t


class Upsample(Module):
    r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.

            

Reported by Pylint.
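
Note: the same reordering fixes both import reports for `upsampling.py`; the relative imports move below `typing` and `torch`:

    from typing import Optional

    from torch import Tensor

    from .module import Module
    from .. import functional as F
    from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t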

Line too long (102/100)
Error

Line: 14 Column: 1

              
    The input data is assumed to be of the form
    `minibatch x channels x [optional depth] x [optional height] x width`.
    Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.

    The algorithms available for upsampling are nearest neighbor and linear,
    bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
    respectively.


            

Reported by Pylint.

Line too long (108/100)
Error

Line: 26 Column: 1

    Args:
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
            output spatial sizes
        scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
            multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
            ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
            Default: ``'nearest'``
        align_corners (bool, optional): if ``True``, the corner pixels of the input

            

Reported by Pylint.

Line too long (113/100)
Error

Line: 37 Column: 1

            ``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False``

    Shape:
        - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
          or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where

    .. math::
        D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor

            

Reported by Pylint.
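
Note: all three over-length lines sit in the `Upsample` docstring; a hard wrap with reST's hanging indent keeps Sphinx parsing the bullet. For example, the Shape entry becomes:

        - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})`
          or :math:`(N, C, D_{in}, H_{in}, W_{in})`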

torch/quantization/fx/fuse.py
20 issues
Attempted relative import beyond top-level package
Error

Line: 11 Column: 1

              
from torch.fx.graph import Graph

from ..utils import (
    get_combined_dict
)

from .pattern_utils import (
    get_default_fusion_patterns,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 15 Column: 1

    get_combined_dict
)

from .pattern_utils import (
    get_default_fusion_patterns,
)

from .match_utils import is_match


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

    get_default_fusion_patterns,
)

from .match_utils import is_match

from .graph_module import (
    FusedGraphModule
)


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 21 Column: 1

              
from .match_utils import is_match

from .graph_module import (
    FusedGraphModule
)

from .fusion_patterns import *  # noqa: F401,F403


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 25 Column: 1

    FusedGraphModule
)

from .fusion_patterns import *  # noqa: F401,F403

from .quantization_types import Pattern

from typing import Callable, Tuple


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 27 Column: 1

              
from .fusion_patterns import *  # noqa: F401,F403

from .quantization_types import Pattern

from typing import Callable, Tuple


class Fuser:

            

Reported by Pylint.

Undefined variable 'FuseHandler'
Error

Line: 71 Column: 32

    def _find_matches(
            self, root: GraphModule, graph: Graph,
            patterns: Dict[Pattern, Callable]
    ) -> Dict[str, Tuple[Node, FuseHandler]]:
        modules = dict(root.named_modules())
        match_map : Dict[str, Tuple[Node, FuseHandler]] = {}  # node name -> (root_node, match_value)

        def apply_match(pattern, node, match):
            if isinstance(pattern, tuple):

            

Reported by Pylint.

Undefined variable 'FuseHandler'
Error

Line: 73 Column: 43

            patterns: Dict[Pattern, Callable]
    ) -> Dict[str, Tuple[Node, FuseHandler]]:
        modules = dict(root.named_modules())
        match_map : Dict[str, Tuple[Node, FuseHandler]] = {}  # node name -> (root_node, match_value)

        def apply_match(pattern, node, match):
            if isinstance(pattern, tuple):
                s, *args = pattern
                apply_match(s, node, match)

            

Reported by Pylint.

Wildcard import fusion_patterns
Error

Line: 25 Column: 1

    FusedGraphModule
)

from .fusion_patterns import *  # noqa: F401,F403

from .quantization_types import Pattern

from typing import Callable, Tuple


            

Reported by Pylint.
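
Note: this wildcard-import report and the two `FuseHandler` reports above are one problem: `from .fusion_patterns import *` hides the names it provides, so Pylint can neither enumerate them nor find `FuseHandler`. Importing the annotation's type explicitly resolves all three (assuming `FuseHandler` is in fact defined in `fusion_patterns`; adjust the path otherwise):

    from .fusion_patterns import *  # noqa: F401,F403
    from .fusion_patterns import FuseHandler  # explicit, so pylint can see it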

Attribute 'modules' defined outside __init__
Error

Line: 40 Column: 9

              
        input_root = model
        input_graph = model.graph
        self.modules = dict(input_root.named_modules())

        additional_fusion_patterns = \
            fuse_custom_config_dict.get("additional_fusion_pattern", {})
        fusion_patterns = get_combined_dict(
            get_default_fusion_patterns(), additional_fusion_patterns)

            

Reported by Pylint.
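
Note: `self.modules` is first assigned inside `fuse()`. Declaring it in `__init__`, even as an empty placeholder, satisfies `attribute-defined-outside-init` and documents the attribute. A minimal sketch:

    class Fuser:
        """Sketch: declare attributes in __init__, reassign in methods."""

        def __init__(self):
            self.modules = {}  # placeholder; fuse() fills it in

        def fuse(self, named_modules):
            self.modules = dict(named_modules)
            return self.modules

    Fuser().fuse([("sub", object())])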

torch/optim/_multi_tensor/asgd.py
20 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 67 Column: 39

                        state['step'] = 0
                        state['eta'] = group['lr']
                        state['mu'] = 1
                        state['ax'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    state['step'] += 1
                    states.append(state)

            F.asgd(params_with_grad,

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 67 Column: 73

                        state['step'] = 0
                        state['eta'] = group['lr']
                        state['mu'] = 1
                        state['ax'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    state['step'] += 1
                    states.append(state)

            F.asgd(params_with_grad,

            

Reported by Pylint.

Module 'torch' has no '_foreach_zero_' member
Error

Line: 104 Column: 21

              
            for _, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._foreach_zero_(grads)

            

Reported by Pylint.

TODO: refactor to a base class once foreach ops are in a good shape.
Error

Line: 83 Column: 3

              
        return loss

    # TODO: refactor to a base class once foreach ops are in a good shape.
    def zero_grad(self, set_to_none: bool = False):
        per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:

            

Reported by Pylint.

Access to a protected member _foreach_zero_ of a client class
Error

Line: 104 Column: 21

              
            for _, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._foreach_zero_(grads)

            

Reported by Pylint.
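
Note: `torch._foreach_zero_` does exist (it is part of the foreach fast path); both reports for this line are artifacts of the leading underscore, which marks the API internal. Inside torch's own code the call is intentional, so the usual remedy is a scoped suppression on the flagged line:

    torch._foreach_zero_(grads)  # pylint: disable=protected-access, no-member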

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by

            

Reported by Pylint.

standard import "from collections import defaultdict" should be placed before "import torch"
Error

Line: 4 Column: 1

import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict

class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by

            

Reported by Pylint.

Argument name "t0" doesn't conform to snake_case naming style
Error

Line: 25 Column: 5

        https://dl.acm.org/citation.cfm?id=131098
    """

    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))


            

Reported by Pylint.
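
Note: `t0` (like the `lr` reports elsewhere in this document) is a public keyword argument; renaming it to satisfy snake-case would break every caller that passes `t0=...`. The practical options are adding these names to `good-names` in the Pylint config, or a scoped disable:

    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6,
                 weight_decay=0):  # pylint: disable=invalid-name,too-many-arguments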

torch/optim/radam.py
20 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class RAdam(Optimizer):
    r"""implements RAdam algorithm.

    It has been proposed in `On the variance of the adaptive learning rate and beyond`_.

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class RAdam(Optimizer):
    r"""implements RAdam algorithm.

    It has been proposed in `On the variance of the adaptive learning rate and beyond`_.

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 74 Column: 44

                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 74 Column: 78

                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 76 Column: 81

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    # update the steps for each param group update

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 76 Column: 47

                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    # update the steps for each param group update

            

Reported by Pylint.

Unused variable 'max_exp_avg_sqs'
Error

Line: 58 Column: 13

            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group['betas']

            for p in group['params']:
                if p.grad is not None:

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import torch
from . import _functional as F
from .optimizer import Optimizer


class RAdam(Optimizer):
    r"""implements RAdam algorithm.

    It has been proposed in `On the variance of the adaptive learning rate and beyond`_.

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 6 Column: 1

from .optimizer import Optimizer


class RAdam(Optimizer):
    r"""implements RAdam algorithm.

    It has been proposed in `On the variance of the adaptive learning rate and beyond`_.

    Args:

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 25 Column: 5

        https://arxiv.org/abs/1908.03265
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))

            

Reported by Pylint.

torch/fx/experimental/schema_type_annotation.py
20 issues
TODO: can we emit the union of these? What are the implications on TorchScript
Error

Line: 48 Column: 3

                assert not isinstance(target, str)
                dispatched = boolean_dispatched[target]
                if_true, if_false = dispatched['if_true'], dispatched['if_false']
                # TODO: can we emit the union of these? What are the implications on TorchScript
                # compilation?
                if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:
                    return super().call_function(target, args, kwargs)
                target_for_analysis = if_true


            

Reported by Pylint.

Access to a protected member _jit_try_infer_type of a client class
Error

Line: 84 Column: 38

                    raise RuntimeError(f'Node referenced nonextent target {".".join(atoms[:i])}!')
                module_itr = getattr(module_itr, atom)

            maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
            if maybe_inferred_ts_type.success():
                python_type = _torchscript_type_to_python_type(maybe_inferred_ts_type.type())
                attr_proxy.node.type = python_type if not attr_proxy.node.type else attr_proxy.node.type

        return attr_proxy

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 84 Column: 38

                    raise RuntimeError(f'Node referenced nonextent target {".".join(atoms[:i])}!')
                module_itr = getattr(module_itr, atom)

            maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
            if maybe_inferred_ts_type.success():
                python_type = _torchscript_type_to_python_type(maybe_inferred_ts_type.type())
                attr_proxy.node.type = python_type if not attr_proxy.node.type else attr_proxy.node.type

        return attr_proxy

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import torch
import torch.fx
import inspect
from typing import Any, Dict, Optional, Tuple
from torch.fx.node import Argument, Target
from torch._jit_internal import boolean_dispatched
from torch.fx.operator_schemas import _torchscript_type_to_python_type

from torch.fx import Transformer

            

Reported by Pylint.

standard import "import inspect" should be placed before "import torch"
Error

Line: 3 Column: 1

import torch
import torch.fx
import inspect
from typing import Any, Dict, Optional, Tuple
from torch.fx.node import Argument, Target
from torch._jit_internal import boolean_dispatched
from torch.fx.operator_schemas import _torchscript_type_to_python_type

from torch.fx import Transformer

            

Reported by Pylint.

standard import "from typing import Any, Dict, Optional, Tuple" should be placed before "import torch"
Error

Line: 4 Column: 1

import torch
import torch.fx
import inspect
from typing import Any, Dict, Optional, Tuple
from torch.fx.node import Argument, Target
from torch._jit_internal import boolean_dispatched
from torch.fx.operator_schemas import _torchscript_type_to_python_type

from torch.fx import Transformer

            

Reported by Pylint.

Line too long (102/100)
Error

Line: 42 Column: 1

            target_for_analysis = target
            if target in boolean_dispatched:
                # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
                # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
                # branches of the dispatch have exactly the same signature. If they do, use the `true`
                # branch signature for analysis. Otherwise, leave this un-normalized
                assert not isinstance(target, str)
                dispatched = boolean_dispatched[target]
                if_true, if_false = dispatched['if_true'], dispatched['if_false']

            

Reported by Pylint.

Line too long (102/100)
Error

Line: 43 Column: 1

            if target in boolean_dispatched:
                # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
                # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
                # branches of the dispatch have exactly the same signature. If they do, use the `true`
                # branch signature for analysis. Otherwise, leave this un-normalized
                assert not isinstance(target, str)
                dispatched = boolean_dispatched[target]
                if_true, if_false = dispatched['if_true'], dispatched['if_false']
                # TODO: can we emit the union of these? What are the implications on TorchScript

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 45
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
                # branches of the dispatch have exactly the same signature. If they do, use the `true`
                # branch signature for analysis. Otherwise, leave this un-normalized
                assert not isinstance(target, str)
                dispatched = boolean_dispatched[target]
                if_true, if_false = dispatched['if_true'], dispatched['if_false']
                # TODO: can we emit the union of these? What are the implications on TorchScript
                # compilation?
                if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:

            

Reported by Bandit.
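
Note: Bandit's point is that `assert` statements are stripped under `python -O`, so a check that guards runtime correctness should raise explicitly. A sketch of the equivalent explicit check:

    def check_target(target):
        # An explicit raise survives `python -O`, unlike assert:
        if isinstance(target, str):
            raise TypeError(f"expected a callable target, got str: {target!r}")
        return target

    check_target(len)

For internal invariants that are acceptable to drop in optimized runs, the assert can stay and the finding be waived (e.g. with `# nosec B101`).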

Line too long (113/100)
Error

Line: 50 Column: 1

                if_true, if_false = dispatched['if_true'], dispatched['if_false']
                # TODO: can we emit the union of these? What are the implications on TorchScript
                # compilation?
                if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:
                    return super().call_function(target, args, kwargs)
                target_for_analysis = if_true

            python_ret_type = self._extract_python_return_type(target_for_analysis)


            

Reported by Pylint.

torch/optim/asgd.py
20 issues
Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import math
import torch
from . import _functional as F
from .optimizer import Optimizer


class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

import math
import torch
from . import _functional as F
from .optimizer import Optimizer


class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.


            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 70 Column: 39

                        state['step'] = 0
                        state['eta'] = group['lr']
                        state['mu'] = 1
                        state['ax'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    mus.append(state['mu'])
                    axs.append(state['ax'])
                    etas.append(state['eta'])


            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 70 Column: 73

                        state['step'] = 0
                        state['eta'] = group['lr']
                        state['mu'] = 1
                        state['ax'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    mus.append(state['mu'])
                    axs.append(state['ax'])
                    etas.append(state['eta'])


            

Reported by Pylint.

Unused variable 'mu'
Error

Line: 88 Column: 20

                   lambd=group['lambd'])

            # update eta and mu
            for p, mu, eta in zip(params_with_grad, mus, etas):
                state = self.state[p]
                state['eta'] = (group['lr'] /
                                math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))
                state['mu'] = 1 / max(1, state['step'] - group['t0'])


            

Reported by Pylint.

Unused variable 'eta'
Error

Line: 88 Column: 24

                   lambd=group['lambd'])

            # update eta and mu
            for p, mu, eta in zip(params_with_grad, mus, etas):
                state = self.state[p]
                state['eta'] = (group['lr'] /
                                math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))
                state['mu'] = 1 / max(1, state['step'] - group['t0'])


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import math
import torch
from . import _functional as F
from .optimizer import Optimizer


class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.


            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 7 Column: 1

from .optimizer import Optimizer


class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.


            

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 26 Column: 5

        https://dl.acm.org/citation.cfm?id=131098
    """

    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))


            

Reported by Pylint.

Argument name "lr" doesn't conform to snake_case naming style
Error

Line: 26 Column: 5

        https://dl.acm.org/citation.cfm?id=131098
    """

    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))


            

Reported by Pylint.

torch/optim/adadelta.py
20 issues
Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch

from . import _functional as F
from .optimizer import Optimizer


class Adadelta(Optimizer):
    """Implements Adadelta algorithm.


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

import torch

from . import _functional as F
from .optimizer import Optimizer


class Adadelta(Optimizer):
    """Implements Adadelta algorithm.


            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 72 Column: 77

                # Lazy state initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avgs.append(state['square_avg'])
                acc_deltas.append(state['acc_delta'])


            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 72 Column: 43

                # Lazy state initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avgs.append(state['square_avg'])
                acc_deltas.append(state['acc_delta'])


            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 73 Column: 76

                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avgs.append(state['square_avg'])
                acc_deltas.append(state['acc_delta'])

                state['step'] += 1

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 73 Column: 42

                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avgs.append(state['square_avg'])
                acc_deltas.append(state['acc_delta'])

                state['step'] += 1

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

import torch

from . import _functional as F
from .optimizer import Optimizer


class Adadelta(Optimizer):
    """Implements Adadelta algorithm.


            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 7 Column: 1

from .optimizer import Optimizer


class Adadelta(Optimizer):
    """Implements Adadelta algorithm.

    It has been proposed in `ADADELTA: An Adaptive Learning Rate Method`__.

    Args:

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 26 Column: 5

    __ https://arxiv.org/abs/1212.5701
    """

    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= rho <= 1.0:
            raise ValueError("Invalid rho value: {}".format(rho))
        if not 0.0 <= eps:

            

Reported by Pylint.

Argument name "lr" doesn't conform to snake_case naming style
Error

Line: 26 Column: 5

    __ https://arxiv.org/abs/1212.5701
    """

    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= rho <= 1.0:
            raise ValueError("Invalid rho value: {}".format(rho))
        if not 0.0 <= eps:

            

Reported by Pylint.