The following issues were found:

torch/package/_digraph.py
10 issues
Missing module docstring
Error

Line: 1 Column: 1

              class DiGraph:
    """Really simple unweighted directed graph data structure to track dependencies.

    The API is pretty much the same as networkx so if you add something just
    copy their API.
    """

    def __init__(self):
        # Dict of node -> dict of arbitrary attributes

            

Reported by Pylint.

Argument name "n" doesn't conform to snake_case naming style
Error

Line: 17 Column: 5

                      # Nested dict of node -> predecessor node -> nothing.
        self._pred = {}

    def add_node(self, n, **kwargs):
        """Add a node to the graph.

        Args:
            n: the node. Can be any object that is a valid dict key.
            **kwargs: any attributes you want to attach to the node.

            

Reported by Pylint.

Argument name "u" doesn't conform to snake_case naming style
Error

Line: 31 Column: 5

                      else:
            self._node[n].update(kwargs)

    def add_edge(self, u, v):
        """Add an edge to graph between nodes ``u`` and ``v``

        ``u`` and ``v`` will be created if they do not already exist.
        """
        # add nodes

            

Reported by Pylint.

Argument name "v" doesn't conform to snake_case naming style
Error

Line: 31 Column: 5

                      else:
            self._node[n].update(kwargs)

    def add_edge(self, u, v):
        """Add an edge to graph between nodes ``u`` and ``v``

        ``u`` and ``v`` will be created if they do not already exist.
        """
        # add nodes

            

Reported by Pylint.

Argument name "n" doesn't conform to snake_case naming style
Error

Line: 50 Column: 5

                      self._succ[u][v] = True
        self._pred[v][u] = True

    def successors(self, n):
        """Returns an iterator over successor nodes of n."""
        try:
            return iter(self._succ[n])
        except KeyError as e:
            raise ValueError(f"The node {n} is not in the digraph.") from e

            

Reported by Pylint.

Variable name "e" doesn't conform to snake_case naming style
Error

Line: 54 Column: 9

                      """Returns an iterator over successor nodes of n."""
        try:
            return iter(self._succ[n])
        except KeyError as e:
            raise ValueError(f"The node {n} is not in the digraph.") from e

    def predecessors(self, n):
        """Returns an iterator over predecessors nodes of n."""
        try:

            

Reported by Pylint.

Argument name "n" doesn't conform to snake_case naming style
Error

Line: 57 Column: 5

                      except KeyError as e:
            raise ValueError(f"The node {n} is not in the digraph.") from e

    def predecessors(self, n):
        """Returns an iterator over predecessors nodes of n."""
        try:
            return iter(self._pred[n])
        except KeyError as e:
            raise ValueError(f"The node {n} is not in the digraph.") from e

            

Reported by Pylint.

Variable name "e" doesn't conform to snake_case naming style
Error

Line: 61 Column: 9

                      """Returns an iterator over predecessors nodes of n."""
        try:
            return iter(self._pred[n])
        except KeyError as e:
            raise ValueError(f"The node {n} is not in the digraph.") from e

    @property
    def edges(self):
        """Returns an iterator over all edges (u, v) in the graph"""

            

Reported by Pylint.

Variable name "n" doesn't conform to snake_case naming style
Error

Line: 67 Column: 13

                  @property
    def edges(self):
        """Returns an iterator over all edges (u, v) in the graph"""
        for n, successors in self._succ.items():
            for succ in successors:
                yield n, succ

    @property
    def nodes(self):

            

Reported by Pylint.

Argument name "n" doesn't conform to snake_case naming style
Error

Line: 80 Column: 5

                      """Iterate over the nodes."""
        return iter(self._node)

    def __contains__(self, n):
        """Returns True if ``n`` is a node in the graph, False otherwise."""
        try:
            return n in self._node
        except TypeError:
            return False

            

Reported by Pylint.

torch/optim/sgd.py
10 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

              import torch
from . import _functional as F
from .optimizer import Optimizer, required


class SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

              import torch
from . import _functional as F
from .optimizer import Optimizer, required


class SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
from . import _functional as F
from .optimizer import Optimizer, required


class SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from

            

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 56 Column: 5

                      The Nesterov version is analogously modified.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))

            

Reported by Pylint.

Argument name "lr" doesn't conform to snake_case naming style
Error

Line: 56 Column: 5

                      The Nesterov version is analogously modified.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 69 Column: 9

                                      weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 72 Column: 9

                      super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    @torch.no_grad()
    def step(self, closure=None):

            

Reported by Pylint.

Variable name "lr" doesn't conform to snake_case naming style
Error

Line: 97 Column: 13

                          momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            lr = group['lr']

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    d_p_list.append(p.grad)

            

Reported by Pylint.

Variable name "p" doesn't conform to snake_case naming style
Error

Line: 99 Column: 17

                          nesterov = group['nesterov']
            lr = group['lr']

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    d_p_list.append(p.grad)

                    state = self.state[p]

            

Reported by Pylint.

Variable name "p" doesn't conform to snake_case naming style
Error

Line: 120 Column: 17

                                nesterov=nesterov)

            # update momentum_buffers in state
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                state = self.state[p]
                state['momentum_buffer'] = momentum_buffer

        return loss

            

Reported by Pylint.

torch/nn/utils/__init__.py
10 issues
Unable to import '__init__.clip_grad'
Error

Line: 2 Column: 1

              from . import rnn
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Unable to import '__init__.weight_norm'
Error

Line: 3 Column: 1

              from . import rnn
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Unable to import '__init__.convert_parameters'
Error

Line: 4 Column: 1

              from . import rnn
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Unable to import '__init__.spectral_norm'
Error

Line: 5 Column: 1

              from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Unable to import '__init__.fusion'
Error

Line: 6 Column: 1

              from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Unable to import '__init__.memory_format'
Error

Line: 7 Column: 1

              from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Unable to import '__init__.init'
Error

Line: 9 Column: 1

              from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Module import itself
Error

Line: 1 Column: 1

              from . import rnn
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Module import itself
Error

Line: 8 Column: 1

              from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from . import rnn
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init

            

Reported by Pylint.

torch/package/_directory_reader.py
10 issues
Missing module docstring
Error

Line: 1 Column: 1

              import os.path
from glob import glob
from typing import Any, List

import torch

_storages: List[Any] = [
    torch.DoubleStorage,
    torch.FloatStorage,

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 20 Column: 1

              _dtype_to_storage = {data_type(0).dtype: data_type for data_type in _storages}

# because get_storage_from_record returns a tensor!?
class _HasStorage(object):
    def __init__(self, storage):
        self._storage = storage

    def storage(self):
        return self._storage

            

Reported by Pylint.

Class '_HasStorage' inherits from object, can be safely removed from bases in python3
Error

Line: 20 Column: 1

              _dtype_to_storage = {data_type(0).dtype: data_type for data_type in _storages}

# because get_storage_from_record returns a tensor!?
class _HasStorage(object):
    def __init__(self, storage):
        self._storage = storage

    def storage(self):
        return self._storage

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 24 Column: 5

                  def __init__(self, storage):
        self._storage = storage

    def storage(self):
        return self._storage


class DirectoryReader(object):
    """

            

Reported by Pylint.

Class 'DirectoryReader' inherits from object, can be safely removed from bases in python3
Error

Line: 28 Column: 1

                      return self._storage


class DirectoryReader(object):
    """
    Class to allow PackageImporter to operate on unzipped packages. Methods
    copy the behavior of the internal PyTorchFileReader class (which is used for
    accessing packages in all other cases).


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 41 Column: 5

                  def __init__(self, directory):
        self.directory = directory

    def get_record(self, name):
        filename = f"{self.directory}/{name}"
        with open(filename, "rb") as f:
            return f.read()

    def get_storage_from_record(self, name, numel, dtype):

            

Reported by Pylint.

Variable name "f" doesn't conform to snake_case naming style
Error

Line: 43 Column: 38

              
    def get_record(self, name):
        filename = f"{self.directory}/{name}"
        with open(filename, "rb") as f:
            return f.read()

    def get_storage_from_record(self, name, numel, dtype):
        storage = _dtype_to_storage[dtype]
        filename = f"{self.directory}/{name}"

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 46 Column: 5

                      with open(filename, "rb") as f:
            return f.read()

    def get_storage_from_record(self, name, numel, dtype):
        storage = _dtype_to_storage[dtype]
        filename = f"{self.directory}/{name}"
        return _HasStorage(storage.from_file(filename=filename, size=numel))

    def has_record(self, path):

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 51 Column: 5

                      filename = f"{self.directory}/{name}"
        return _HasStorage(storage.from_file(filename=filename, size=numel))

    def has_record(self, path):
        full_path = os.path.join(self.directory, path)
        return os.path.isfile(full_path)

    def get_all_records(
        self,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 55 Column: 5

                      full_path = os.path.join(self.directory, path)
        return os.path.isfile(full_path)

    def get_all_records(
        self,
    ):
        files = []
        for filename in glob(f"{self.directory}/**", recursive=True):
            if not os.path.isdir(filename):

            

Reported by Pylint.

torch/nn/parallel/parallel_apply.py
10 issues
Module 'torch' has no 'is_grad_enabled' member
Error

Line: 50 Column: 38

                  devices = [_get_device_index(x, True) for x in devices]
    lock = threading.Lock()
    results = {}
    grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()

    def _worker(i, module, input, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()

            

Reported by Pylint.

Module 'torch' has no 'is_autocast_enabled' member
Error

Line: 50 Column: 63

                  devices = [_get_device_index(x, True) for x in devices]
    lock = threading.Lock()
    results = {}
    grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()

    def _worker(i, module, input, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 52 Column: 28

                  results = {}
    grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()

    def _worker(i, module, input, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device), autocast(enabled=autocast_enabled):

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 64 Column: 16

                              output = module(*input, **kwargs)
            with lock:
                results[i] = output
        except Exception:
            with lock:
                results[i] = ExceptionWrapper(
                    where="in replica {} on device {}".format(i, device))

    if len(modules) > 1:

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import threading
import torch
from torch.cuda._utils import _get_device_index
from torch.cuda.amp import autocast
from torch._utils import ExceptionWrapper


def get_a_var(obj):
    if isinstance(obj, torch.Tensor):

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 8 Column: 1

              from torch._utils import ExceptionWrapper


def get_a_var(obj):
    if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, list) or isinstance(obj, tuple):
        for result in map(get_a_var, obj):

            

Reported by Pylint.

Consider merging these isinstance calls to isinstance(obj, (list, tuple))
Error

Line: 12 Column: 8

                  if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, list) or isinstance(obj, tuple):
        for result in map(get_a_var, obj):
            if isinstance(result, torch.Tensor):
                return result
    if isinstance(obj, dict):
        for result in map(get_a_var, obj.items()):

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 38
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:

            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 40
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  """
    assert len(modules) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:

            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 44
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = [_get_device_index(x, True) for x in devices]
    lock = threading.Lock()
    results = {}

            

Reported by Bandit.

torch/nn/intrinsic/quantized/_reference/modules/conv_relu.py
10 issues
Module 'torch' has no 'quantize_per_tensor' member
Error

Line: 16 Column: 18

                          self._conv1d_padding, self._conv1d_dilation, self.groups)  # type: ignore[has-type]
        float_result = F.relu(float_result, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        result = torch.quantize_per_tensor(
            float_result, self.scale, self.zero_point, torch.quint8)
        return result

    def _get_name(self):
        return "QuantizedConvReLU1d(Reference)"

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 17 Column: 56

                      float_result = F.relu(float_result, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        result = torch.quantize_per_tensor(
            float_result, self.scale, self.zero_point, torch.quint8)
        return result

    def _get_name(self):
        return "QuantizedConvReLU1d(Reference)"


            

Reported by Pylint.

Module 'torch' has no 'quantize_per_tensor' member
Error

Line: 35 Column: 18

                          self.padding, self.dilation, self.groups)
        float_result = F.relu(float_result, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        result = torch.quantize_per_tensor(
            float_result, self.scale, self.zero_point, torch.quint8)
        return result

    def _get_name(self):
        return "QuantizedConvReLU2d(Reference)"

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 36 Column: 56

                      float_result = F.relu(float_result, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        result = torch.quantize_per_tensor(
            float_result, self.scale, self.zero_point, torch.quint8)
        return result

    def _get_name(self):
        return "QuantizedConvReLU2d(Reference)"


            

Reported by Pylint.

Module 'torch' has no 'quantize_per_tensor' member
Error

Line: 53 Column: 18

                          self.padding, self.dilation, self.groups)
        float_result = F.relu(float_result, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        result = torch.quantize_per_tensor(
            float_result, self.scale, self.zero_point, torch.quint8)
        return result

    def _get_name(self):
        return "QuantizedConvReLU3d(Reference)"

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 54 Column: 56

                      float_result = F.relu(float_result, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        result = torch.quantize_per_tensor(
            float_result, self.scale, self.zero_point, torch.quint8)
        return result

    def _get_name(self):
        return "QuantizedConvReLU3d(Reference)"

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
import torch.nn.quantized._reference as nnqr
import torch.nn.functional as F

class ConvReLU1d(nnqr.Conv1d):
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_dequant = x.dequantize()

            

Reported by Pylint.

Missing class docstring
Error

Line: 5 Column: 1

              import torch.nn.quantized._reference as nnqr
import torch.nn.functional as F

class ConvReLU1d(nnqr.Conv1d):
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_dequant = x.dequantize()
        weight_dequant = self._qweight.dequantize()

            

Reported by Pylint.

Missing class docstring
Error

Line: 24 Column: 1

                      return "QuantizedConvReLU1d(Reference)"


class ConvReLU2d(nnqr.Conv2d):
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_dequant = x.dequantize()
        weight_dequant = self._qweight.dequantize()

            

Reported by Pylint.

Missing class docstring
Error

Line: 42 Column: 1

                  def _get_name(self):
        return "QuantizedConvReLU2d(Reference)"

class ConvReLU3d(nnqr.Conv3d):
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_dequant = x.dequantize()
        weight_dequant = self._qweight.dequantize()

            

Reported by Pylint.

torch/multiprocessing/pool.py
10 issues
Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

              import multiprocessing.pool
import multiprocessing.util as util

from .queue import SimpleQueue


def clean_worker(*args, **kwargs):
    import gc
    multiprocessing.pool.worker(*args, **kwargs)

            

Reported by Pylint.

No value for argument 'ctx' in staticmethod call
Error

Line: 38 Column: 17

                                  self._initargs, self._maxtasksperchild)
            if hasattr(self, '_wrap_exception'):
                args += (self._wrap_exception,)
            w = self.Process(target=clean_worker, args=args)
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            util.debug('added worker')

            

Reported by Pylint.

Method '__reduce__' is abstract in class 'Pool' but is not overridden
Error

Line: 16 Column: 1

                  gc.collect()


class Pool(multiprocessing.pool.Pool):
    """Pool implementation which uses our version of SimpleQueue.
    This lets us pass tensors in shared memory across processes instead of
    serializing the underlying data."""

    def _setup_queues(self):

            

Reported by Pylint.

Access to a protected member _writer of a client class
Error

Line: 24 Column: 27

                  def _setup_queues(self):
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.

            

Reported by Pylint.

Access to a protected member _reader of a client class
Error

Line: 25 Column: 27

                      self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """

            

Reported by Pylint.

Unused variable 'i'
Error

Line: 31 Column: 13

                      """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            # changed worker -> clean_worker
            args = (self._inqueue, self._outqueue,
                    self._initializer,
                    self._initargs, self._maxtasksperchild)
            if hasattr(self, '_wrap_exception'):

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import multiprocessing.pool
import multiprocessing.util as util

from .queue import SimpleQueue


def clean_worker(*args, **kwargs):
    import gc
    multiprocessing.pool.worker(*args, **kwargs)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 7 Column: 1

              from .queue import SimpleQueue


def clean_worker(*args, **kwargs):
    import gc
    multiprocessing.pool.worker(*args, **kwargs)
    # Regular multiprocessing workers don't fully clean up after themselves,
    # so we have to explicitly trigger garbage collection to make sure that all
    # destructors are called...

            

Reported by Pylint.

Import outside toplevel (gc)
Error

Line: 8 Column: 5

              

def clean_worker(*args, **kwargs):
    import gc
    multiprocessing.pool.worker(*args, **kwargs)
    # Regular multiprocessing workers don't fully clean up after themselves,
    # so we have to explicitly trigger garbage collection to make sure that all
    # destructors are called...
    gc.collect()

            

Reported by Pylint.

Variable name "w" doesn't conform to snake_case naming style
Error

Line: 38 Column: 13

                                  self._initargs, self._maxtasksperchild)
            if hasattr(self, '_wrap_exception'):
                args += (self._wrap_exception,)
            w = self.Process(target=clean_worker, args=args)
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            util.debug('added worker')

            

Reported by Pylint.

torch/utils/benchmark/examples/sparse/op_benchmark.py
10 issues
Unable to import 'torch'
Error

Line: 7 Column: 1

              """

import numpy as np
import torch

from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer
from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer


            

Reported by Pylint.

Unable to import 'torch.utils.benchmark'
Error

Line: 9 Column: 1

              import numpy as np
import torch

from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer
from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer

_MEASURE_TIME = 1.0


            

Reported by Pylint.

Unable to import 'torch.utils.benchmark.op_fuzzers.sparse_unary'
Error

Line: 10 Column: 1

              import torch

from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer
from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer

_MEASURE_TIME = 1.0

def assert_dicts_equal(dict_0, dict_1):

            

Reported by Pylint.

Unable to import 'torch.utils.benchmark.op_fuzzers.sparse_binary'
Error

Line: 11 Column: 1

              
from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer
from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer

_MEASURE_TIME = 1.0

def assert_dicts_equal(dict_0, dict_1):
    """Builtin dict comparison will not compare numpy arrays.

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 21
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      x = {"a": np.ones((2, 1))}
        x == x  # Raises ValueError
    """
    assert set(dict_0.keys()) == set(dict_0.keys())
    assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")

def run(n, stmt, fuzzer_cls):
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n)

            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 22
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      x == x  # Raises ValueError
    """
    assert set(dict_0.keys()) == set(dict_0.keys())
    assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")

def run(n, stmt, fuzzer_cls):
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n)
    raw_results = []

            

Reported by Bandit.

Argument name "n" doesn't conform to snake_case naming style
Error

Line: 24 Column: 1

                  assert set(dict_0.keys()) == set(dict_0.keys())
    assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")

def run(n, stmt, fuzzer_cls):
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n)
    raw_results = []
    for i, (float_values, int_values) in enumerate(zip(float_iter, double_iter)):
        float_tensors, float_tensor_params, float_params = float_values

            

Reported by Pylint.

Too many local variables (37/15)
Error

Line: 24 Column: 1

                  assert set(dict_0.keys()) == set(dict_0.keys())
    assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")

def run(n, stmt, fuzzer_cls):
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n)
    raw_results = []
    for i, (float_values, int_values) in enumerate(zip(float_iter, double_iter)):
        float_tensors, float_tensor_params, float_params = float_values

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 24 Column: 1

                  assert set(dict_0.keys()) == set(dict_0.keys())
    assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")

def run(n, stmt, fuzzer_cls):
    float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
    double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n)
    raw_results = []
    for i, (float_values, int_values) in enumerate(zip(float_iter, double_iter)):
        float_tensors, float_tensor_params, float_params = float_values

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 91 Column: 1

                      print(spacer)


def main():
    run(n=100, stmt="torch.sparse.sum(x, dim=0)", fuzzer_cls=UnaryOpSparseFuzzer)
    run(n=100, stmt="torch.sparse.softmax(x, dim=0)", fuzzer_cls=UnaryOpSparseFuzzer)
    run(n=100, stmt="x + y", fuzzer_cls=BinaryOpSparseFuzzer)



            

Reported by Pylint.

torch/jit/_logging.py
10 issues
Access to a protected member _logging_set_logger of a client class
Error

Line: 5 Column: 14

              
add_stat_value = torch.ops.prim.AddStatValue

set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 5 Column: 14

              
add_stat_value = torch.ops.prim.AddStatValue

set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 6 Column: 17

              add_stat_value = torch.ops.prim.AddStatValue

set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 7 Column: 19

              
set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 8 Column: 14

              set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch

add_stat_value = torch.ops.prim.AddStatValue

set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger


            

Reported by Pylint.

Module 'torch._C' has no '_logging_set_logger' member, but source is unavailable. Consider adding this module to extension-pkg-whitelist if you want to perform analysis based on run-time introspection of living objects.
Error

Line: 5 Column: 14

              
add_stat_value = torch.ops.prim.AddStatValue

set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Module 'torch._C' has no 'LockingLogger' member, but source is unavailable. Consider adding this module to extension-pkg-whitelist if you want to perform analysis based on run-time introspection of living objects.
Error

Line: 6 Column: 17

              add_stat_value = torch.ops.prim.AddStatValue

set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Module 'torch._C' has no 'AggregationType' member, but source is unavailable. Consider adding this module to extension-pkg-whitelist if you want to perform analysis based on run-time introspection of living objects.
Error

Line: 7 Column: 19

              
set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

Module 'torch._C' has no 'NoopLogger' member, but source is unavailable. Consider adding this module to extension-pkg-whitelist if you want to perform analysis based on run-time introspection of living objects.
Error

Line: 8 Column: 14

              set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger

time_point = torch.ops.prim.TimePoint

            

Reported by Pylint.

torch/fx/passes/tools_common.py
10 issues
Missing module docstring
Error

Line: 1 Column: 1

              from typing import List, Tuple, Union, Dict, Any, Set
from dataclasses import dataclass

import torch
import torch.fx
from torch.fx.node import _get_qualified_name


Tensors = Union[Tuple[torch.Tensor], List[torch.Tensor]]

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 32
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  "torch". e.g. _VariableFunctionsClass.relu would become torch.relu.
    """

    assert node.op in CALLABLE_NODE_OPS, (
        "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}"
    )

    if node.op == "call_module":
        assert isinstance(node.target, str)

            

Reported by Bandit.

Unnecessary "elif" after "return"
Error

Line: 36 Column: 5

                      "Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}"
    )

    if node.op == "call_module":
        assert isinstance(node.target, str)
        return torch.typename(submodules[node.target])
    elif node.op == "call_function":
        target: Any = node.target
        return (

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 37
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  )

    if node.op == "call_module":
        assert isinstance(node.target, str)
        return torch.typename(submodules[node.target])
    elif node.op == "call_function":
        target: Any = node.target
        return (
            f"acc_ops.{target.__name__}"

            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 47
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                          else _get_qualified_name(target)
        )
    else:
        assert isinstance(node.target, str)
        return node.target


class FxNetAccFusionsFinder:
    """

            

Reported by Bandit.

Missing class docstring
Error

Line: 63 Column: 5

                      self.acc_nodes = acc_nodes

    @dataclass
    class FusionGroup:
        # The smallest idx of nodes in the fusion group after topological sorting all the nodes in the model.
        top_node_idx: int

        # Nodes in this fusion group.
        nodes: NodeSet

            

Reported by Pylint.

Line too long (109/100)
Error

Line: 64 Column: 1

              
    @dataclass
    class FusionGroup:
        # The smallest idx of nodes in the fusion group after topological sorting all the nodes in the model.
        top_node_idx: int

        # Nodes in this fusion group.
        nodes: NodeSet


            

Reported by Pylint.

Too many branches (17/12)
Error

Line: 125 Column: 5

              
        return False

    def __call__(self) -> Dict[torch.fx.Node, NodeSet]:
        result: Dict[torch.fx.Node, NodeSet] = {}
        acc_nodes = list(self.acc_nodes)

        for node in acc_nodes:
            if node in result:

            

Reported by Pylint.

Unnecessary parens after 'not' keyword
Error

Line: 175 Column: 1

                                  )
                    self.recursive_add_node(fusion_group, fusion_group.inputs)

            if not (set(fusion_group.nodes) <= self.acc_nodes):
                self.acc_nodes -= fusion_group.nodes
            else:
                for n in fusion_group.nodes:
                    result[n] = fusion_group.nodes


            

Reported by Pylint.

Variable name "n" doesn't conform to snake_case naming style
Error

Line: 178 Column: 21

                          if not (set(fusion_group.nodes) <= self.acc_nodes):
                self.acc_nodes -= fusion_group.nodes
            else:
                for n in fusion_group.nodes:
                    result[n] = fusion_group.nodes

        return result

            

Reported by Pylint.