The following issues were found:

caffe2/python/operator_test/group_conv_test.py
17 issues
Unable to import 'hypothesis'
Error

Line: 6 Column: 1

              

import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils
import caffe2.python.hip_test_util as hiputl

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 7 Column: 1

              
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu

            

Reported by Pylint.

TODO: Group conv in NHWC not implemented for GPU yet.
Error

Line: 43 Column: 3

                          if order == "NHWC":
                assume(group == 1 and engine != "CUDNN")
        else:
            # TODO: Group conv in NHWC not implemented for GPU yet.
            assume(group == 1 or order == "NCHW" or gc.device_type == caffe2_pb2.CPU)

            if group != 1 and order == "NHWC":
                dc = [d for d in dc if d.device_type == caffe2_pb2.CPU]


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              



import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st

from caffe2.proto import caffe2_pb2

            

Reported by Pylint.

standard import "import unittest" should be placed before "import numpy as np"
Error

Line: 14 Column: 1

              import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu

import unittest

class TestGroupConvolution(hu.HypothesisTestCase):

    @given(stride=st.integers(1, 3),
           pad=st.integers(0, 3),

            

Reported by Pylint.

Missing class docstring
Error

Line: 16 Column: 1

              
import unittest

class TestGroupConvolution(hu.HypothesisTestCase):

    @given(stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(1, 5),
           size=st.integers(7, 10),

            

Reported by Pylint.

Too many local variables (22/15)
Error

Line: 33 Column: 5

                         use_bias=st.booleans(),
           **hu.gcs)
    @settings(max_examples=2, deadline=None)
    def test_group_convolution(
            self, stride, pad, kernel, size, group,
            input_channels_per_group, output_channels_per_group, batch_size,
            order, engine, use_bias, gc, dc):
        assume(size >= kernel)


            

Reported by Pylint.

Too many arguments (14/5)
Error

Line: 33 Column: 5

                         use_bias=st.booleans(),
           **hu.gcs)
    @settings(max_examples=2, deadline=None)
    def test_group_convolution(
            self, stride, pad, kernel, size, group,
            input_channels_per_group, output_channels_per_group, batch_size,
            order, engine, use_bias, gc, dc):
        assume(size >= kernel)


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 33 Column: 5

                         use_bias=st.booleans(),
           **hu.gcs)
    @settings(max_examples=2, deadline=None)
    def test_group_convolution(
            self, stride, pad, kernel, size, group,
            input_channels_per_group, output_channels_per_group, batch_size,
            order, engine, use_bias, gc, dc):
        assume(size >= kernel)


            

Reported by Pylint.

Argument name "dc" doesn't conform to snake_case naming style
Error

Line: 33 Column: 5

                         use_bias=st.booleans(),
           **hu.gcs)
    @settings(max_examples=2, deadline=None)
    def test_group_convolution(
            self, stride, pad, kernel, size, group,
            input_channels_per_group, output_channels_per_group, batch_size,
            order, engine, use_bias, gc, dc):
        assume(size >= kernel)


            

Reported by Pylint.

torch/distributed/optim/functional_rmsprop.py
17 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 75 Column: 37

                              if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 76 Column: 43

                                  self.state[param] = {}
                    state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)


            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 76 Column: 81

                                  self.state[param] = {}
                    state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)


            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 78 Column: 52

                                  state['step'] = torch.tensor(0.0)
                    state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]
                square_avgs.append(state['square_avg'])

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 78 Column: 90

                                  state['step'] = torch.tensor(0.0)
                    state['square_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]
                square_avgs.append(state['square_avg'])

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 80 Column: 45

                                  if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]
                square_avgs.append(state['square_avg'])
                if momentum > 0:
                    momentum_buffer_list.append(state['momentum_buffer'])

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 80 Column: 83

                                  if momentum > 0:
                        state['momentum_buffer'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.centered:
                        state['grad_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]
                square_avgs.append(state['square_avg'])
                if momentum > 0:
                    momentum_buffer_list.append(state['momentum_buffer'])

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from typing import List, Dict, Optional
import torch
import torch.optim._functional as F

from torch import Tensor

# Define a TorchScript compatible Functional RMSprop Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 17 Column: 1

              # NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalRMSprop(object):
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1e-2,
        alpha: float = 0.99,

            

Reported by Pylint.

Class '_FunctionalRMSprop' inherits from object, can be safely removed from bases in python3
Error

Line: 17 Column: 1

              # NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalRMSprop(object):
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1e-2,
        alpha: float = 0.99,

            

Reported by Pylint.

torch/distributed/pipeline/sync/dependency.py
17 issues
Attempted relative import beyond top-level package
Error

Line: 13 Column: 1

              import torch
from torch import Tensor

from .phony import get_phony

__all__: List[str] = []


def fork(input: Tensor) -> Tuple[Tensor, Tensor]:

            

Reported by Pylint.

Module 'torch' has no 'is_grad_enabled' member
Error

Line: 20 Column: 8

              
def fork(input: Tensor) -> Tuple[Tensor, Tensor]:
    """Branches out from an autograd lane of the given tensor."""
    if torch.is_grad_enabled() and input.requires_grad:
        input, phony = Fork.apply(input)
    else:
        phony = get_phony(input.device, requires_grad=False)

    return input, phony

            

Reported by Pylint.

Function 'input' has no 'device' member
Error

Line: 23 Column: 27

                  if torch.is_grad_enabled() and input.requires_grad:
        input, phony = Fork.apply(input)
    else:
        phony = get_phony(input.device, requires_grad=False)

    return input, phony


class Fork(torch.autograd.Function):

            

Reported by Pylint.

Module 'torch' has no 'is_grad_enabled' member
Error

Line: 41 Column: 8

              
def join(input: Tensor, phony: Tensor) -> Tensor:
    """Merges two autograd lanes."""
    if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad):
        input = Join.apply(input, phony)

    return input



            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 18 Column: 10

              __all__: List[str] = []


def fork(input: Tensor) -> Tuple[Tensor, Tensor]:
    """Branches out from an autograd lane of the given tensor."""
    if torch.is_grad_enabled() and input.requires_grad:
        input, phony = Fork.apply(input)
    else:
        phony = get_phony(input.device, requires_grad=False)

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 30 Column: 30

              
class Fork(torch.autograd.Function):
    @staticmethod
    def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]:  # type: ignore[override]
        phony = get_phony(input.device, requires_grad=False)
        return input.detach(), phony.detach()

    @staticmethod
    def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor:  # type: ignore[override]

            

Reported by Pylint.

Parameters differ from overridden 'forward' method
Error

Line: 30 Column: 5

              
class Fork(torch.autograd.Function):
    @staticmethod
    def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]:  # type: ignore[override]
        phony = get_phony(input.device, requires_grad=False)
        return input.detach(), phony.detach()

    @staticmethod
    def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor:  # type: ignore[override]

            

Reported by Pylint.

Parameters differ from overridden 'backward' method
Error

Line: 35 Column: 5

                      return input.detach(), phony.detach()

    @staticmethod
    def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor:  # type: ignore[override]
        return grad_input


def join(input: Tensor, phony: Tensor) -> Tensor:
    """Merges two autograd lanes."""

            

Reported by Pylint.

Unused argument 'grad_grad'
Error

Line: 35 Column: 51

                      return input.detach(), phony.detach()

    @staticmethod
    def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor:  # type: ignore[override]
        return grad_input


def join(input: Tensor, phony: Tensor) -> Tensor:
    """Merges two autograd lanes."""

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 39 Column: 10

                      return grad_input


def join(input: Tensor, phony: Tensor) -> Tensor:
    """Merges two autograd lanes."""
    if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad):
        input = Join.apply(input, phony)

    return input

            

Reported by Pylint.

torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py
17 issues
Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              import torch
import torch.distributed as dist

from . import default_hooks as default


class PostLocalSGDState(object):
    r"""
    Stores the state for all-reducing gradients globally using ``process_group`` until step ``start_localSGD_iter``,

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 32 Column: 13

                      start_localSGD_iter,
    ):
        logging.info(
            "Local SGD will be started after {} iterations".format(start_localSGD_iter)
        )

        # The group used for all-reducing gradients globally.
        self.process_group = process_group
        # The group used for all-reducing gradients locally.

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 51 Column: 17

              
        if self.iter == self.start_localSGD_iter:
            logging.info(
                "Start to apply local SGD after {} iterations.".format(self.iter)
            )


def post_localSGD_hook(
    state: PostLocalSGDState, bucket: dist.GradBucket

            

Reported by Pylint.

Unused variable 'world_size'
Error

Line: 84 Column: 5

                  global_group_to_use = (
        state.process_group if state.process_group is not None else dist.group.WORLD
    )
    world_size = global_group_to_use.size()

    # The input tensor is a flattened 1D tensor.
    input_tensor = bucket.buffer()

    # Run allreduce using `global_group_to_use` in the first `start_localSGD_iter` iterations.

            

Reported by Pylint.

Access to a protected member _allreduce_fut of a client class
Error

Line: 92 Column: 16

                  # Run allreduce using `global_group_to_use` in the first `start_localSGD_iter` iterations.
    if state.iter < state.start_localSGD_iter:
        state.maybe_increase_iter(bucket)
        return default._allreduce_fut(global_group_to_use, input_tensor)

    # Run allreduce using `subgroup` after the first `start_localSGD_iter` iterations.
    # From this moment, model averaging should run after the optimizer step,
    # to globally allreduce all the parameters.
    if state.subgroup is None:

            

Reported by Pylint.

Access to a protected member _allreduce_fut of a client class
Error

Line: 99 Column: 12

                  # to globally allreduce all the parameters.
    if state.subgroup is None:
        state.subgroup, _ = dist.new_subgroups()
    return default._allreduce_fut(state.subgroup, input_tensor)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import logging

import torch
import torch.distributed as dist

from . import default_hooks as default


class PostLocalSGDState(object):

            

Reported by Pylint.

Module name "post_localSGD_hook" doesn't conform to snake_case naming style
Error

Line: 1 Column: 1

              import logging

import torch
import torch.distributed as dist

from . import default_hooks as default


class PostLocalSGDState(object):

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 9 Column: 1

              from . import default_hooks as default


class PostLocalSGDState(object):
    r"""
    Stores the state for all-reducing gradients globally using ``process_group`` until step ``start_localSGD_iter``,
    and all-reducing gradients locally using ``subgroup`` afterwards.

    If ``process_group`` is ``None``, the global process group will be used.

            

Reported by Pylint.

Class 'PostLocalSGDState' inherits from object, can be safely removed from bases in python3
Error

Line: 9 Column: 1

              from . import default_hooks as default


class PostLocalSGDState(object):
    r"""
    Stores the state for all-reducing gradients globally using ``process_group`` until step ``start_localSGD_iter``,
    and all-reducing gradients locally using ``subgroup`` afterwards.

    If ``process_group`` is ``None``, the global process group will be used.

            

Reported by Pylint.

torch/distributed/elastic/rendezvous/etcd_server.py
17 issues
Unable to import 'etcd'
Error

Line: 20 Column: 5

              from typing import Optional, TextIO, Union

try:
    import etcd  # type: ignore[import]
except ModuleNotFoundError:
    pass


log = logging.getLogger(__name__)

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 54 Column: 17

                  )

    for addr in addrs:
        family, type, proto, _, _ = addr
        try:
            s = socket.socket(family, type, proto)
            s.bind(("localhost", 0))
            s.listen(0)
            return s

            

Reported by Pylint.

Redefining name 'subprocess' from outer scope (line 14)
Error

Line: 66 Column: 15

                  raise RuntimeError("Failed to create a socket")


def stop_etcd(subprocess, data_dir: Optional[str] = None):
    if subprocess and subprocess.poll() is None:
        log.info("stopping etcd server")
        subprocess.terminate()
        subprocess.wait()


            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 73 Column: 9

                      subprocess.wait()

    if data_dir:
        log.info(f"deleting etcd data dir: {data_dir}")
        shutil.rmtree(data_dir, ignore_errors=True)


class EtcdServer:
    """

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 185 Column: 20

                              data_dir = os.path.join(self._base_data_dir, str(curr_retries))
                os.makedirs(data_dir, exist_ok=True)
                return self._start(data_dir, timeout, stderr)
            except Exception as e:
                curr_retries += 1
                stop_etcd(self._etcd_proc)
                log.warning(
                    f"Failed to start etcd server, got error: {str(e)}, retrying"
                )

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 188 Column: 17

                          except Exception as e:
                curr_retries += 1
                stop_etcd(self._etcd_proc)
                log.warning(
                    f"Failed to start etcd server, got error: {str(e)}, retrying"
                )
                if curr_retries >= num_retries:
                    shutil.rmtree(self._base_data_dir, ignore_errors=True)
                    raise

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 221 Column: 9

                          )
        )

        log.info(f"Starting etcd server: [{etcd_cmd}]")

        sock.close()
        sock_peer.close()
        self._etcd_proc = subprocess.Popen(etcd_cmd, close_fds=True, stderr=stderr)
        self._wait_for_ready(timeout)

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 252 Column: 17

                                  f"Etcd server process exited with the code: {exitcode}"
                )
            try:
                log.info(f"etcd server ready. version: {client.version}")
                return
            except Exception:
                time.sleep(1)
        raise TimeoutError("Timed out waiting for etcd server to be ready!")


            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 254 Column: 20

                          try:
                log.info(f"etcd server ready. version: {client.version}")
                return
            except Exception:
                time.sleep(1)
        raise TimeoutError("Timed out waiting for etcd server to be ready!")

    def stop(self) -> None:
        """

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              #!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import logging

            

Reported by Pylint.

torch/fx/experimental/fx2trt/converters/add.py
17 issues
Unable to import 'tensorrt'
Error

Line: 3 Column: 1

              import operator
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter

from .helper_functions import get_dyn_range, mark_as_int8_layer

@tensorrt_converter(operator.add)
@tensorrt_converter(torch.add)

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter

from .helper_functions import get_dyn_range, mark_as_int8_layer

@tensorrt_converter(operator.add)
@tensorrt_converter(torch.add)
def add(network, target, args, kwargs, layer_name):
    # operator.add

            

Reported by Pylint.

Module 'torch' has no 'add' member
Error

Line: 9 Column: 21

              from .helper_functions import get_dyn_range, mark_as_int8_layer

@tensorrt_converter(operator.add)
@tensorrt_converter(torch.add)
def add(network, target, args, kwargs, layer_name):
    # operator.add
    if len(kwargs) == 0:
        lhs_val, rhs_val = args
    else:

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 37 Column: 70

              
    layer = network.add_elementwise(lhs_val, rhs_val, trt.ElementWiseOperation.SUM)
    layer.name = layer_name
    dyn_range = get_dyn_range(kwargs["scale"], kwargs["zero_point"], torch.quint8)
    mark_as_int8_layer(layer, dyn_range)

    return layer.get_output(0)



            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 52 Column: 70

              
    layer = network.add_elementwise(lhs_val, rhs_val, trt.ElementWiseOperation.SUM)
    layer.name = f"{layer_name}_add"
    dyn_range = get_dyn_range(kwargs["scale"], kwargs["zero_point"], torch.quint8)
    mark_as_int8_layer(layer, dyn_range)

    layer = network.add_activation(
        input=layer.get_output(0), type=trt.ActivationType.RELU)
    layer.name = f"{layer_name}_relu"

            

Reported by Pylint.

Unused argument 'target'
Error

Line: 10 Column: 18

              
@tensorrt_converter(operator.add)
@tensorrt_converter(torch.add)
def add(network, target, args, kwargs, layer_name):
    # operator.add
    if len(kwargs) == 0:
        lhs_val, rhs_val = args
    else:
        # torch.add

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 29 Column: 36

              

@tensorrt_converter(torch.ops.quantized.add)
def quantized_add(network, target, args, kwargs, layer_name):
    lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]

    if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
        raise RuntimeError('Quantized add received an input that is not part of the TensorRT region!')


            

Reported by Pylint.

Unused argument 'target'
Error

Line: 29 Column: 28

              

@tensorrt_converter(torch.ops.quantized.add)
def quantized_add(network, target, args, kwargs, layer_name):
    lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]

    if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
        raise RuntimeError('Quantized add received an input that is not part of the TensorRT region!')


            

Reported by Pylint.

Unused argument 'submod'
Error

Line: 44 Column: 33

              

@tensorrt_converter(torch.ops.quantized.add_relu)
def quantized_add_relu(network, submod, args, kwargs, layer_name):
    lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]

    if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
        raise RuntimeError('Quantized add_relu received an input that is not part of the TensorRT region!')


            

Reported by Pylint.

Unused argument 'args'
Error

Line: 44 Column: 41

              

@tensorrt_converter(torch.ops.quantized.add_relu)
def quantized_add_relu(network, submod, args, kwargs, layer_name):
    lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]

    if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
        raise RuntimeError('Quantized add_relu received an input that is not part of the TensorRT region!')


            

Reported by Pylint.

torch/distributions/exponential.py
17 issues
Module 'torch' has no 'Size' member
Error

Line: 41 Column: 23

              
    def __init__(self, rate, validate_args=None):
        self.rate, = broadcast_all(rate)
        batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size()
        super(Exponential, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Exponential, _instance)
        batch_shape = torch.Size(batch_shape)

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 46 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Exponential, _instance)
        batch_shape = torch.Size(batch_shape)
        new.rate = self.rate.expand(batch_shape)
        super(Exponential, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new


            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 52 Column: 36

                      new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for ._exponential()
            u = torch.rand(shape, dtype=self.rate.dtype, device=self.rate.device)
            return -(-u).log1p() / self.rate

            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 56 Column: 17

                      shape = self._extended_shape(sample_shape)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for ._exponential()
            u = torch.rand(shape, dtype=self.rate.dtype, device=self.rate.device)
            return -(-u).log1p() / self.rate
        return self.rate.new(shape).exponential_() / self.rate

    def log_prob(self, value):
        if self._validate_args:

            

Reported by Pylint.

Module 'torch' has no 'exp' member
Error

Line: 68 Column: 20

                  def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return 1 - torch.exp(-self.rate * value)

    def icdf(self, value):
        return -torch.log(1 - value) / self.rate

    def entropy(self):

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 71 Column: 17

                      return 1 - torch.exp(-self.rate * value)

    def icdf(self, value):
        return -torch.log(1 - value) / self.rate

    def entropy(self):
        return 1.0 - torch.log(self.rate)

    @property

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 74 Column: 22

                      return -torch.log(1 - value) / self.rate

    def entropy(self):
        return 1.0 - torch.log(self.rate)

    @property
    def _natural_params(self):
        return (-self.rate, )


            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 81 Column: 17

                      return (-self.rate, )

    def _log_normalizer(self, x):
        return -torch.log(-x)

            

Reported by Pylint.

Method 'enumerate_support' is abstract in class 'Distribution' but is not overridden
Error

Line: 9 Column: 1

              from torch.distributions.utils import broadcast_all


class Exponential(ExponentialFamily):
    r"""
    Creates a Exponential distribution parameterized by :attr:`rate`.

    Example::


            

Reported by Pylint.

Access to a protected member _validate_args of a client class
Error

Line: 49 Column: 9

                      batch_shape = torch.Size(batch_shape)
        new.rate = self.rate.expand(batch_shape)
        super(Exponential, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        if torch._C._get_tracing_state():

            

Reported by Pylint.

tools/test/test_trailing_newlines.py
17 issues
Unable to import 'tools.linter'
Error

Line: 1 Column: 1

              from tools.linter import trailing_newlines
import unittest
import tempfile


def correct_trailing_newlines(file_contents: str) -> bool:
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
        filename = tmp.name
        tmp.write(file_contents)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from tools.linter import trailing_newlines
import unittest
import tempfile


def correct_trailing_newlines(file_contents: str) -> bool:
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
        filename = tmp.name
        tmp.write(file_contents)

            

Reported by Pylint.

standard import "import unittest" should be placed before "from tools.linter import trailing_newlines"
Error

Line: 2 Column: 1

              from tools.linter import trailing_newlines
import unittest
import tempfile


def correct_trailing_newlines(file_contents: str) -> bool:
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
        filename = tmp.name
        tmp.write(file_contents)

            

Reported by Pylint.

standard import "import tempfile" should be placed before "from tools.linter import trailing_newlines"
Error

Line: 3 Column: 1

              from tools.linter import trailing_newlines
import unittest
import tempfile


def correct_trailing_newlines(file_contents: str) -> bool:
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
        filename = tmp.name
        tmp.write(file_contents)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 6 Column: 1

              import tempfile


def correct_trailing_newlines(file_contents: str) -> bool:
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
        filename = tmp.name
        tmp.write(file_contents)
    return trailing_newlines.correct_trailing_newlines(filename)


            

Reported by Pylint.

Missing class docstring
Error

Line: 13 Column: 1

                  return trailing_newlines.correct_trailing_newlines(filename)


class TestTrailingNewlines(unittest.TestCase):
    def test_empty(self) -> None:
        self.assertTrue(correct_trailing_newlines(''))

    def test_single_byte(self) -> None:
        self.assertFalse(correct_trailing_newlines('a'))

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 14 Column: 5

              

class TestTrailingNewlines(unittest.TestCase):
    def test_empty(self) -> None:
        self.assertTrue(correct_trailing_newlines(''))

    def test_single_byte(self) -> None:
        self.assertFalse(correct_trailing_newlines('a'))


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 17 Column: 5

                  def test_empty(self) -> None:
        self.assertTrue(correct_trailing_newlines(''))

    def test_single_byte(self) -> None:
        self.assertFalse(correct_trailing_newlines('a'))

    def test_single_newline(self) -> None:
        self.assertFalse(correct_trailing_newlines('\n'))


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 20 Column: 5

                  def test_single_byte(self) -> None:
        self.assertFalse(correct_trailing_newlines('a'))

    def test_single_newline(self) -> None:
        self.assertFalse(correct_trailing_newlines('\n'))

    def test_two_newlines(self) -> None:
        self.assertFalse(correct_trailing_newlines('\n\n'))


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 23 Column: 5

                  def test_single_newline(self) -> None:
        self.assertFalse(correct_trailing_newlines('\n'))

    def test_two_newlines(self) -> None:
        self.assertFalse(correct_trailing_newlines('\n\n'))

    def test_three_newlines(self) -> None:
        self.assertFalse(correct_trailing_newlines('\n\n\n'))


            

Reported by Pylint.

test/scripts/cuda_memcheck_common.py
17 issues
Unnecessary pass statement
Error

Line: 6 Column: 5

              
class ParseError(Exception):
    """Whenever the simple parser is unable to parse the report, this exception will be raised"""
    pass


class Report:
    """A report is a container of errors, and a summary of how many errors are found"""


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # this file contains a simple parser that parses reports
# from cuda-memcheck

class ParseError(Exception):
    """Whenever the simple parser is unable to parse the report, this exception will be raised"""
    pass


class Report:

            

Reported by Pylint.

Too few public methods (0/2)
Error

Line: 9 Column: 1

                  pass


class Report:
    """A report is a container of errors, and a summary of how many errors are found"""

    def __init__(self, text, errors):
        # text is something like
        # ERROR SUMMARY: 1 error

            

Reported by Pylint.

Too few public methods (0/2)
Error

Line: 28 Column: 1

                              raise ParseError("Number of errors does not match")


class Error:
    """Each error is a section in the output of cuda-memcheck.
    Each error in the report has an error message and a backtrace. It looks like:

    ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
    =========     Saved host backtrace up to driver entry point at error

            

Reported by Pylint.

Line too long (121/100)
Error

Line: 32 Column: 1

                  """Each error is a section in the output of cuda-memcheck.
    Each error in the report has an error message and a backtrace. It looks like:

    ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
    =========     Saved host backtrace up to driver entry point at error
    =========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
    =========     Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b77a05]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x39d6d1d]

            

Reported by Pylint.

Line too long (105/100)
Error

Line: 35 Column: 1

                  ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
    =========     Saved host backtrace up to driver entry point at error
    =========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
    =========     Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b77a05]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x39d6d1d]
    =========     .....
    """


            

Reported by Pylint.

Line too long (111/100)
Error

Line: 36 Column: 1

                  =========     Saved host backtrace up to driver entry point at error
    =========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
    =========     Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b77a05]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x39d6d1d]
    =========     .....
    """

    def __init__(self, lines):

            

Reported by Pylint.

Line too long (111/100)
Error

Line: 37 Column: 1

                  =========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
    =========     Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b77a05]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x39d6d1d]
    =========     .....
    """

    def __init__(self, lines):
        self.message = lines[0]

            

Reported by Pylint.

Line too long (129/100)
Error

Line: 55 Column: 1

    A report contains multiple errors and a summary of how many errors are detected. It looks like:

    ========= CUDA-MEMCHECK
    ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaPointerGetAttributes.
    =========     Saved host backtrace up to driver entry point at error
    =========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
    =========     Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaPointerGetAttributes + 0x1a9) [0x428b9]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b778a9]
    =========     .....

            

Reported by Pylint.

Line too long (113/100)
Error

Line: 58 Column: 1

                  ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaPointerGetAttributes.
    =========     Saved host backtrace up to driver entry point at error
    =========     Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3]
    =========     Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaPointerGetAttributes + 0x1a9) [0x428b9]
    =========     Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b778a9]
    =========     .....
    =========
    ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError.
    =========     Saved host backtrace up to driver entry point at error

            

Reported by Pylint.

test/onnx/test_pytorch_helper.py
17 issues
Unable to import 'torch'
Error

Line: 3 Column: 1

              # Some standard imports
import numpy as np
from torch import nn
import torch.onnx
import torch.nn.init as init
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

            

Reported by Pylint.

Unable to import 'torch.onnx'
Error

Line: 4 Column: 1

              # Some standard imports
import numpy as np
from torch import nn
import torch.onnx
import torch.nn.init as init
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

            

Reported by Pylint.

Unable to import 'torch.nn.init'
Error

Line: 5 Column: 1

              import numpy as np
from torch import nn
import torch.onnx
import torch.nn.init as init
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace


            

Reported by Pylint.

Unable to import 'caffe2.python.model_helper'
Error

Line: 6 Column: 1

              from torch import nn
import torch.onnx
import torch.nn.init as init
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

from test_pytorch_common import skipIfNoLapack

            

Reported by Pylint.

Unable to import 'caffe2.python.core'
Error

Line: 9 Column: 1

              from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

from test_pytorch_common import skipIfNoLapack


class TestCaffe2Backend(unittest.TestCase):

            

Reported by Pylint.

No name 'skipIfNoLapack' in module 'test_pytorch_common'
Error

Line: 11 Column: 1

              import unittest
from caffe2.python.core import workspace

from test_pytorch_common import skipIfNoLapack


class TestCaffe2Backend(unittest.TestCase):

    @skipIfNoLapack

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Some standard imports
import numpy as np
from torch import nn
import torch.onnx
import torch.nn.init as init
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

            

Reported by Pylint.

standard import "import unittest" should be placed before "import numpy as np"
Error

Line: 8 Column: 1

              import torch.nn.init as init
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

from test_pytorch_common import skipIfNoLapack



            

Reported by Pylint.

third party import "from caffe2.python.core import workspace" should be placed before "from pytorch_helper import PyTorchModule"
Error

Line: 9 Column: 1

              from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

from test_pytorch_common import skipIfNoLapack


class TestCaffe2Backend(unittest.TestCase):

            

Reported by Pylint.

Imports from package caffe2 are not grouped
Error

Line: 9 Column: 1

              from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
import unittest
from caffe2.python.core import workspace

from test_pytorch_common import skipIfNoLapack


class TestCaffe2Backend(unittest.TestCase):

            

Reported by Pylint.