The following issues were found:

torch/utils/tensorboard/writer.py
169 issues
Unable to import 'tensorboard.compat'
Error

Line: 8 Column: 1

import time
import torch

from tensorboard.compat import tf
from tensorboard.compat.proto.event_pb2 import SessionLog
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter


Reported by Pylint.

Unable to import 'tensorboard.compat.proto.event_pb2'
Error

Line: 9 Column: 1

import torch

from tensorboard.compat import tf
from tensorboard.compat.proto.event_pb2 import SessionLog
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter



Reported by Pylint.

Unable to import 'tensorboard.compat.proto.event_pb2'
Error

Line: 10 Column: 1

from tensorboard.compat import tf
from tensorboard.compat.proto.event_pb2 import SessionLog
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter

from ._convert_np import make_np


Reported by Pylint.

Unable to import 'tensorboard.compat.proto'
Error

Line: 11 Column: 1

from tensorboard.compat import tf
from tensorboard.compat.proto.event_pb2 import SessionLog
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter

from ._convert_np import make_np
from ._embedding import (


Reported by Pylint.

Unable to import 'tensorboard.plugins.projector.projector_config_pb2'
Error

Line: 12 Column: 1

from tensorboard.compat.proto.event_pb2 import SessionLog
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter

from ._convert_np import make_np
from ._embedding import (
    make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info,


Reported by Pylint.

Unable to import 'tensorboard.summary.writer.event_file_writer'
Error

Line: 13 Column: 1

from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter

from ._convert_np import make_np
from ._embedding import (
    make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info,
)


Reported by Pylint.
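
These E0401 reports usually mean that tensorboard is not installed in the environment Pylint runs in, rather than that writer.py is broken: the module imports fine wherever torch's TensorBoard support is actually used. A minimal sketch of the usual per-line treatment, assuming the imports are known to resolve at runtime (installing tensorboard into the lint environment is the cleaner fix):

import torch
from tensorboard.compat import tf  # pylint: disable=import-error
from tensorboard.compat.proto.event_pb2 import Event  # pylint: disable=import-error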

Attempted relative import beyond top-level package
Error

Line: 15 Column: 1

from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter

from ._convert_np import make_np
from ._embedding import (
    make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info,
)
from ._onnx_graph import load_onnx_graph
from ._pytorch_graph import graph


Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 16 Column: 1

from tensorboard.summary.writer.event_file_writer import EventFileWriter

from ._convert_np import make_np
from ._embedding import (
    make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info,
)
from ._onnx_graph import load_onnx_graph
from ._pytorch_graph import graph
from ._utils import figure_to_image


Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

from ._embedding import (
    make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info,
)
from ._onnx_graph import load_onnx_graph
from ._pytorch_graph import graph
from ._utils import figure_to_image
from .summary import (
    scalar, histogram, histogram_raw, image, audio, text,
    pr_curve, pr_curve_raw, video, custom_scalars, image_boxes, mesh, hparams


Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 20 Column: 1

    make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info,
)
from ._onnx_graph import load_onnx_graph
from ._pytorch_graph import graph
from ._utils import figure_to_image
from .summary import (
    scalar, histogram, histogram_raw, image, audio, text,
    pr_curve, pr_curve_raw, video, custom_scalars, image_boxes, mesh, hparams
)


Reported by Pylint.
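
E0402 ("Attempted relative import beyond top-level package") is likewise an analysis-context problem: it fires when the file is linted by bare path, so Pylint does not recognize torch/utils/tensorboard as a package. Linting the package form (pylint torch.utils.tensorboard) from the repository root usually clears it. For illustration, and assuming the standard torch layout, these are the absolute forms the relative imports resolve to:

from torch.utils.tensorboard._convert_np import make_np
from torch.utils.tensorboard._onnx_graph import load_onnx_graph
from torch.utils.tensorboard._pytorch_graph import graph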

torch/_tensor.py
167 issues
Module 'torch' has no 'qscheme' member
Error

Line: 99 Column: 51

                new_storage = self.storage().__deepcopy__(memo)
                if self.is_quantized:
                    # quantizer_params can be different type based on torch attribute
                    quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[torch.qscheme, Tensor, Tensor, int]]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = self.qscheme(), self.q_scale(), self.q_zero_point()
                    elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                        quantizer_params = self.qscheme(), \
                            self.q_per_channel_scales(), \


Reported by Pylint.

Module 'torch' has no 'qscheme' member
Error

Line: 99 Column: 85

                new_storage = self.storage().__deepcopy__(memo)
                if self.is_quantized:
                    # quantizer_params can be different type based on torch attribute
                    quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[torch.qscheme, Tensor, Tensor, int]]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = self.qscheme(), self.q_scale(), self.q_zero_point()
                    elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                        quantizer_params = self.qscheme(), \
                            self.q_per_channel_scales(), \


Reported by Pylint.

Module 'torch' has no 'per_tensor_affine' member
Error

Line: 100 Column: 42

                if self.is_quantized:
                    # quantizer_params can be different type based on torch attribute
                    quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[torch.qscheme, Tensor, Tensor, int]]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = self.qscheme(), self.q_scale(), self.q_zero_point()
                    elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                        quantizer_params = self.qscheme(), \
                            self.q_per_channel_scales(), \
                            self.q_per_channel_zero_points(), \


Reported by Pylint.

Module 'torch' has no 'per_channel_affine_float_qparams' member
Error

Line: 102 Column: 71

                    quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[torch.qscheme, Tensor, Tensor, int]]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = self.qscheme(), self.q_scale(), self.q_zero_point()
                    elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                        quantizer_params = self.qscheme(), \
                            self.q_per_channel_scales(), \
                            self.q_per_channel_zero_points(), \
                            self.q_per_channel_axis()
                    else:


Reported by Pylint.

Module 'torch' has no 'per_channel_affine' member
Error

Line: 102 Column: 45

                    quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[torch.qscheme, Tensor, Tensor, int]]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = self.qscheme(), self.q_scale(), self.q_zero_point()
                    elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                        quantizer_params = self.qscheme(), \
                            self.q_per_channel_scales(), \
                            self.q_per_channel_zero_points(), \
                            self.q_per_channel_axis()
                    else:


Reported by Pylint.

getstate_fn is not callable
Error

Line: 142 Column: 21

        # https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
        getstate_fn = getattr(self, "__getstate__", None)
        if getstate_fn:
            state = getstate_fn()
        else:
            slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
            if slots_to_save:
                state = (self.__dict__, {name: getattr(self, name) for name in slots_to_save if hasattr(self, name)})
            else:


Reported by Pylint.

Module 'torch' has no 'qscheme' member
Error

Line: 190 Column: 43

            return (torch._utils._rebuild_meta_tensor_no_storage, arg_meta)
        if self.is_quantized:
            # quantizer_params can be different type based on torch attribute
            quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]]
            if self.qscheme() == torch.per_tensor_affine:
                quantizer_params = (torch.per_tensor_affine,
                                    self.q_scale(),
                                    self.q_zero_point())
            elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):


Reported by Pylint.

Module 'torch' has no 'per_tensor_affine' member
Error

Line: 191 Column: 34

        if self.is_quantized:
            # quantizer_params can be different type based on torch attribute
            quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]]
            if self.qscheme() == torch.per_tensor_affine:
                quantizer_params = (torch.per_tensor_affine,
                                    self.q_scale(),
                                    self.q_zero_point())
            elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                # convert scales and zero points to tuple to avoid recursive calls


Reported by Pylint.

Module 'torch' has no 'per_tensor_affine' member
Error

Line: 192 Column: 37

            # quantizer_params can be different type based on torch attribute
            quantizer_params: Union[Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]]
            if self.qscheme() == torch.per_tensor_affine:
                quantizer_params = (torch.per_tensor_affine,
                                    self.q_scale(),
                                    self.q_zero_point())
            elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                # convert scales and zero points to tuple to avoid recursive calls
                # when/if we get multi-axis quantized tensors in the future, the shape


Reported by Pylint.

Module 'torch' has no 'per_channel_affine' member
Error

Line: 195 Column: 37

                quantizer_params = (torch.per_tensor_affine,
                                    self.q_scale(),
                                    self.q_zero_point())
            elif self.qscheme() in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
                # convert scales and zero points to tuple to avoid recursive calls
                # when/if we get multi-axis quantized tensors in the future, the shape
                # is recoverable from the main tensor shape
                quantizer_params = (torch.per_channel_affine,
                                    self.q_per_channel_scales(),


Reported by Pylint.
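
The no-member reports in this file are the classic C-extension false positive: torch.qscheme, torch.per_tensor_affine, torch.per_channel_affine, and torch.per_channel_affine_float_qparams are created by the compiled extension when torch is imported, so they never appear in the Python source Pylint reads. A quick runtime check (assuming torch is installed):

import torch

# The qscheme constants are instances of the very torch.qscheme type that
# Pylint claims does not exist.
assert isinstance(torch.per_tensor_affine, torch.qscheme)
assert isinstance(torch.per_channel_affine, torch.qscheme)

The "getstate_fn is not callable" report is a related inference gap: the value comes from getattr(self, "__getstate__", None) and is guarded by an if before the call. Allow-listing torch as an extension package in the Pylint configuration is the usual blanket fix for this family.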

torch/testing/_asserts.py
166 issues
Attempted relative import beyond top-level package
Error

Line: 10 Column: 1

import torch
from torch import Tensor

from ._core import _unravel_index

__all__ = ["assert_close"]


class _TestingErrorMeta(NamedTuple):


Reported by Pylint.

Instance of '_TestingErrorMeta' has no '_replace' member
Error

Line: 20 Column: 16

    msg: str

    def amend_msg(self, prefix: str = "", postfix: str = "") -> "_TestingErrorMeta":
        return self._replace(msg=f"{prefix}{self.msg}{postfix}")

    def to_error(self) -> Exception:
        return self.type(self.msg)




Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 31 Column: 5

# https://github.com/pytorch/pytorch/pull/54769#issuecomment-813174256 for details.
# {dtype: (rtol, atol)}
_DTYPE_PRECISIONS = {
    torch.float16: (0.001, 1e-5),
    torch.bfloat16: (0.016, 1e-5),
    torch.float32: (1.3e-6, 1e-5),
    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),


Reported by Pylint.

Module 'torch' has no 'bfloat16' member
Error

Line: 32 Column: 5

# {dtype: (rtol, atol)}
_DTYPE_PRECISIONS = {
    torch.float16: (0.001, 1e-5),
    torch.bfloat16: (0.016, 1e-5),
    torch.float32: (1.3e-6, 1e-5),
    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),
    torch.complex128: (1e-7, 1e-7),


Reported by Pylint.

Module 'torch' has no 'float32' member
Error

Line: 33 Column: 5

_DTYPE_PRECISIONS = {
    torch.float16: (0.001, 1e-5),
    torch.bfloat16: (0.016, 1e-5),
    torch.float32: (1.3e-6, 1e-5),
    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),
    torch.complex128: (1e-7, 1e-7),
}


Reported by Pylint.

Module 'torch' has no 'float64' member
Error

Line: 34 Column: 5

    torch.float16: (0.001, 1e-5),
    torch.bfloat16: (0.016, 1e-5),
    torch.float32: (1.3e-6, 1e-5),
    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),
    torch.complex128: (1e-7, 1e-7),
}



Reported by Pylint.

Module 'torch' has no 'complex32' member
Error

Line: 35 Column: 5

    torch.bfloat16: (0.016, 1e-5),
    torch.float32: (1.3e-6, 1e-5),
    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),
    torch.complex128: (1e-7, 1e-7),
}




Reported by Pylint.

Module 'torch' has no 'complex64' member
Error

Line: 36 Column: 5

    torch.float32: (1.3e-6, 1e-5),
    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),
    torch.complex128: (1e-7, 1e-7),
}


def _get_default_rtol_and_atol(actual: Tensor, expected: Tensor) -> Tuple[float, float]:


Reported by Pylint.

Module 'torch' has no 'complex128' member
Error

Line: 37 Column: 5

    torch.float64: (1e-7, 1e-7),
    torch.complex32: (0.001, 1e-5),
    torch.complex64: (1.3e-6, 1e-5),
    torch.complex128: (1e-7, 1e-7),
}


def _get_default_rtol_and_atol(actual: Tensor, expected: Tensor) -> Tuple[float, float]:
    actual_rtol, actual_atol = _DTYPE_PRECISIONS.get(actual.dtype, (0.0, 0.0))


Reported by Pylint.

Module 'torch' has no 'complex64' member
Error

Line: 69 Column: 50

        else:
            relaxed_complex_nan = False

        if actual.dtype not in (torch.complex32, torch.complex64, torch.complex128):
            return check_tensors(actual, expected, equal_nan=equal_nan, **kwargs)

        if relaxed_complex_nan:
            actual, expected = [
                t.clone().masked_fill(


Reported by Pylint.
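
Both families here are again dynamic members Pylint cannot see statically: the dtype constants (torch.float16, torch.bfloat16, the complex dtypes) come from torch's compiled extension, and _replace is synthesized by typing.NamedTuple when the class is created. A self-contained sketch of the NamedTuple case (the class name is a stand-in for _TestingErrorMeta):

from typing import NamedTuple

class _ErrorMeta(NamedTuple):
    type: type
    msg: str

# _replace exists at runtime even though the static pass reports no member.
meta = _ErrorMeta(AssertionError, "tensors mismatch")
print(meta._replace(msg="prefix: " + meta.msg))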

torch/jit/_trace.py
164 issues
Module 'torch' has no 'preserve_format' member
Error

Line: 114 Column: 45

            trace_inputs = _unflatten(in_args, in_desc)

            ret_inputs.append(
                tuple(x.clone(memory_format=torch.preserve_format) for x in args)
            )
            if self._return_inputs_states:
                inputs_states.append(_unflatten(in_args, in_desc))
            outs.append(self.inner(*trace_inputs))
            if self._return_inputs_states:


Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 151 Column: 63

            # TODO: figure out one liner to .clone() and set requires_grad
            v = (
                a.detach()
                .clone(memory_format=None if a.is_mkldnn else torch.preserve_format)
                .requires_grad_(a.requires_grad)
            )
            if a.grad is not None:
                v.grad = clone_input(v.grad)
            return v


Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 158 Column: 42

                v.grad = clone_input(v.grad)
            return v
        else:
            return a.clone(memory_format=torch.preserve_format)

    return function._nested_map(
        lambda x: isinstance(x, torch.Tensor), clone_input, condition_msg="tensors"
    )(args)



Reported by Pylint.

Module 'torch' has no 'sum' member
Error

Line: 188 Column: 33

        print("{} {} time: {} ms".format(trace_name, name, start.elapsed_time(end)))


def verify(model, args, loss_fn=torch.sum, devices=None):
    """
    Verify that a JIT compiled model has the same behavior as its uncompiled
    version along with its backwards pass.  If your model returns multiple
    outputs, you must also specify a `loss_fn` to produce a loss for which
    the backwards will be computed.


Reported by Pylint.

Module 'torch' has no 'sum' member
Error

Line: 249 Column: 23

            raise RuntimeError("failed to use the compiled function")
        if not isinstance(out, tuple):
            out = (out,)
        if loss_fn == torch.sum and len(out) != 1:
            raise ValueError(
                (
                    "Model returns {} outputs, but default loss function "
                    "(torch.sum) can only handle a single output"
                ).format(len(out))


Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 258 Column: 44

            )
        out_vars, _ = _flatten(out)
        saved_outs = [
            v.detach().clone(memory_format=torch.preserve_format) for v in out_vars
        ]
        loss = loss_fn(*out)
        grads = torch.autograd.grad([loss], in_vars)
        # TODO: I'm not sure if the clone here is necessary but it is safer
        saved_grads = [


Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 264 Column: 44

        grads = torch.autograd.grad([loss], in_vars)
        # TODO: I'm not sure if the clone here is necessary but it is safer
        saved_grads = [
            v.detach().clone(memory_format=torch.preserve_format) for v in grads
        ]
        return (saved_outs, saved_grads)

    with torch.random.fork_rng(devices, _caller="torch.jit.verify"):
        uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True)


Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 29 Column: 12

from torch.testing._core import _get_default_tolerance

_flatten = torch._C._jit_flatten
_unflatten = torch._C._jit_unflatten


def _create_interpreter_name_lookup_fn(frames_up=1):
    def _get_interpreter_name_for_var(var):


Reported by Pylint.

Access to a protected member _jit_flatten of a client class
Error

Line: 29 Column: 12

              
from torch.testing._core import _get_default_tolerance

_flatten = torch._C._jit_flatten
_unflatten = torch._C._jit_unflatten


def _create_interpreter_name_lookup_fn(frames_up=1):
    def _get_interpreter_name_for_var(var):


Reported by Pylint.

Access to a protected member _jit_unflatten of a client class
Error

Line: 30 Column: 14

from torch.testing._core import _get_default_tolerance

_flatten = torch._C._jit_flatten
_unflatten = torch._C._jit_unflatten


def _create_interpreter_name_lookup_fn(frames_up=1):
    def _get_interpreter_name_for_var(var):
        frame = inspect.currentframe()


Reported by Pylint.
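
The protected-access reports are deliberate rather than accidental: _trace.py is part of torch itself and binds internals of the private torch._C extension because no public API exposes them. A sketch of the conventional annotation for such lines (an assumption about style, not the file's actual text):

import torch

_flatten = torch._C._jit_flatten      # pylint: disable=protected-access
_unflatten = torch._C._jit_unflatten  # pylint: disable=protected-access

The preserve_format and sum no-member reports above are the same extension-module false positive described for torch/_tensor.py.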

test/jit/test_profiler.py
163 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

import os
import sys

import torch

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, warmup_backward, FileCheck


Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 9 Column: 1

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, warmup_backward, FileCheck

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")


Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 18 Column: 26

class TestProfiler(JitTestCase):
    def setUp(self):
        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)


Reported by Pylint.

Access to a protected member _jit_set_profiling_executor of a client class
Error

Line: 18 Column: 26

class TestProfiler(JitTestCase):
    def setUp(self):
        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)


Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 19 Column: 31

class TestProfiler(JitTestCase):
    def setUp(self):
        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_override_can_fuse_on_cpu(True)


Reported by Pylint.

Access to a protected member _jit_set_profiling_mode of a client class
Error

Line: 19 Column: 31

class TestProfiler(JitTestCase):
    def setUp(self):
        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_override_can_fuse_on_cpu(True)


Reported by Pylint.

Access to a protected member _debug_set_autodiff_subgraph_inlining of a client class
Error

Line: 20 Column: 32

    def setUp(self):
        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        self.default_dtype = torch.get_default_dtype()


Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 20 Column: 32

    def setUp(self):
        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        self.default_dtype = torch.get_default_dtype()


Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 21 Column: 34

        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        self.default_dtype = torch.get_default_dtype()
        self.old_reduction_enabled = torch._C._jit_set_texpr_reductions_enabled(True)


Reported by Pylint.

Access to a protected member _jit_texpr_fuser_enabled of a client class
Error

Line: 21 Column: 34

        self.prev_exec = torch._C._jit_set_profiling_executor(True)
        self.prev_profiling = torch._C._jit_set_profiling_mode(True)
        self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        self.default_dtype = torch.get_default_dtype()
        self.old_reduction_enabled = torch._C._jit_set_texpr_reductions_enabled(True)


Reported by Pylint.
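
Two environment effects combine in this file: the unresolved torch imports mean the lint run cannot see the installed package, and jit_utils only becomes importable after the test appends the test directory to sys.path at runtime, which static analysis cannot follow. The protected-access reports mirror the _trace.py case; note that the private setters return the previous value, which is why setUp stores it, as in this hedged sketch:

import torch

# pylint: disable=protected-access
prev = torch._C._jit_set_profiling_executor(True)  # returns the old setting
torch._C._jit_set_profiling_executor(prev)  # restore it, e.g. in tearDown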

caffe2/python/onnx/backend.py
161 issues
Unable to import 'onnx.backend'
Error

Line: 18 Column: 1

# vendored protobuf is loaded first. We can work around this by
# importing onnx first, which will cause it to go out and pick up the
# system protobuf.
import onnx.backend
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np


Reported by Pylint.

Unable to import 'onnx'
Error

Line: 24 Column: 1

from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils


Reported by Pylint.

Unable to import 'onnx'
Error

Line: 25 Column: 1

import caffe2.python.utils
import numpy as np
import onnx
from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict


Reported by Pylint.

Unable to import 'onnx.numpy_helper'
Error

Line: 26 Column: 1

import numpy as np
import onnx
from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict



Reported by Pylint.

Unable to import 'onnx.defs'
Error

Line: 27 Column: 1

import onnx
from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict

from caffe2.python.onnx.workspace import Workspace


Reported by Pylint.

Unable to import 'onnx.shape_inference'
Error

Line: 28 Column: 1

from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict

from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep


Reported by Pylint.

Unable to import 'onnx.utils'
Error

Line: 29 Column: 1

import onnx.numpy_helper
import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict

from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep



Reported by Pylint.

Unable to import 'onnx.backend.base'
Error

Line: 30 Column: 1

import onnx.defs
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict

from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep

import caffe2.python._import_c_extension as C


Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'DummyName' member
Error

Line: 196 Column: 19

    }

    # Dummy name generator
    _dummy_name = C.DummyName()

    @classmethod
    def dummy_name(cls):
        return cls._dummy_name.new_dummy_name()



Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'Caffe2Backend' member
Error

Line: 232 Column: 24

                        shape=value.shape).SerializeToString())

            ops = []
            cbackend = C.Caffe2Backend(cls._dummy_name)
            ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)
            for s in ops_str[0] + ops_str[1]:
                op = caffe2_pb2.OperatorDef()
                op.ParseFromString(s)
                op.device_option.CopyFrom(device_option)


Reported by Pylint.
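
Here the onnx import errors again point at the lint environment, while the DummyName and Caffe2Backend reports arise because caffe2.python._import_c_extension re-exports symbols from a compiled module at import time. A runtime probe (assuming a caffe2 build is importable) confirms the members exist:

import caffe2.python._import_c_extension as C

print(hasattr(C, "DummyName"), hasattr(C, "Caffe2Backend"))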

caffe2/python/optimizer_test.py
161 issues
Attribute '_skip_gpu' defined outside __init__
Error

Line: 27 Column: 9

        raise unittest.SkipTest("no sparse support")

    def build_optimizer(self, model, **kwargs):
        self._skip_gpu = False
        return build_sgd(model, base_learning_rate=0.1, lars=0.5, **kwargs)

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertFalse(optimizer.get_auxiliary_parameters().local)


Reported by Pylint.

Redefining name 'optimizer' from outer scope (line 5)
Error

Line: 30 Column: 31

        self._skip_gpu = False
        return build_sgd(model, base_learning_rate=0.1, lars=0.5, **kwargs)

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertFalse(optimizer.get_auxiliary_parameters().local)
        for param in optimizer.get_auxiliary_parameters().shared:
            tensor = workspace.FetchBlob(param)
            np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)


Reported by Pylint.

Attribute '_skip_gpu' defined outside __init__
Error

Line: 40 Column: 9

class TestMomentumSgd(OptimizerTestBase, TestCase):
    def build_optimizer(self, model, **kwargs):
        self._skip_gpu = False
        return build_sgd(model, base_learning_rate=0.1, momentum=0.1, **kwargs)

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertTrue(optimizer.get_auxiliary_parameters().local)


Reported by Pylint.

Redefining name 'optimizer' from outer scope (line 5)
Error

Line: 43 Column: 31

        self._skip_gpu = False
        return build_sgd(model, base_learning_rate=0.1, momentum=0.1, **kwargs)

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertTrue(optimizer.get_auxiliary_parameters().local)
        for param in optimizer.get_auxiliary_parameters().shared:
            tensor = workspace.FetchBlob(param)
            np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)


Reported by Pylint.

Attribute '_skip_gpu' defined outside __init__
Error

Line: 53 Column: 9

class TestSgd(OptimizerTestBase, LRModificationTestBase, TestCase):
    def build_optimizer(self, model, **kwargs):
        self._skip_gpu = False
        return build_sgd(model, base_learning_rate=0.1, **kwargs)

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertFalse(optimizer.get_auxiliary_parameters().local)


Reported by Pylint.

Redefining name 'optimizer' from outer scope (line 5)
Error

Line: 56 Column: 31

        self._skip_gpu = False
        return build_sgd(model, base_learning_rate=0.1, **kwargs)

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertFalse(optimizer.get_auxiliary_parameters().local)
        for param in optimizer.get_auxiliary_parameters().shared:
            tensor = workspace.FetchBlob(param)
            np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)


Reported by Pylint.

Attribute '_skip_gpu' defined outside __init__
Error

Line: 68 Column: 9

    OptimizerTestBase, LRModificationTestBase, TestCase
):
    def build_optimizer(self, model, **kwargs):
        self._skip_gpu = False
        return build_multi_precision_sgd(
            model, base_learning_rate=0.1, **kwargs
        )

    def check_optimizer(self, optimizer):


Reported by Pylint.

Redefining name 'optimizer' from outer scope (line 5)
Error

Line: 73 Column: 31

            model, base_learning_rate=0.1, **kwargs
        )

    def check_optimizer(self, optimizer):
        self.assertTrue(optimizer.get_auxiliary_parameters().shared)
        self.assertFalse(optimizer.get_auxiliary_parameters().local)
        for param in optimizer.get_auxiliary_parameters().shared:
            tensor = workspace.FetchBlob(param)
            np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)


Reported by Pylint.

Parameters differ from overridden 'testGPUDense' method
Error

Line: 81 Column: 5

            np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)

    @unittest.skipIf(not workspace.has_gpu_support, "No GPU support")
    def testGPUDense(self):
        super(TestMultiPrecisionSgd, self).testGPUDense(core.DataType.FLOAT16)


class TestFtrl(OptimizerTestBase, TestCase):
    def build_optimizer(self, model, **kwargs):


Reported by Pylint.

Attribute '_skip_gpu' defined outside __init__
Error

Line: 87 Column: 9

class TestFtrl(OptimizerTestBase, TestCase):
    def build_optimizer(self, model, **kwargs):
        self._skip_gpu = True
        return build_ftrl(
            model,
            engine=None,
            alpha=1.0,
            beta=0.1,


Reported by Pylint.
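
Pylint only counts attributes assigned in its recognized initializer methods (the defining-attr-methods option covers __init__, __new__, and setUp by default), so _skip_gpu assigned inside build_optimizer trips the attribute-defined-outside-init check, and check_optimizer's parameter shadows the optimizer module imported at line 5 of the file. A minimal sketch of the conventional fix, with names following the test file:

import unittest

class OptimizerTestBase(unittest.TestCase):  # sketch; the real base is a mixin
    def setUp(self):
        self._skip_gpu = False  # assigned in a recognized initializer

    def check_optimizer(self, opt):  # renamed so it no longer shadows the module
        self.assertIsNotNone(opt)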

test/jit/test_backends.py
159 issues
Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 1 Column: 1

from torch.testing._internal.jit_utils import JitTestCase
import io
import os
import sys
import unittest

import torch
import torch._C
from torch.testing import FileCheck


Reported by Pylint.

Unable to import 'torch'
Error

Line: 7 Column: 1

import sys
import unittest

import torch
import torch._C
from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter

from torch.testing._internal.common_utils import (


Reported by Pylint.

Unable to import 'torch._C'
Error

Line: 8 Column: 1

import unittest

import torch
import torch._C
from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter

from torch.testing._internal.common_utils import (
    IS_FBCODE,


Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 9 Column: 1

import torch
import torch._C
from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter

from torch.testing._internal.common_utils import (
    IS_FBCODE,
    IS_MACOS,


Reported by Pylint.

Unable to import 'torch.jit.mobile'
Error

Line: 10 Column: 1

import torch
import torch._C
from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter

from torch.testing._internal.common_utils import (
    IS_FBCODE,
    IS_MACOS,
    IS_SANDCASTLE,


Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 12 Column: 1

from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter

from torch.testing._internal.common_utils import (
    IS_FBCODE,
    IS_MACOS,
    IS_SANDCASTLE,
    IS_WINDOWS,
    TEST_WITH_ROCM,


Reported by Pylint.

Undefined variable 'ModuleAdd'
Error

Line: 718 Column: 34

            }
            self.add = torch._C._jit_to_backend(
                "backend_with_compiler_demo",
                torch.jit.script(ModuleAdd()),
                compile_spec,
            )
            self.sub = torch._C._jit_to_backend(
                "backend_with_compiler_demo",
                torch.jit.script(ModuleAdd()),


Reported by Pylint.

Undefined variable 'ModuleAdd'
Error

Line: 723 Column: 34

            )
            self.sub = torch._C._jit_to_backend(
                "backend_with_compiler_demo",
                torch.jit.script(ModuleAdd()),
                compile_spec,
            )

        def forward(self, a, b, s: int):
            c = self.add.forward(a, b)


Reported by Pylint.

Undefined variable 'CompModule'
Error

Line: 737 Column: 23

    def setUp(self):
        super().setUp()

        self.module = CompModule()
        self.scripted_module = torch.jit.script(self.module)
        buffer = io.BytesIO(self.scripted_module._save_to_buffer_for_lite_interpreter())
        buffer.seek(0)
        self.mobile_module = _load_for_lite_interpreter(buffer)



Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 34 Column: 12


def to_test_backend(module, method_compile_spec):
    return torch._C._jit_to_backend("test_backend", module, {"forward": method_compile_spec})


def to_test_backend_multi(module, method_compile_spec):
    return torch._C._jit_to_backend("test_backend", module, method_compile_spec)



Reported by Pylint.
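
Unlike the extension false positives above, the undefined-variable reports deserve a manual look: ModuleAdd and CompModule are referenced inside test classes but, at least on the linted path, are not defined in the enclosing scope. For illustration only, a hypothetical minimal ModuleAdd that would satisfy the references (the real test's class may differ):

import torch

class ModuleAdd(torch.nn.Module):  # hypothetical stand-in
    def forward(self, x, y):
        return x + y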

torch/nn/quantized/modules/conv.py
159 issues
Module 'torch' has no '_empty_affine_quantized' member
Error

Line: 70 Column: 19

            weight_shape = [in_channels, out_channels // self.groups]
        else:
            weight_shape = [out_channels, in_channels // self.groups]
        qweight = torch._empty_affine_quantized(
            weight_shape + list(kernel_size),
            scale=1, zero_point=0, dtype=torch.qint8,
            **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
        bias_float = (
            torch.zeros(out_channels, dtype=torch.float,


Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 72 Column: 42

            weight_shape = [out_channels, in_channels // self.groups]
        qweight = torch._empty_affine_quantized(
            weight_shape + list(kernel_size),
            scale=1, zero_point=0, dtype=torch.qint8,
            **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
        bias_float = (
            torch.zeros(out_channels, dtype=torch.float,
                        **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}) if bias else None)



Reported by Pylint.

Module 'torch' has no 'float' member
Error

Line: 75 Column: 45

            scale=1, zero_point=0, dtype=torch.qint8,
            **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
        bias_float = (
            torch.zeros(out_channels, dtype=torch.float,
                        **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}) if bias else None)

        self.set_weight_bias(qweight, bias_float)
        self.scale = 1.0
        self.zero_point = 0


Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 75 Column: 13

            scale=1, zero_point=0, dtype=torch.qint8,
            **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
        bias_float = (
            torch.zeros(out_channels, dtype=torch.float,
                        **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}) if bias else None)

        self.set_weight_bias(qweight, bias_float)
        self.scale = 1.0
        self.zero_point = 0


Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 123 Column: 41

        (w, b) = self._weight_bias()
        destination[prefix + 'weight'] = w
        destination[prefix + 'bias'] = b
        destination[prefix + 'scale'] = torch.tensor(self.scale)
        destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)

    @torch.jit.export
    def __getstate__(self):
        (w, b) = self._weight_bias()


Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 124 Column: 46

        destination[prefix + 'weight'] = w
        destination[prefix + 'bias'] = b
        destination[prefix + 'scale'] = torch.tensor(self.scale)
        destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)

    @torch.jit.export
    def __getstate__(self):
        (w, b) = self._weight_bias()
        return (


Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 199 Column: 45

            weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        act_scale, act_zp = activation_post_process.calculate_qparams()
        assert weight_post_process.dtype == torch.qint8, \
            'Weight observer must have a dtype of qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # the __init__ call used is the one from derived classes and not the one from _ConvNd
        qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
                    mod.stride, mod.padding, mod.dilation, mod.groups,


Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 579 Column: 45

        weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        act_scale, act_zp = mod.activation_post_process.calculate_qparams()
        assert weight_post_process.dtype == torch.qint8, \
            'Weight observer must have a dtype of qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # the __init__ call used is the one from derived classes and not the one from _ConvTransposeNd
        qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,  # type: ignore[call-arg]
                    mod.stride, mod.padding, mod.output_padding, mod.groups,


Reported by Pylint.

No value for argument 'padding_mode' in constructor call
Error

Line: 583 Column: 17

            'Weight observer must have a dtype of qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # the __init__ call used is the one from derived classes and not the one from _ConvTransposeNd
        qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,  # type: ignore[call-arg]
                    mod.stride, mod.padding, mod.output_padding, mod.groups,
                    mod.bias is not None, mod.dilation, mod.padding_mode)
        qconv.set_weight_bias(qweight, mod.bias)
        qconv.scale = float(act_scale)
        qconv.zero_point = int(act_zp)


Reported by Pylint.

__init__ method from base class 'Module' is not called
Error

Line: 33 Column: 5

    return _reversed_padding_repeated_twice

class _ConvNd(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        # All subclasses have this signature - See PR #49702s
        raise NotImplementedError



Reported by Pylint.
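
Three distinct findings mix in this file: the usual torch extension members (qint8, float, zeros, tensor), a constructor check confused by subclassing (padding_mode is supplied by the derived classes' own __init__ signatures), and _ConvNd.__init__, which raises NotImplementedError by design and so never calls super().__init__(). A sketch of that last pattern, assuming the intent stated in the file's comment about subclass signatures:

import torch.nn as nn

class _ConvBase(nn.Module):
    # The base exists only to pin the common signature; it must never be
    # instantiated directly, so skipping super().__init__() is intentional.
    def __init__(self, in_channels, out_channels):  # pylint: disable=super-init-not-called
        raise NotImplementedError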

caffe2/python/operator_test/conv_test.py
158 issues
Unable to import 'hypothesis.strategies'
Error

Line: 11 Column: 1

import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core, utils, workspace
from caffe2.python.model_helper import ModelHelper
from hypothesis import assume, given, settings


Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 16 Column: 1

from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core, utils, workspace
from caffe2.python.model_helper import ModelHelper
from hypothesis import assume, given, settings


def _cudnn_supports(dilation=False, nhwc=False, backward=False):
    """Return True if cuDNN supports this configuration."""
    v = workspace.GetCuDNNVersion()


Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'cudnn_convolution_fwd_algo_count' member
Error

Line: 40 Column: 35

def _cudnn_convolution_algo_count(direction):
    try:
        if direction == "fwd":
            return st.integers(0, C.cudnn_convolution_fwd_algo_count - 1)
        elif direction == "dgrad":
            return st.integers(0, C.cudnn_convolution_bwd_data_algo_count - 1)
        elif direction == "wgrad":
            return st.integers(0, C.cudnn_convolution_bwd_filter_algo_count - 1)
        else:


Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'cudnn_convolution_bwd_data_algo_count' member
Error

Line: 42 Column: 35

        if direction == "fwd":
            return st.integers(0, C.cudnn_convolution_fwd_algo_count - 1)
        elif direction == "dgrad":
            return st.integers(0, C.cudnn_convolution_bwd_data_algo_count - 1)
        elif direction == "wgrad":
            return st.integers(0, C.cudnn_convolution_bwd_filter_algo_count - 1)
        else:
            assert False
    except Exception:


Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'cudnn_convolution_bwd_filter_algo_count' member
Error

Line: 44 Column: 35

        elif direction == "dgrad":
            return st.integers(0, C.cudnn_convolution_bwd_data_algo_count - 1)
        elif direction == "wgrad":
            return st.integers(0, C.cudnn_convolution_bwd_filter_algo_count - 1)
        else:
            assert False
    except Exception:
        return st.sampled_from([-1])



Reported by Pylint.

Too many positional arguments for method call
Error

Line: 973 Column: 17

            for i in range(N):
                for j in range(G):
                    Y[i, j, :, :] = np.dot(filter[j, :, :], X[i, j, :, :])
            Y = Y.reshape(N, M, H, W)
            if bias is not None:
                bias = bias.reshape(1, M, 1, 1)
                Y = np.add(Y, bias)
            return [Y]



Reported by Pylint.

Too many positional arguments for method call
Error

Line: 990 Column: 17

            for i in range(N):
                for j in range(G):
                    Y[i, :, j, :] = np.dot(X[i, :, j, :], filter[j, :, :].transpose())
            Y = Y.reshape(N, H, W, M)
            if bias is not None:
                bias = bias.reshape(1, 1, 1, M)
                Y = np.add(Y, bias)
            return [Y]



Reported by Pylint.

Catching too general exception Exception
Error

Line: 47 Column: 12

                          return st.integers(0, C.cudnn_convolution_bwd_filter_algo_count - 1)
        else:
            assert False
    except Exception:
        return st.sampled_from([-1])


class TestConvolution(serial.SerializedTestCase):
    # CUDNN does NOT support different padding values and we skip it


Reported by Pylint.

TODO: Group conv in NHWC not implemented for GPU yet.
Error

Line: 96 Column: 3

        gc,
        dc,
    ):
        # TODO: Group conv in NHWC not implemented for GPU yet.
        assume(group == 1 or order == "NCHW" or gc.device_type == caffe2_pb2.CPU)
        if group != 1 and order == "NHWC":
            dc = [d for d in dc if d.device_type == caffe2_pb2.CPU]
        # Group conv not implemented with EIGEN engine.
        assume(group == 1 or engine != "EIGEN")


Reported by Pylint.

Unused argument 'dc'
Error

Line: 185 Column: 9

        engine,
        use_bias,
        gc,
        dc,
    ):
        X = (
            np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
            - 0.5
        )


Reported by Pylint.
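
The broad-except finding flags a real but deliberate pattern: the helper catches Exception so that builds whose extension lacks the cuDNN algorithm counts fall back to a sentinel strategy. A hedged rewrite that keeps the fallback while narrowing the handler (C is made an explicit parameter so the sketch is self-contained):

import hypothesis.strategies as st

def _cudnn_convolution_algo_count(direction, C):
    try:
        counts = {
            "fwd": C.cudnn_convolution_fwd_algo_count,
            "dgrad": C.cudnn_convolution_bwd_data_algo_count,
            "wgrad": C.cudnn_convolution_bwd_filter_algo_count,
        }
        # AttributeError: extension built without cuDNN; KeyError: bad direction.
        return st.integers(0, counts[direction] - 1)
    except (AttributeError, KeyError):
        return st.sampled_from([-1])

The unused-argument and TODO findings are ordinary maintenance items, while the "too many positional arguments" reports on np.dot look like numpy inference noise and are worth verifying before acting on.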