The following issues were found:

caffe2/python/control_ops_grad.py
83 issues
TODO(iliacher): Remove unnecessary blob copying
Error

Line: 111 Column: 3

            # that requires a bijection between subset of inner blob names and
            # a set of all (Do's input and output) outer blob names

            # TODO(iliacher): Remove unnecessary blob copying

            new_inner_grad_input_name = \
                inner_input_name + "/_DO_OPERATOR_INNER_GRAD_COPY_"
            grad_copy_ops.append(_prepare_blob_copy_op(
                inner_grad_input_name, new_inner_grad_input_name))

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

## @package control_ops_grad
# Module caffe2.python.control_ops_grad





from caffe2.proto import caffe2_pb2


            

Reported by Pylint.
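
A minimal fix, assuming the existing header comments stay in place: add a module-level docstring as the first statement. The wording below is illustrative, not the module's actual documentation.

    ## @package control_ops_grad
    # Module caffe2.python.control_ops_grad
    """Gradient generation helpers for Caffe2 control-flow operators."""

    from caffe2.proto import caffe2_pb2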

Too many local variables (37/15)
Error

Line: 11 Column: 1

from caffe2.proto import caffe2_pb2


def gen_do_gradient(op, g_output):
    """
    Generates gradient Do operator, given forward Do op and a list
    of gradient blobs corresponding to forward op's outputs
    Returns a gradient op and a list of blobs corresponding to input gradients
    """

            

Reported by Pylint.

Too many branches (13/12)
Error

Line: 11 Column: 1

from caffe2.proto import caffe2_pb2


def gen_do_gradient(op, g_output):
    """
    Generates gradient Do operator, given forward Do op and a list
    of gradient blobs corresponding to forward op's outputs
    Returns a gradient op and a list of blobs corresponding to input gradients
    """

            

Reported by Pylint.

Argument name "op" doesn't conform to snake_case naming style
Error

Line: 11 Column: 1

from caffe2.proto import caffe2_pb2


def gen_do_gradient(op, g_output):
    """
    Generates gradient Do operator, given forward Do op and a list
    of gradient blobs corresponding to forward op's outputs
    Returns a gradient op and a list of blobs corresponding to input gradients
    """

            

Reported by Pylint.

Too many statements (57/50)
Error

Line: 11 Column: 1

from caffe2.proto import caffe2_pb2


def gen_do_gradient(op, g_output):
    """
    Generates gradient Do operator, given forward Do op and a list
    of gradient blobs corresponding to forward op's outputs
    Returns a gradient op and a list of blobs corresponding to input gradients
    """

            

Reported by Pylint.
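
The three complexity findings above (locals, branches, statements) all point at gen_do_gradient doing several jobs in one body. A structural sketch of one possible split; the helper name, signature, and split point are assumptions, not existing code:

    def _build_do_grad_maps(op, g_output, outer_to_inner_map):
        # Hypothetical helper: build backward_pass_initial_grad_map and
        # initial_grad_map here, so that gen_do_gradient keeps fewer locals,
        # branches, and statements.
        ...


    def gen_do_gradient(op, g_output):
        ...
        backward_pass_initial_grad_map, initial_grad_map = _build_do_grad_maps(
            op, g_output, outer_to_inner_map)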

Import outside toplevel (caffe2.python.core.BlobReference)
Error

Line: 17 Column: 5

                  of gradient blobs corresponding to forward op's outputs
    Returns a gradient op and a list of blobs corresponding to input gradients
    """
    from caffe2.python.core import BlobReference
    subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name = \
        _do_op_sanity_check_and_process(op)

    assert len(g_output) == len(op.output), \
        "Different number of gradient blobs and Do op outputs"

            

Reported by Pylint.
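
The deferred import of BlobReference is likely deliberate (presumably to avoid an import cycle with caffe2.python.core), so the choices are moving it to the top only if no cycle actually exists, or telling pylint the placement is intentional. A hedged sketch of the latter:

    def gen_do_gradient(op, g_output):
        # Deferred import, assumed to exist to break an import cycle with
        # caffe2.python.core; tell pylint the placement is intentional.
        from caffe2.python.core import BlobReference  # pylint: disable=import-outside-toplevel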

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 21
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name = \
        _do_op_sanity_check_and_process(op)

    assert len(g_output) == len(op.output), \
        "Different number of gradient blobs and Do op outputs"

    grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
    g_output = deduped_g_output


            

Reported by Bandit.
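
Bandit flags assert because the statement is stripped under python -O. If this check should also hold in optimized runs, an explicit exception preserves it; a minimal sketch of the same check:

    if len(g_output) != len(op.output):
        raise ValueError(
            "Different number of gradient blobs and Do op outputs: "
            "%d vs %d" % (len(g_output), len(op.output)))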

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 56
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                          backward_pass_initial_grad_map[BlobReference(inner_output_name)] = \
                BlobReference(inner_grad_output_name)
            initial_grad_map[inner_grad_output_name] = str(outer_grad_output_name)
    assert len(initial_grad_map) > 0, "Empty initial gradient map for Do op"

    inner_grad_ops, inner_grad_names_map = _gen_subgradient_pass(
        subnet, backward_pass_initial_grad_map)

    if len(inner_grad_ops) == 0:

            

Reported by Bandit.

Argument name "op" doesn't conform to snake_case naming style
Error

Line: 169 Column: 1

                  return grad_ops, g_input


def dedupe_g_output(op, g_output):
    # When generation a gradient op it's possible to receive the same gradient
    # blob corresponding to different forward op output blobs, Do operator
    # requires a bijection between inner and outer names, make sure we do
    # deduplication
    grad_ops = []

            

Reported by Pylint.
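
The invalid-name findings on op are length complaints: the name is already lowercase, but pylint's default regex wants at least three characters. Renaming a long-standing argument of a public helper is riskier than it is worth, so the usual remedies are whitelisting short names via pylint's good-names option or disabling the check on the definition line. A hedged sketch of the inline form:

    def dedupe_g_output(op, g_output):  # pylint: disable=invalid-name
        # When generating a gradient op it is possible to receive the same
        # gradient blob for different forward outputs; Do requires a bijection
        # between inner and outer names, so deduplicate here.
        grad_ops = []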

test/ao/sparsity/test_sparsifier.py
83 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              
import logging

import torch
from torch import nn
from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity
from torch.nn.utils.parametrize import is_parametrized

from torch.testing._internal.common_utils import TestCase

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 6 Column: 1

              import logging

import torch
from torch import nn
from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity
from torch.nn.utils.parametrize import is_parametrized

from torch.testing._internal.common_utils import TestCase


            

Reported by Pylint.

Unable to import 'torch.ao.sparsity'
Error

Line: 7 Column: 1

              
import torch
from torch import nn
from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity
from torch.nn.utils.parametrize import is_parametrized

from torch.testing._internal.common_utils import TestCase

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)

            

Reported by Pylint.

Unable to import 'torch.nn.utils.parametrize'
Error

Line: 8 Column: 1

              import torch
from torch import nn
from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity
from torch.nn.utils.parametrize import is_parametrized

from torch.testing._internal.common_utils import TestCase

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)


            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 10 Column: 1

              from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity
from torch.nn.utils.parametrize import is_parametrized

from torch.testing._internal.common_utils import TestCase

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)

class Model(nn.Module):
    def __init__(self):

            

Reported by Pylint.
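
The "Unable to import" findings in this file (and the similar ones in the other torch and caffe2 files below) are environment problems rather than code bugs: pylint is running in an interpreter where torch is not importable. The usual remedy is to run pylint inside the environment that has the project's dependencies installed; failing that, the check can be silenced at the import site. A hedged sketch of the inline form:

    import torch  # pylint: disable=import-error
    from torch import nn  # pylint: disable=import-error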

Unused argument 'kwargs'
Error

Line: 32 Column: 1

                  def __init__(self, **kwargs):
        super().__init__(defaults=kwargs)

    def update_mask(self, layer, **kwargs):
        layer.parametrizations.weight[0].mask[0] = 0
        linear_state = self.state['linear']
        linear_state['step_count'] = linear_state.get('step_count', 0) + 1



            

Reported by Pylint.
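
If update_mask has to keep **kwargs to match the BaseSparsifier interface, the conventional fix is to mark the parameter as intentionally unused rather than drop it. A minimal sketch, assuming the signature is dictated by the base class:

    def update_mask(self, layer, **kwargs):
        del kwargs  # accepted for interface compatibility, intentionally unused
        layer.parametrizations.weight[0].mask[0] = 0
        linear_state = self.state['linear']
        linear_state['step_count'] = linear_state.get('step_count', 0) + 1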

Attribute 'enable_mask_update' defined outside __init__
Error

Line: 59 Column: 9

                  def test_step(self):
        model = Model()
        sparsifier = ImplementedSparsifier(test=3)
        sparsifier.enable_mask_update = True
        sparsifier.prepare(model, [model.linear])
        sparsifier.step()
        assert torch.all(model.linear.parametrizations.weight[0].mask[0] == 0)

    def test_state_dict(self):

            

Reported by Pylint.
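
The warning comes from the test assigning enable_mask_update on an instance whose constructor never declares it. If the flag is meant to be part of the sparsifier's state, declaring a default in __init__ keeps the class self-describing; a hedged sketch against the test's ImplementedSparsifier (the default value is an assumption):

    class ImplementedSparsifier(BaseSparsifier):
        def __init__(self, **kwargs):
            super().__init__(defaults=kwargs)
            # Declare the flag up front so later assignments are not the first
            # place the attribute appears.
            self.enable_mask_update = False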

Unused variable 'step'
Error

Line: 71 Column: 13

                      sparsifier0.prepare(model0, [model0.linear])
        mask = model0.linear.parametrizations['weight'][0].mask
        mask.data = torch.arange(mask.shape[0] * mask.shape[1]).reshape(mask.shape)
        for step in range(step_count):
            sparsifier0.step()
        state_dict = sparsifier0.state_dict()

        # Check the expected keys in the state_dict
        assert 'state' in state_dict

            

Reported by Pylint.
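
The loop only needs the iteration count, not the index, so the idiomatic fix is the throwaway name:

    for _ in range(step_count):
        sparsifier0.step()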

Missing module docstring
Error

Line: 1 Column: 1

              # -*- coding: utf-8 -*-

import logging

import torch
from torch import nn
from torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity
from torch.nn.utils.parametrize import is_parametrized


            

Reported by Pylint.

Line too long (102/100)
Error

Line: 12 Column: 1

              
from torch.testing._internal.common_utils import TestCase

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.seq = nn.Sequential(

            

Reported by Pylint.
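
The logging.basicConfig call only needs wrapping to stay under the 100-character limit:

    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO,
    )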

caffe2/python/operator_test/boolean_mask_test.py
82 issues
Unable to import 'hypothesis'
Error

Line: 8 Column: 1

              from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np


class TestBooleanMaskOp(serial.SerializedTestCase):

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 9 Column: 1

              import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np


class TestBooleanMaskOp(serial.SerializedTestCase):
    @given(x=hu.tensor1d(min_len=1,

            

Reported by Pylint.

Unused variable 'expected_gradient'
Error

Line: 24 Column: 9

                                               ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)
        self.assertDeviceChecks(dc, op, [x, mask], [0])
        self.assertGradientChecks(gc, op, [x, mask], 0, [0])


    @given(x=hu.tensor1d(min_len=1,

            

Reported by Pylint.
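
expected_gradient is computed but never compared against anything. Whether it should feed an extra assertion or simply be removed is the test author's call; the minimal cleanup is to drop the dead assignment, as sketched below, and add an explicit gradient assertion later if one was intended.

    mask = np.random.choice(a=[True, False], size=x.shape[0])
    self.assertDeviceChecks(dc, op, [x, mask], [0])
    self.assertGradientChecks(gc, op, [x, mask], 0, [0])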

Missing module docstring
Error

Line: 1 Column: 1

              



from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings
import hypothesis.strategies as st

            

Reported by Pylint.

Missing class docstring
Error

Line: 13 Column: 1

              import numpy as np


class TestBooleanMaskOp(serial.SerializedTestCase):
    @given(x=hu.tensor1d(min_len=1,
                         max_len=100,
                         elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)

            

Reported by Pylint.

Argument name "x" doesn't conform to snake_case naming style
Error

Line: 19 Column: 5

                                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 19 Column: 5

                                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)

            

Reported by Pylint.

Argument name "dc" doesn't conform to snake_case naming style
Error

Line: 19 Column: 5

                                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 19 Column: 5

                                       elements=hu.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)

            

Reported by Pylint.

Variable name "op" doesn't conform to snake_case naming style
Error

Line: 20 Column: 9

                         **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_boolean_mask_gradient(self, x, gc, dc):
        op = core.CreateOperator("BooleanMask",
                                 ["data", "mask"],
                                 "masked_data")
        mask = np.random.choice(a=[True, False], size=x.shape[0])
        expected_gradient = np.copy(mask).astype(int)
        self.assertDeviceChecks(dc, op, [x, mask], [0])

            

Reported by Pylint.

test/distributed/pipeline/sync/test_dependency.py
82 issues
Unable to import 'pytest'
Error

Line: 9 Column: 1

              # LICENSE file in the root directory of this source tree.
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join



            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 10 Column: 1

              import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join


@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

            

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.dependency'
Error

Line: 12 Column: 1

              import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join


@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_fork_join():
    logs = []

            

Reported by Pylint.

leak is not callable
Error

Line: 115 Column: 12

                  x.backward()
    del x, phony

    assert leak() is None


def test_join_when_fork_not_requires_grad():
    x = torch.rand(2, 1)
    a, b = x.chunk(2)

            

Reported by Pylint.
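
leak is rebound to a weakref.ref inside the test's autograd Function, and a weakref.ref is callable; pylint simply cannot see the rebinding through the nonlocal. A hedged way to keep the behaviour and quiet the check:

    assert leak() is None  # pylint: disable=not-callable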

Access to a protected member _backward_cls of a client class
Error

Line: 58 Column: 35

              
    assert x.requires_grad
    assert p.requires_grad
    assert x.grad_fn.__class__ is Fork._backward_cls
    assert p.grad_fn.__class__ is Fork._backward_cls

    with torch.enable_grad():
        x2 = join(x, p)


            

Reported by Pylint.

Access to a protected member _backward_cls of a client class
Error

Line: 59 Column: 35

                  assert x.requires_grad
    assert p.requires_grad
    assert x.grad_fn.__class__ is Fork._backward_cls
    assert p.grad_fn.__class__ is Fork._backward_cls

    with torch.enable_grad():
        x2 = join(x, p)

    assert x2 is not x

            

Reported by Pylint.

Access to a protected member _backward_cls of a client class
Error

Line: 68 Column: 35

                  x = x2

    assert x.requires_grad
    assert x.grad_fn.__class__ is Join._backward_cls


def test_fork_join_no_grad(monkeypatch):
    def do_not_apply(*args):
        raise AssertionError("Function.apply called")

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 98 Column: 26

              
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad):
            nonlocal leak

            

Reported by Pylint.

Unused argument 'ctx'
Error

Line: 98 Column: 21

              
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad):
            nonlocal leak

            

Reported by Pylint.
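
Both findings sit on the small autograd Function defined inside the test, so the parameter names are local and safe to change; "tensor" below is an arbitrary replacement name, not something the API requires:

    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, tensor):  # renamed from "input" to avoid shadowing the built-in
            del ctx  # required by the Function API, intentionally unused in this test
            return tensor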

Missing module docstring
Error

Line: 1 Column: 1

              # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import weakref

import pytest

            

Reported by Pylint.

caffe2/python/task.py
82 issues
TODO(azzolini): consistency checks
Error

Line: 12 Column: 3

              

def _merge_node_kwargs(a, b):
    # TODO(azzolini): consistency checks
    if a is None:
        return b
    if b is None:
        return a
    c = copy(a)

            

Reported by Pylint.

Access to a protected member _setup_used of a client class
Error

Line: 117 Column: 44

                  for obj in objs:
        # these are needed in order to allow nesting of TaskGroup, which
        # is a feature not yet implemented.
        if hasattr(obj, '_setup_used') and obj._setup_used:
            continue
        if hasattr(obj, '_setup_target') and obj._setup_target != target:
            continue
        if hasattr(obj, 'setup'):
            nets = obj.setup(init_net)

            

Reported by Pylint.

Access to a protected member _setup_target of a client class
Error

Line: 119 Column: 46

                      # is a feature not yet implemented.
        if hasattr(obj, '_setup_used') and obj._setup_used:
            continue
        if hasattr(obj, '_setup_target') and obj._setup_target != target:
            continue
        if hasattr(obj, 'setup'):
            nets = obj.setup(init_net)
            if isinstance(nets, (list, tuple)):
                init_nets += nets

            

Reported by Pylint.

Access to a protected member _setup_used of a client class
Error

Line: 129 Column: 13

                              init_nets.append(nets)
            elif nets is not None:
                raise TypeError('Unsupported type for setup: %s' % type(nets))
            obj._setup_used = True
        if hasattr(obj, 'exit'):
            nets = obj.exit(exit_net)
            if isinstance(nets, (list, tuple)):
                exit_nets += nets
            elif isinstance(nets, (core.Net, core.ExecutionStep)):

            

Reported by Pylint.

Access to a protected member _setup_used of a client class
Error

Line: 138 Column: 13

                              exit_nets.append(nets)
            elif nets is not None:
                raise TypeError('Unsupported type for setup: %s' % type(nets))
            obj._setup_used = True

    if len(init_net.Proto().op) > 0:
        init_nets.insert(0, init_net)
    if len(exit_net.Proto().op) > 0:
        exit_nets.insert(0, exit_net)

            

Reported by Pylint.

Access to a protected member _workspace_type of a client class
Error

Line: 214 Column: 13

                          'Cannot add Task to an already used TaskGroup.')
        assert (
            self._workspace_type is None or
            task._workspace_type is None or
            self._workspace_type == task._workspace_type)
        if task._workspace_type is None:
            task._workspace_type = (
                self._workspace_type or WorkspaceType.PRIVATE)
        if self._workspace_type is None:

            

Reported by Pylint.

Access to a protected member _workspace_type of a client class
Error

Line: 215 Column: 37

                      assert (
            self._workspace_type is None or
            task._workspace_type is None or
            self._workspace_type == task._workspace_type)
        if task._workspace_type is None:
            task._workspace_type = (
                self._workspace_type or WorkspaceType.PRIVATE)
        if self._workspace_type is None:
            self._workspace_type = task._workspace_type

            

Reported by Pylint.

Access to a protected member _workspace_type of a client class
Error

Line: 216 Column: 12

                          self._workspace_type is None or
            task._workspace_type is None or
            self._workspace_type == task._workspace_type)
        if task._workspace_type is None:
            task._workspace_type = (
                self._workspace_type or WorkspaceType.PRIVATE)
        if self._workspace_type is None:
            self._workspace_type = task._workspace_type
        task._notify_used()

            

Reported by Pylint.

Access to a protected member _workspace_type of a client class
Error

Line: 217 Column: 13

                          task._workspace_type is None or
            self._workspace_type == task._workspace_type)
        if task._workspace_type is None:
            task._workspace_type = (
                self._workspace_type or WorkspaceType.PRIVATE)
        if self._workspace_type is None:
            self._workspace_type = task._workspace_type
        task._notify_used()
        self._tasks.append(task)

            

Reported by Pylint.

Access to a protected member _workspace_type of a client class
Error

Line: 220 Column: 36

                          task._workspace_type = (
                self._workspace_type or WorkspaceType.PRIVATE)
        if self._workspace_type is None:
            self._workspace_type = task._workspace_type
        task._notify_used()
        self._tasks.append(task)

    def tasks(self):
        for task in self._tasks_to_add:

            

Reported by Pylint.
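
TaskGroup reaches into Task's _workspace_type in several places. Because both classes live in the same module and the coupling is intentional, the lighter-weight fixes are an inline disable or a small public accessor on Task; a hedged sketch of the accessor route (the method names are illustrative and may duplicate something the class already exposes):

    class Task(object):
        def workspace_type(self):
            return self._workspace_type

        def set_workspace_type(self, workspace_type):
            self._workspace_type = workspace_type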

torch/onnx/symbolic_opset13.py
81 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 57 Column: 46

                              len(sym_help._unpack_list(split_size_or_sizes)) == _outputs:
            split_sizes = [sym_help._unsqueeze_helper(g, v, [0]) for v in sym_help._unpack_list(split_size_or_sizes)]

            start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
            axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
            res = []
            for i in range(_outputs):
                end = g.op("Add", start, split_sizes[i])  # split_sizes is a list of same length as _outputs
                res.append(g.op("Slice", self, start, end, axis))

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 57 Column: 70

                              len(sym_help._unpack_list(split_size_or_sizes)) == _outputs:
            split_sizes = [sym_help._unsqueeze_helper(g, v, [0]) for v in sym_help._unpack_list(split_size_or_sizes)]

            start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
            axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
            res = []
            for i in range(_outputs):
                end = g.op("Add", start, split_sizes[i])  # split_sizes is a list of same length as _outputs
                res.append(g.op("Slice", self, start, end, axis))

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 58 Column: 71

                          split_sizes = [sym_help._unsqueeze_helper(g, v, [0]) for v in sym_help._unpack_list(split_size_or_sizes)]

            start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
            axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
            res = []
            for i in range(_outputs):
                end = g.op("Add", start, split_sizes[i])  # split_sizes is a list of same length as _outputs
                res.append(g.op("Slice", self, start, end, axis))
                start = end

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 58 Column: 45

                          split_sizes = [sym_help._unsqueeze_helper(g, v, [0]) for v in sym_help._unpack_list(split_size_or_sizes)]

            start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
            axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
            res = []
            for i in range(_outputs):
                end = g.op("Add", start, split_sizes[i])  # split_sizes is a list of same length as _outputs
                res.append(g.op("Slice", self, start, end, axis))
                start = end

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 65 Column: 72

                              res.append(g.op("Slice", self, start, end, axis))
                start = end
            return res
        return [g.op("SequenceAt", split_out, g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)))
                for i in range(_outputs)]

    split_val = split_size_or_sizes.node()["value"]
    if split_val.dim() > 0:
        return g.op("Split", self, split_size_or_sizes, axis_i=dim, outputs=_outputs)

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 65 Column: 96

                              res.append(g.op("Slice", self, start, end, axis))
                start = end
            return res
        return [g.op("SequenceAt", split_out, g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)))
                for i in range(_outputs)]

    split_val = split_size_or_sizes.node()["value"]
    if split_val.dim() > 0:
        return g.op("Split", self, split_size_or_sizes, axis_i=dim, outputs=_outputs)

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 83 Column: 39

                  leftover = size % split_size
    if leftover:
        splits.append(leftover)
    splits = g.op("Constant", value_t=torch.tensor(splits))
    return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)


def split_with_sizes(g, self, split_sizes, dim, _outputs=None):
    return split(g, self, split_sizes, dim, _outputs)

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 104 Column: 68

                  if _outputs is None:
        return g.op("SplitToSequence",
                    self,
                    g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
                    axis_i=dim, keepdims_i=0)

    splits = g.op("Constant", value_t=torch.tensor([1] * _outputs))
    outputs = g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
    outputs = [outputs] if _outputs == 1 else outputs

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 104 Column: 46

                  if _outputs is None:
        return g.op("SplitToSequence",
                    self,
                    g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
                    axis_i=dim, keepdims_i=0)

    splits = g.op("Constant", value_t=torch.tensor([1] * _outputs))
    outputs = g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
    outputs = [outputs] if _outputs == 1 else outputs

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 107 Column: 39

                                  g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
                    axis_i=dim, keepdims_i=0)

    splits = g.op("Constant", value_t=torch.tensor([1] * _outputs))
    outputs = g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
    outputs = [outputs] if _outputs == 1 else outputs
    squeezed_outputs = [g.op("Squeeze", out, g.op("Constant", value_t=torch.tensor([dim]))) for out in outputs]
    return squeezed_outputs


            

Reported by Pylint.
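
torch.tensor and torch.long are provided by torch's C extension, which the pylint/astroid version producing this report cannot introspect, so these no-member findings are false positives rather than bugs. They are normally handled globally (for example via pylint's generated-members option, or a newer astroid) rather than per line; if a local suppression is preferred, a hedged sketch:

    # torch.tensor / torch.long live in the C extension and resolve at runtime.
    start = g.op("Constant",
                 value_t=torch.tensor([0], dtype=torch.long))  # pylint: disable=no-member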

caffe2/python/net_builder.py
81 issues
Access to a protected member _use_control_ops of a client class
Error

Line: 98 Column: 52

                      for child in self._children:
            if isinstance(child, core.Net):
                self._lexical_scope |= child.UsedBlobNames()
            elif isinstance(child, NetBuilder) and child._use_control_ops:
                self._lexical_scope |= child._lexical_scope

    def _reset_children(self):
        self._current_net = None
        self._children = []

            

Reported by Pylint.

Access to a protected member _lexical_scope of a client class
Error

Line: 99 Column: 40

                          if isinstance(child, core.Net):
                self._lexical_scope |= child.UsedBlobNames()
            elif isinstance(child, NetBuilder) and child._use_control_ops:
                self._lexical_scope |= child._lexical_scope

    def _reset_children(self):
        self._current_net = None
        self._children = []
        self._lexical_scope = set(self._init_lexical_scope)

            

Reported by Pylint.

Access to a protected member _use_control_ops of a client class
Error

Line: 111 Column: 51

              
        if self._use_control_ops:
            assert isinstance(child, core.Net) or (
                isinstance(child, NetBuilder) and child._use_control_ops), \
                "Expected Net or NetBuilder with control ops"

        self._current_net = None
        self._children.append(child)
        # to-do : check it's not a dag net

            

Reported by Pylint.

Access to a protected member _use_control_ops of a client class
Error

Line: 182 Column: 24

                      for n in nets_or_builders:
            cur = None
            if isinstance(n, NetBuilder):
                assert n._use_control_ops, \
                    "Merging of NetBuilder supported only for control ops"
                nets = n.get()
                assert len(nets) == 1 and isinstance(nets[0], core.Net), \
                    "Invalid control op net builder"
                cur = nets[0]

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 502 Column: 24

                  INIT = 'init'
    EXIT = 'exit'

    def __init__(self, type, name=None):
        NetBuilder.__init__(self, name)
        self.type = type

    def setup(self, net):
        if self.type == _SetupBuilder.INIT:

            

Reported by Pylint.

Unused argument 'net'
Error

Line: 506 Column: 21

                      NetBuilder.__init__(self, name)
        self.type = type

    def setup(self, net):
        if self.type == _SetupBuilder.INIT:
            return core.to_execution_step(self)

    def exit(self, net):
        if self.type == _SetupBuilder.EXIT:

            

Reported by Pylint.

Unused argument 'net'
Error

Line: 510 Column: 20

                      if self.type == _SetupBuilder.INIT:
            return core.to_execution_step(self)

    def exit(self, net):
        if self.type == _SetupBuilder.EXIT:
            return core.to_execution_step(self)


class _RunOnce(NetBuilder):

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 576 Column: 24

                          ops.stop_if(ops.GE([self._iter, self._num_iters]))
        return builder

    def __exit__(self, type, *args):
        if type is None and self._num_iters is not None:
            self.current_net().Add([self._iter, self._inc], [self._iter])
        NetBuilder.__exit__(self, type, *args)



            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 626 Column: 24

                  def add(self, child):
        return NetBuilder.add(self, child)

    def __exit__(self, type, *args):
        if type is None:
            _then_nets = self._children
            self._reset_children()

            self._then_net = NetBuilder.merge_nets(

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 657 Column: 24

                          'Invalid use of Else builder'
        self._if_builder = parent._children[-1]

    def __exit__(self, type, *args):
        if type is None:
            _else_nets = self._children
            self._reset_children()

            self._if_builder._else_net = NetBuilder.merge_nets(

            

Reported by Pylint.
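
For the __exit__ methods the first parameter is conventionally the exception type, so renaming it fixes the shadowing and reads better; the constructor keyword "type" in _SetupBuilder.__init__ is public API, so changing that one would need a deprecation path. A sketch of the context-manager case, mirroring the loop builder shown above:

    def __exit__(self, etype, *args):
        if etype is None and self._num_iters is not None:
            self.current_net().Add([self._iter, self._inc], [self._iter])
        NetBuilder.__exit__(self, etype, *args)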

caffe2/python/net_builder_test.py
81 issues
Module 'caffe2.python._import_c_extension' has no 'Workspace' member
Error

Line: 91 Column: 14

                          q = _test_if(ops.Const(25))
        plan = Plan('name')
        plan.AddStep(to_execution_step(nb))
        ws = workspace.C.Workspace()
        ws.run(plan)
        expected = [
            (y, 5),
            (z, False),
            (w, True),

            

Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'Workspace' member
Error

Line: 297 Column: 14

              
        plan = Plan('if_net_test')
        plan.AddStep(to_execution_step(nb))
        ws = workspace.C.Workspace()
        ws.run(plan)

        first_res_value = ws.blobs[str(first_res)].fetch()
        second_res_value = ws.blobs[str(second_res)].fetch()
        y0_value = ws.blobs[str(y0)].fetch()

            

Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'Workspace' member
Error

Line: 325 Column: 14

              
        plan = Plan('while_net_test')
        plan.AddStep(to_execution_step(nb))
        ws = workspace.C.Workspace()
        ws.run(plan)

        x_value = ws.blobs[str(x)].fetch()
        y_value = ws.blobs[str(y)].fetch()


            

Reported by Pylint.

Unused argument 'outputs'
Error

Line: 26 Column: 23

                  PythonOpStats.num_instances += 1
    PythonOpStats.lock.release()

    def my_op(inputs, outputs):
        PythonOpStats.lock.acquire()
        PythonOpStats.num_calls += 1
        PythonOpStats.lock.release()

    return my_op

            

Reported by Pylint.

Unused argument 'inputs'
Error

Line: 26 Column: 15

                  PythonOpStats.num_instances += 1
    PythonOpStats.lock.release()

    def my_op(inputs, outputs):
        PythonOpStats.lock.acquire()
        PythonOpStats.num_calls += 1
        PythonOpStats.lock.release()

    return my_op

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 104 Column: 13

                      ]
        for b, expected in expected:
            actual = ws.blobs[str(b)].fetch()
            self.assertEquals(actual, expected)

    def _expected_loop(self):
        total = 0
        total_large = 0
        total_small = 0

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 155 Column: 13

                          result = final_output(total)
        with LocalSession() as session:
            session.run(task)
            self.assertEquals(2, result.fetch())

    def test_loops(self):
        with Task() as task:
            out_actual = self._actual_loop()
        with LocalSession() as session:

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 165 Column: 17

                          expected = self._expected_loop()
            actual = [o.fetch() for o in out_actual]
            for e, a in zip(expected, actual):
                self.assertEquals(e, a)

    def test_setup(self):
        with Task() as task:
            with ops.task_init():
                one = ops.Const(1)

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 187 Column: 13

                          o7_2 = final_output(seven_2)
        with LocalSession() as session:
            session.run(task)
            self.assertEquals(o6.fetch(), 6)
            self.assertEquals(o7_1.fetch(), 7)
            self.assertEquals(o7_2.fetch(), 7)

    def test_multi_instance_python_op(self):
        """

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 188 Column: 13

                      with LocalSession() as session:
            session.run(task)
            self.assertEquals(o6.fetch(), 6)
            self.assertEquals(o7_1.fetch(), 7)
            self.assertEquals(o7_2.fetch(), 7)

    def test_multi_instance_python_op(self):
        """
        When task instances are created at runtime, C++ concurrently creates

            

Reported by Pylint.
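
assertEquals is a deprecated unittest alias; assertEqual is a drop-in replacement:

    self.assertEqual(actual, expected)
    self.assertEqual(2, result.fetch())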

caffe2/python/cnn.py
81 issues
TODO(wyiming): remove this dummy helper later
Error

Line: 75 Column: 3

                      )

    def PadImage(self, blob_in, blob_out, **kwargs):
        # TODO(wyiming): remove this dummy helper later
        self.net.PadImage(blob_in, blob_out, **kwargs)

    def ConvNd(self, *args, **kwargs):
        return brew.conv_nd(
            self,

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              ## @package cnn
# Module caffe2.python.cnn





from caffe2.python import brew, workspace
from caffe2.python.model_helper import ModelHelper

            

Reported by Pylint.

standard import "import logging" should be placed before "from caffe2.python import brew, workspace"
Error

Line: 11 Column: 1

              from caffe2.python import brew, workspace
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import logging


class CNNModelHelper(ModelHelper):
    """A helper model so we can write CNN models more easily, without having to
    manually define parameter initializations and operators separately.

            

Reported by Pylint.

Too many public methods (37/20)
Error

Line: 14 Column: 1

              import logging


class CNNModelHelper(ModelHelper):
    """A helper model so we can write CNN models more easily, without having to
    manually define parameter initializations and operators separately.
    """

    def __init__(self, order="NCHW", name=None,

            

Reported by Pylint.

Too many arguments (9/5)
Error

Line: 19 Column: 5

                  manually define parameter initializations and operators separately.
    """

    def __init__(self, order="NCHW", name=None,
                 use_cudnn=True, cudnn_exhaustive_search=False,
                 ws_nbytes_limit=None, init_params=True,
                 skip_sparse_optim=False,
                 param_model=None):
        logging.warning(

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 39 Column: 9

                      }
        if ws_nbytes_limit:
            cnn_arg_scope['ws_nbytes_limit'] = ws_nbytes_limit
        super(CNNModelHelper, self).__init__(
            skip_sparse_optim=skip_sparse_optim,
            name="CNN" if name is None else name,
            init_params=init_params,
            param_model=param_model,
            arg_scope=cnn_arg_scope,

            

Reported by Pylint.
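
The module targets Python 3 only at this point, so the zero-argument form pylint suggests is a drop-in change (argument list abbreviated to what the excerpt shows):

    super().__init__(
        skip_sparse_optim=skip_sparse_optim,
        name="CNN" if name is None else name,
        init_params=init_params,
        param_model=param_model,
        arg_scope=cnn_arg_scope,
    )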

Method name "ImageInput" doesn't conform to snake_case naming style
Error

Line: 56 Column: 5

                              "Cannot understand the CNN storage order %s." % self.order
            )

    def ImageInput(self, blob_in, blob_out, use_gpu_transform=False, **kwargs):
        return brew.image_input(
            self,
            blob_in,
            blob_out,
            order=self.order,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 56 Column: 5

                              "Cannot understand the CNN storage order %s." % self.order
            )

    def ImageInput(self, blob_in, blob_out, use_gpu_transform=False, **kwargs):
        return brew.image_input(
            self,
            blob_in,
            blob_out,
            order=self.order,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 66 Column: 5

                          **kwargs
        )

    def VideoInput(self, blob_in, blob_out, **kwargs):
        return brew.video_input(
            self,
            blob_in,
            blob_out,
            **kwargs

            

Reported by Pylint.

Method name "VideoInput" doesn't conform to snake_case naming style
Error

Line: 66 Column: 5

                          **kwargs
        )

    def VideoInput(self, blob_in, blob_out, **kwargs):
        return brew.video_input(
            self,
            blob_in,
            blob_out,
            **kwargs

            

Reported by Pylint.

test/quantization/eager/test_numeric_suite_eager.py
81 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import (
    DeQuantStub,
    QuantStub,
    convert,
    default_qconfig,
    prepare,

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 2 Column: 1

              import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import (
    DeQuantStub,
    QuantStub,
    convert,
    default_qconfig,
    prepare,

            

Reported by Pylint.

Unable to import 'torch.nn.quantized'
Error

Line: 3 Column: 1

              import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import (
    DeQuantStub,
    QuantStub,
    convert,
    default_qconfig,
    prepare,

            

Reported by Pylint.

Unable to import 'torch.quantization'
Error

Line: 4 Column: 1

              import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import (
    DeQuantStub,
    QuantStub,
    convert,
    default_qconfig,
    prepare,

            

Reported by Pylint.

Unable to import 'torch.quantization._numeric_suite'
Error

Line: 13 Column: 1

                  quantize,
    quantize_dynamic,
)
from torch.quantization._numeric_suite import (
    OutputLogger,
    Shadow,
    ShadowLogger,
    compare_model_outputs,
    compare_model_stub,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_quantization'
Error

Line: 21 Column: 1

                  compare_model_stub,
    compare_weights,
)
from torch.testing._internal.common_quantization import (
    AnnotatedConvBnReLUModel,
    AnnotatedConvModel,
    AnnotatedSingleLayerLinearModel,
    LSTMwithHiddenDynamicModel,
    AnnotatedTwoLayerLinearModel,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_quantized'
Error

Line: 31 Column: 1

                  SingleLayerLinearDynamicModel,
    test_only_eval_fn,
)
from torch.testing._internal.common_quantized import override_qengines


class SubModule(torch.nn.Module):
    def __init__(self):
        super(SubModule, self).__init__()

            

Reported by Pylint.

Unused variable 'k'
Error

Line: 100 Column: 17

                              float_model.state_dict(), q_model.state_dict()
            )
            self.assertEqual(len(weight_dict), 1)
            for k, v in weight_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        model_list = [AnnotatedConvModel(qengine), AnnotatedConvBnReLUModel(qengine)]
        for model in model_list:
            model.eval()

            

Reported by Pylint.

Unused variable 'k'
Error

Line: 122 Column: 17

                              float_model.state_dict(), q_model.state_dict()
            )
            self.assertEqual(len(weight_dict), 1)
            for k, v in weight_dict.items():
                self.assertTrue(v["float"].shape == v["quantized"].shape)

        model_list = [AnnotatedSingleLayerLinearModel(qengine)]
        for model in model_list:
            model.eval()

            

Reported by Pylint.

Unused variable 'k'
Error

Line: 144 Column: 17

                              float_model.state_dict(), q_model.state_dict()
            )
            self.assertEqual(len(weight_dict), 1)
            for k, v in weight_dict.items():
                self.assertTrue(len(v["float"]) == len(v["quantized"]))
                for i, val in enumerate(v["quantized"]):
                    self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)

        model_list = [SingleLayerLinearDynamicModel(qengine)]

            

Reported by Pylint.
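
Only the values are used in these loops, so iterating over the dictionary's values removes the unused name:

    for v in weight_dict.values():
        self.assertTrue(v["float"].shape == v["quantized"].shape)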